code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def twoDimensionalScatter(title, title_x, title_y, x, y, lim_x = None, lim_y = None, color = 'b', size = 20, alpha=None): """ Create a two-dimensional scatter plot. INPUTS """ plt.figure() plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none') plt.xlabel(title_x) plt.ylabel(title_y) plt.title(title) if type(color) is not str: plt.colorbar() if lim_x: plt.xlim(lim_x[0], lim_x[1]) if lim_y: plt.ylim(lim_y[0], lim_y[1])
def function[twoDimensionalScatter, parameter[title, title_x, title_y, x, y, lim_x, lim_y, color, size, alpha]]: constant[ Create a two-dimensional scatter plot. INPUTS ] call[name[plt].figure, parameter[]] call[name[plt].scatter, parameter[name[x], name[y]]] call[name[plt].xlabel, parameter[name[title_x]]] call[name[plt].ylabel, parameter[name[title_y]]] call[name[plt].title, parameter[name[title]]] if compare[call[name[type], parameter[name[color]]] is_not name[str]] begin[:] call[name[plt].colorbar, parameter[]] if name[lim_x] begin[:] call[name[plt].xlim, parameter[call[name[lim_x]][constant[0]], call[name[lim_x]][constant[1]]]] if name[lim_y] begin[:] call[name[plt].ylim, parameter[call[name[lim_y]][constant[0]], call[name[lim_y]][constant[1]]]]
keyword[def] identifier[twoDimensionalScatter] ( identifier[title] , identifier[title_x] , identifier[title_y] , identifier[x] , identifier[y] , identifier[lim_x] = keyword[None] , identifier[lim_y] = keyword[None] , identifier[color] = literal[string] , identifier[size] = literal[int] , identifier[alpha] = keyword[None] ): literal[string] identifier[plt] . identifier[figure] () identifier[plt] . identifier[scatter] ( identifier[x] , identifier[y] , identifier[c] = identifier[color] , identifier[s] = identifier[size] , identifier[alpha] = identifier[alpha] , identifier[edgecolors] = literal[string] ) identifier[plt] . identifier[xlabel] ( identifier[title_x] ) identifier[plt] . identifier[ylabel] ( identifier[title_y] ) identifier[plt] . identifier[title] ( identifier[title] ) keyword[if] identifier[type] ( identifier[color] ) keyword[is] keyword[not] identifier[str] : identifier[plt] . identifier[colorbar] () keyword[if] identifier[lim_x] : identifier[plt] . identifier[xlim] ( identifier[lim_x] [ literal[int] ], identifier[lim_x] [ literal[int] ]) keyword[if] identifier[lim_y] : identifier[plt] . identifier[ylim] ( identifier[lim_y] [ literal[int] ], identifier[lim_y] [ literal[int] ])
def twoDimensionalScatter(title, title_x, title_y, x, y, lim_x=None, lim_y=None, color='b', size=20, alpha=None): """ Create a two-dimensional scatter plot. INPUTS """ plt.figure() plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none') plt.xlabel(title_x) plt.ylabel(title_y) plt.title(title) if type(color) is not str: plt.colorbar() # depends on [control=['if'], data=[]] if lim_x: plt.xlim(lim_x[0], lim_x[1]) # depends on [control=['if'], data=[]] if lim_y: plt.ylim(lim_y[0], lim_y[1]) # depends on [control=['if'], data=[]]
def _objective(self, x): """ The objective function passed to the optimizer. It combines the likelihood and the priors. Failures are handled robustly. The algorithm will try several times to return the objective, and will raise the original exception if the objective cannot be computed. :param x: the parameters of the model. :parameter type: np.array """ try: self.optimizer_array = x obj = self.objective_function() self._fail_count = 0 except (LinAlgError, ZeroDivisionError, ValueError):#pragma: no cover if self._fail_count >= self._allowed_failures: raise self._fail_count += 1 return np.inf return obj
def function[_objective, parameter[self, x]]: constant[ The objective function passed to the optimizer. It combines the likelihood and the priors. Failures are handled robustly. The algorithm will try several times to return the objective, and will raise the original exception if the objective cannot be computed. :param x: the parameters of the model. :parameter type: np.array ] <ast.Try object at 0x7da1b0efd090> return[name[obj]]
keyword[def] identifier[_objective] ( identifier[self] , identifier[x] ): literal[string] keyword[try] : identifier[self] . identifier[optimizer_array] = identifier[x] identifier[obj] = identifier[self] . identifier[objective_function] () identifier[self] . identifier[_fail_count] = literal[int] keyword[except] ( identifier[LinAlgError] , identifier[ZeroDivisionError] , identifier[ValueError] ): keyword[if] identifier[self] . identifier[_fail_count] >= identifier[self] . identifier[_allowed_failures] : keyword[raise] identifier[self] . identifier[_fail_count] += literal[int] keyword[return] identifier[np] . identifier[inf] keyword[return] identifier[obj]
def _objective(self, x): """ The objective function passed to the optimizer. It combines the likelihood and the priors. Failures are handled robustly. The algorithm will try several times to return the objective, and will raise the original exception if the objective cannot be computed. :param x: the parameters of the model. :parameter type: np.array """ try: self.optimizer_array = x obj = self.objective_function() self._fail_count = 0 # depends on [control=['try'], data=[]] except (LinAlgError, ZeroDivisionError, ValueError): #pragma: no cover if self._fail_count >= self._allowed_failures: raise # depends on [control=['if'], data=[]] self._fail_count += 1 return np.inf # depends on [control=['except'], data=[]] return obj
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None): """Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors """ with tf.variable_scope(name, default_name="edge_vectors"): att_adj_vectors_shape = [num_edge_types, depth] adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix) adj_vectors = ( tf.get_variable( "adj_vectors", att_adj_vectors_shape, initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth**0.5)) # Avoiding gathers so that it works on TPUs # adjacency_matrix_one_hot has shape # [batch, num_nodes, num_nodes, num_edge_types] adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types) att_adj_vectors = tf.matmul( tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]), adj_vectors) return tf.reshape(att_adj_vectors, [adjacency_matrix_shape[0], adjacency_matrix_shape[1], adjacency_matrix_shape[2], depth])
def function[make_edge_vectors, parameter[adjacency_matrix, num_edge_types, depth, name]]: constant[Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors ] with call[name[tf].variable_scope, parameter[name[name]]] begin[:] variable[att_adj_vectors_shape] assign[=] list[[<ast.Name object at 0x7da20e9b2f80>, <ast.Name object at 0x7da20e9b0eb0>]] variable[adjacency_matrix_shape] assign[=] call[name[common_layers].shape_list, parameter[name[adjacency_matrix]]] variable[adj_vectors] assign[=] binary_operation[call[name[tf].get_variable, parameter[constant[adj_vectors], name[att_adj_vectors_shape]]] * binary_operation[name[depth] ** constant[0.5]]] variable[adjacency_matrix_one_hot] assign[=] call[name[tf].one_hot, parameter[name[adjacency_matrix], name[num_edge_types]]] variable[att_adj_vectors] assign[=] call[name[tf].matmul, parameter[call[name[tf].reshape, parameter[call[name[tf].to_float, parameter[name[adjacency_matrix_one_hot]]], list[[<ast.UnaryOp object at 0x7da20e9b2860>, <ast.Name object at 0x7da20e9b0070>]]]], name[adj_vectors]]] return[call[name[tf].reshape, parameter[name[att_adj_vectors], list[[<ast.Subscript object at 0x7da20e9b2b30>, <ast.Subscript object at 0x7da20e9b1540>, <ast.Subscript object at 0x7da20e9b2830>, <ast.Name object at 0x7da1b1e14730>]]]]]
keyword[def] identifier[make_edge_vectors] ( identifier[adjacency_matrix] , identifier[num_edge_types] , identifier[depth] , identifier[name] = keyword[None] ): literal[string] keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] , identifier[default_name] = literal[string] ): identifier[att_adj_vectors_shape] =[ identifier[num_edge_types] , identifier[depth] ] identifier[adjacency_matrix_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[adjacency_matrix] ) identifier[adj_vectors] =( identifier[tf] . identifier[get_variable] ( literal[string] , identifier[att_adj_vectors_shape] , identifier[initializer] = identifier[tf] . identifier[random_normal_initializer] ( literal[int] , identifier[depth] **- literal[int] ))* ( identifier[depth] ** literal[int] )) identifier[adjacency_matrix_one_hot] = identifier[tf] . identifier[one_hot] ( identifier[adjacency_matrix] , identifier[num_edge_types] ) identifier[att_adj_vectors] = identifier[tf] . identifier[matmul] ( identifier[tf] . identifier[reshape] ( identifier[tf] . identifier[to_float] ( identifier[adjacency_matrix_one_hot] ),[- literal[int] , identifier[num_edge_types] ]), identifier[adj_vectors] ) keyword[return] identifier[tf] . identifier[reshape] ( identifier[att_adj_vectors] , [ identifier[adjacency_matrix_shape] [ literal[int] ], identifier[adjacency_matrix_shape] [ literal[int] ], identifier[adjacency_matrix_shape] [ literal[int] ], identifier[depth] ])
def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None): """Gets edge vectors for the edge types in the adjacency matrix. Args: adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. num_edge_types: Number of different edge types depth: Number of channels name: a string Returns: A [batch, num_nodes, num_nodes, depth] vector of tensors """ with tf.variable_scope(name, default_name='edge_vectors'): att_adj_vectors_shape = [num_edge_types, depth] adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix) adj_vectors = tf.get_variable('adj_vectors', att_adj_vectors_shape, initializer=tf.random_normal_initializer(0, depth ** (-0.5))) * depth ** 0.5 # Avoiding gathers so that it works on TPUs # adjacency_matrix_one_hot has shape # [batch, num_nodes, num_nodes, num_edge_types] adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types) att_adj_vectors = tf.matmul(tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]), adj_vectors) return tf.reshape(att_adj_vectors, [adjacency_matrix_shape[0], adjacency_matrix_shape[1], adjacency_matrix_shape[2], depth]) # depends on [control=['with'], data=[]]
def paragraphs(quantity=2, separator='\n\n', wrap_start='', wrap_end='', html=False, sentences_quantity=3, as_list=False): """Random paragraphs.""" if html: wrap_start = '<p>' wrap_end = '</p>' separator = '\n\n' result = [] for i in xrange(0, quantity): result.append(wrap_start + sentences(sentences_quantity) + wrap_end) if as_list: return result else: return separator.join(result)
def function[paragraphs, parameter[quantity, separator, wrap_start, wrap_end, html, sentences_quantity, as_list]]: constant[Random paragraphs.] if name[html] begin[:] variable[wrap_start] assign[=] constant[<p>] variable[wrap_end] assign[=] constant[</p>] variable[separator] assign[=] constant[ ] variable[result] assign[=] list[[]] for taget[name[i]] in starred[call[name[xrange], parameter[constant[0], name[quantity]]]] begin[:] call[name[result].append, parameter[binary_operation[binary_operation[name[wrap_start] + call[name[sentences], parameter[name[sentences_quantity]]]] + name[wrap_end]]]] if name[as_list] begin[:] return[name[result]]
keyword[def] identifier[paragraphs] ( identifier[quantity] = literal[int] , identifier[separator] = literal[string] , identifier[wrap_start] = literal[string] , identifier[wrap_end] = literal[string] , identifier[html] = keyword[False] , identifier[sentences_quantity] = literal[int] , identifier[as_list] = keyword[False] ): literal[string] keyword[if] identifier[html] : identifier[wrap_start] = literal[string] identifier[wrap_end] = literal[string] identifier[separator] = literal[string] identifier[result] =[] keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[quantity] ): identifier[result] . identifier[append] ( identifier[wrap_start] + identifier[sentences] ( identifier[sentences_quantity] )+ identifier[wrap_end] ) keyword[if] identifier[as_list] : keyword[return] identifier[result] keyword[else] : keyword[return] identifier[separator] . identifier[join] ( identifier[result] )
def paragraphs(quantity=2, separator='\n\n', wrap_start='', wrap_end='', html=False, sentences_quantity=3, as_list=False): """Random paragraphs.""" if html: wrap_start = '<p>' wrap_end = '</p>' separator = '\n\n' # depends on [control=['if'], data=[]] result = [] for i in xrange(0, quantity): result.append(wrap_start + sentences(sentences_quantity) + wrap_end) # depends on [control=['for'], data=[]] if as_list: return result # depends on [control=['if'], data=[]] else: return separator.join(result)
def defilter(cur, prev, filter_type, bpp=4): """Decode a chunk""" if filter_type == 0: # No filter return cur elif filter_type == 1: # Sub xp = 0 for xc in range(bpp, len(cur)): cur[xc] = (cur[xc] + cur[xp]) % 256 xp += 1 elif filter_type == 2: # Up for xc in range(len(cur)): cur[xc] = (cur[xc] + prev[xc]) % 256 elif filter_type == 3: # Average xp = 0 for i in range(bpp): cur[i] = (cur[i] + prev[i] // 2) % 256 for xc in range(bpp, len(cur)): cur[xc] = (cur[xc] + ((cur[xp] + prev[xc]) // 2)) % 256 xp += 1 elif filter_type == 4: # Paeth xp = 0 for i in range(bpp): cur[i] = (cur[i] + prev[i]) % 256 for xc in range(bpp, len(cur)): a = cur[xp] b = prev[xc] c = prev[xp] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: value = a elif pb <= pc: value = b else: value = c cur[xc] = (cur[xc] + value) % 256 xp += 1 else: raise ValueError('Unrecognized scanline filter type: {}'.format(filter_type)) return cur
def function[defilter, parameter[cur, prev, filter_type, bpp]]: constant[Decode a chunk] if compare[name[filter_type] equal[==] constant[0]] begin[:] return[name[cur]] return[name[cur]]
keyword[def] identifier[defilter] ( identifier[cur] , identifier[prev] , identifier[filter_type] , identifier[bpp] = literal[int] ): literal[string] keyword[if] identifier[filter_type] == literal[int] : keyword[return] identifier[cur] keyword[elif] identifier[filter_type] == literal[int] : identifier[xp] = literal[int] keyword[for] identifier[xc] keyword[in] identifier[range] ( identifier[bpp] , identifier[len] ( identifier[cur] )): identifier[cur] [ identifier[xc] ]=( identifier[cur] [ identifier[xc] ]+ identifier[cur] [ identifier[xp] ])% literal[int] identifier[xp] += literal[int] keyword[elif] identifier[filter_type] == literal[int] : keyword[for] identifier[xc] keyword[in] identifier[range] ( identifier[len] ( identifier[cur] )): identifier[cur] [ identifier[xc] ]=( identifier[cur] [ identifier[xc] ]+ identifier[prev] [ identifier[xc] ])% literal[int] keyword[elif] identifier[filter_type] == literal[int] : identifier[xp] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[bpp] ): identifier[cur] [ identifier[i] ]=( identifier[cur] [ identifier[i] ]+ identifier[prev] [ identifier[i] ]// literal[int] )% literal[int] keyword[for] identifier[xc] keyword[in] identifier[range] ( identifier[bpp] , identifier[len] ( identifier[cur] )): identifier[cur] [ identifier[xc] ]=( identifier[cur] [ identifier[xc] ]+(( identifier[cur] [ identifier[xp] ]+ identifier[prev] [ identifier[xc] ])// literal[int] ))% literal[int] identifier[xp] += literal[int] keyword[elif] identifier[filter_type] == literal[int] : identifier[xp] = literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[bpp] ): identifier[cur] [ identifier[i] ]=( identifier[cur] [ identifier[i] ]+ identifier[prev] [ identifier[i] ])% literal[int] keyword[for] identifier[xc] keyword[in] identifier[range] ( identifier[bpp] , identifier[len] ( identifier[cur] )): identifier[a] = identifier[cur] [ identifier[xp] ] identifier[b] = identifier[prev] [ identifier[xc] ] identifier[c] = identifier[prev] [ identifier[xp] ] identifier[p] = identifier[a] + identifier[b] - identifier[c] identifier[pa] = identifier[abs] ( identifier[p] - identifier[a] ) identifier[pb] = identifier[abs] ( identifier[p] - identifier[b] ) identifier[pc] = identifier[abs] ( identifier[p] - identifier[c] ) keyword[if] identifier[pa] <= identifier[pb] keyword[and] identifier[pa] <= identifier[pc] : identifier[value] = identifier[a] keyword[elif] identifier[pb] <= identifier[pc] : identifier[value] = identifier[b] keyword[else] : identifier[value] = identifier[c] identifier[cur] [ identifier[xc] ]=( identifier[cur] [ identifier[xc] ]+ identifier[value] )% literal[int] identifier[xp] += literal[int] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[filter_type] )) keyword[return] identifier[cur]
def defilter(cur, prev, filter_type, bpp=4): """Decode a chunk""" if filter_type == 0: # No filter return cur # depends on [control=['if'], data=[]] elif filter_type == 1: # Sub xp = 0 for xc in range(bpp, len(cur)): cur[xc] = (cur[xc] + cur[xp]) % 256 xp += 1 # depends on [control=['for'], data=['xc']] # depends on [control=['if'], data=[]] elif filter_type == 2: # Up for xc in range(len(cur)): cur[xc] = (cur[xc] + prev[xc]) % 256 # depends on [control=['for'], data=['xc']] # depends on [control=['if'], data=[]] elif filter_type == 3: # Average xp = 0 for i in range(bpp): cur[i] = (cur[i] + prev[i] // 2) % 256 # depends on [control=['for'], data=['i']] for xc in range(bpp, len(cur)): cur[xc] = (cur[xc] + (cur[xp] + prev[xc]) // 2) % 256 xp += 1 # depends on [control=['for'], data=['xc']] # depends on [control=['if'], data=[]] elif filter_type == 4: # Paeth xp = 0 for i in range(bpp): cur[i] = (cur[i] + prev[i]) % 256 # depends on [control=['for'], data=['i']] for xc in range(bpp, len(cur)): a = cur[xp] b = prev[xc] c = prev[xp] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: value = a # depends on [control=['if'], data=[]] elif pb <= pc: value = b # depends on [control=['if'], data=[]] else: value = c cur[xc] = (cur[xc] + value) % 256 xp += 1 # depends on [control=['for'], data=['xc']] # depends on [control=['if'], data=[]] else: raise ValueError('Unrecognized scanline filter type: {}'.format(filter_type)) return cur
def __netjson_protocol(self, radio): """ determines NetJSON protocol radio attribute """ htmode = radio.get('htmode') hwmode = radio.get('hwmode', None) if htmode.startswith('HT'): return '802.11n' elif htmode.startswith('VHT'): return '802.11ac' return '802.{0}'.format(hwmode)
def function[__netjson_protocol, parameter[self, radio]]: constant[ determines NetJSON protocol radio attribute ] variable[htmode] assign[=] call[name[radio].get, parameter[constant[htmode]]] variable[hwmode] assign[=] call[name[radio].get, parameter[constant[hwmode], constant[None]]] if call[name[htmode].startswith, parameter[constant[HT]]] begin[:] return[constant[802.11n]] return[call[constant[802.{0}].format, parameter[name[hwmode]]]]
keyword[def] identifier[__netjson_protocol] ( identifier[self] , identifier[radio] ): literal[string] identifier[htmode] = identifier[radio] . identifier[get] ( literal[string] ) identifier[hwmode] = identifier[radio] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[htmode] . identifier[startswith] ( literal[string] ): keyword[return] literal[string] keyword[elif] identifier[htmode] . identifier[startswith] ( literal[string] ): keyword[return] literal[string] keyword[return] literal[string] . identifier[format] ( identifier[hwmode] )
def __netjson_protocol(self, radio): """ determines NetJSON protocol radio attribute """ htmode = radio.get('htmode') hwmode = radio.get('hwmode', None) if htmode.startswith('HT'): return '802.11n' # depends on [control=['if'], data=[]] elif htmode.startswith('VHT'): return '802.11ac' # depends on [control=['if'], data=[]] return '802.{0}'.format(hwmode)
def get_dependency_type(_type): """ Get the dependency type string for SlurmPrinter :rtype: str """ if _type == DependencyTypes.AFTER: return 'after' elif _type == DependencyTypes.AFTER_ANY: return 'afterany' elif _type == DependencyTypes.AFTER_CORR: return 'aftercorr' elif _type == DependencyTypes.AFTER_NOT_OK: return 'afternotok' elif _type == DependencyTypes.AFTER_OK: return 'afterok' else: return None
def function[get_dependency_type, parameter[_type]]: constant[ Get the dependency type string for SlurmPrinter :rtype: str ] if compare[name[_type] equal[==] name[DependencyTypes].AFTER] begin[:] return[constant[after]]
keyword[def] identifier[get_dependency_type] ( identifier[_type] ): literal[string] keyword[if] identifier[_type] == identifier[DependencyTypes] . identifier[AFTER] : keyword[return] literal[string] keyword[elif] identifier[_type] == identifier[DependencyTypes] . identifier[AFTER_ANY] : keyword[return] literal[string] keyword[elif] identifier[_type] == identifier[DependencyTypes] . identifier[AFTER_CORR] : keyword[return] literal[string] keyword[elif] identifier[_type] == identifier[DependencyTypes] . identifier[AFTER_NOT_OK] : keyword[return] literal[string] keyword[elif] identifier[_type] == identifier[DependencyTypes] . identifier[AFTER_OK] : keyword[return] literal[string] keyword[else] : keyword[return] keyword[None]
def get_dependency_type(_type): """ Get the dependency type string for SlurmPrinter :rtype: str """ if _type == DependencyTypes.AFTER: return 'after' # depends on [control=['if'], data=[]] elif _type == DependencyTypes.AFTER_ANY: return 'afterany' # depends on [control=['if'], data=[]] elif _type == DependencyTypes.AFTER_CORR: return 'aftercorr' # depends on [control=['if'], data=[]] elif _type == DependencyTypes.AFTER_NOT_OK: return 'afternotok' # depends on [control=['if'], data=[]] elif _type == DependencyTypes.AFTER_OK: return 'afterok' # depends on [control=['if'], data=[]] else: return None
def write(self, source=None, **kwargs): '''Wrappe r to call the writer's write method if present. Args: source(pandasdmx.model.Message, iterable): stuff to be written. If a :class:`pandasdmx.model.Message` is given, the writer itself must determine what to write unless specified in the keyword arguments. If an iterable is given, the writer should write each item. Keyword arguments may specify what to do with the output depending on the writer's API. Defaults to self.msg. Returns: type: anything the writer returns. ''' if not source: source = self.msg return self._writer.write(source=source, **kwargs)
def function[write, parameter[self, source]]: constant[Wrappe r to call the writer's write method if present. Args: source(pandasdmx.model.Message, iterable): stuff to be written. If a :class:`pandasdmx.model.Message` is given, the writer itself must determine what to write unless specified in the keyword arguments. If an iterable is given, the writer should write each item. Keyword arguments may specify what to do with the output depending on the writer's API. Defaults to self.msg. Returns: type: anything the writer returns. ] if <ast.UnaryOp object at 0x7da18f722bf0> begin[:] variable[source] assign[=] name[self].msg return[call[name[self]._writer.write, parameter[]]]
keyword[def] identifier[write] ( identifier[self] , identifier[source] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[source] : identifier[source] = identifier[self] . identifier[msg] keyword[return] identifier[self] . identifier[_writer] . identifier[write] ( identifier[source] = identifier[source] ,** identifier[kwargs] )
def write(self, source=None, **kwargs): """Wrappe r to call the writer's write method if present. Args: source(pandasdmx.model.Message, iterable): stuff to be written. If a :class:`pandasdmx.model.Message` is given, the writer itself must determine what to write unless specified in the keyword arguments. If an iterable is given, the writer should write each item. Keyword arguments may specify what to do with the output depending on the writer's API. Defaults to self.msg. Returns: type: anything the writer returns. """ if not source: source = self.msg # depends on [control=['if'], data=[]] return self._writer.write(source=source, **kwargs)
def from_json(cls, data): """Return object based on JSON / dict input Args: data (dict): Dictionary containing a serialized User object Returns: :obj:`User`: User object representing the data """ user = cls() user.user_id = data['userId'] user.username = data['username'] user.auth_system = data['authSystem'] user.roles = data['roles'] return user
def function[from_json, parameter[cls, data]]: constant[Return object based on JSON / dict input Args: data (dict): Dictionary containing a serialized User object Returns: :obj:`User`: User object representing the data ] variable[user] assign[=] call[name[cls], parameter[]] name[user].user_id assign[=] call[name[data]][constant[userId]] name[user].username assign[=] call[name[data]][constant[username]] name[user].auth_system assign[=] call[name[data]][constant[authSystem]] name[user].roles assign[=] call[name[data]][constant[roles]] return[name[user]]
keyword[def] identifier[from_json] ( identifier[cls] , identifier[data] ): literal[string] identifier[user] = identifier[cls] () identifier[user] . identifier[user_id] = identifier[data] [ literal[string] ] identifier[user] . identifier[username] = identifier[data] [ literal[string] ] identifier[user] . identifier[auth_system] = identifier[data] [ literal[string] ] identifier[user] . identifier[roles] = identifier[data] [ literal[string] ] keyword[return] identifier[user]
def from_json(cls, data): """Return object based on JSON / dict input Args: data (dict): Dictionary containing a serialized User object Returns: :obj:`User`: User object representing the data """ user = cls() user.user_id = data['userId'] user.username = data['username'] user.auth_system = data['authSystem'] user.roles = data['roles'] return user
def recv(request_context=None, non_blocking=False): """Receives data from websocket. :param request_context: :param bool non_blocking: :rtype: bytes|str :raises IOError: If unable to receive a message. """ if non_blocking: result = uwsgi.websocket_recv_nb(request_context) else: result = uwsgi.websocket_recv(request_context) return result
def function[recv, parameter[request_context, non_blocking]]: constant[Receives data from websocket. :param request_context: :param bool non_blocking: :rtype: bytes|str :raises IOError: If unable to receive a message. ] if name[non_blocking] begin[:] variable[result] assign[=] call[name[uwsgi].websocket_recv_nb, parameter[name[request_context]]] return[name[result]]
keyword[def] identifier[recv] ( identifier[request_context] = keyword[None] , identifier[non_blocking] = keyword[False] ): literal[string] keyword[if] identifier[non_blocking] : identifier[result] = identifier[uwsgi] . identifier[websocket_recv_nb] ( identifier[request_context] ) keyword[else] : identifier[result] = identifier[uwsgi] . identifier[websocket_recv] ( identifier[request_context] ) keyword[return] identifier[result]
def recv(request_context=None, non_blocking=False): """Receives data from websocket. :param request_context: :param bool non_blocking: :rtype: bytes|str :raises IOError: If unable to receive a message. """ if non_blocking: result = uwsgi.websocket_recv_nb(request_context) # depends on [control=['if'], data=[]] else: result = uwsgi.websocket_recv(request_context) return result
def _add_seg_to_output(out, data, enumerate_chroms=False): """Export outputs to 'seg' format compatible with IGV and GenePattern. """ out_file = "%s.seg" % os.path.splitext(out["cns"])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export", "seg"] if enumerate_chroms: cmd += ["--enumerate-chroms"] cmd += ["-o", tx_out_file, out["cns"]] do.run(cmd, "CNVkit export seg") out["seg"] = out_file return out
def function[_add_seg_to_output, parameter[out, data, enumerate_chroms]]: constant[Export outputs to 'seg' format compatible with IGV and GenePattern. ] variable[out_file] assign[=] binary_operation[constant[%s.seg] <ast.Mod object at 0x7da2590d6920> call[call[name[os].path.splitext, parameter[call[name[out]][constant[cns]]]]][constant[0]]] if <ast.UnaryOp object at 0x7da1b19b95a0> begin[:] with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:] variable[cmd] assign[=] list[[<ast.Call object at 0x7da1b19bbd90>, <ast.Constant object at 0x7da1b19bbb20>, <ast.Constant object at 0x7da1b19b82b0>]] if name[enumerate_chroms] begin[:] <ast.AugAssign object at 0x7da1b19b9960> <ast.AugAssign object at 0x7da1b19b9cf0> call[name[do].run, parameter[name[cmd], constant[CNVkit export seg]]] call[name[out]][constant[seg]] assign[=] name[out_file] return[name[out]]
keyword[def] identifier[_add_seg_to_output] ( identifier[out] , identifier[data] , identifier[enumerate_chroms] = keyword[False] ): literal[string] identifier[out_file] = literal[string] % identifier[os] . identifier[path] . identifier[splitext] ( identifier[out] [ literal[string] ])[ literal[int] ] keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ): keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] : identifier[cmd] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[sys] . identifier[executable] ), literal[string] ), literal[string] , literal[string] ] keyword[if] identifier[enumerate_chroms] : identifier[cmd] +=[ literal[string] ] identifier[cmd] +=[ literal[string] , identifier[tx_out_file] , identifier[out] [ literal[string] ]] identifier[do] . identifier[run] ( identifier[cmd] , literal[string] ) identifier[out] [ literal[string] ]= identifier[out_file] keyword[return] identifier[out]
def _add_seg_to_output(out, data, enumerate_chroms=False): """Export outputs to 'seg' format compatible with IGV and GenePattern. """ out_file = '%s.seg' % os.path.splitext(out['cns'])[0] if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: cmd = [os.path.join(os.path.dirname(sys.executable), 'cnvkit.py'), 'export', 'seg'] if enumerate_chroms: cmd += ['--enumerate-chroms'] # depends on [control=['if'], data=[]] cmd += ['-o', tx_out_file, out['cns']] do.run(cmd, 'CNVkit export seg') # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]] out['seg'] = out_file return out
def get_models(): """Finds all models, returning a list of model number and names sorted increasing. Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc] """ all_models = gfile.Glob(os.path.join(models_dir(), '*.meta')) model_filenames = [os.path.basename(m) for m in all_models] model_numbers_names = sorted([ (shipname.detect_model_num(m), shipname.detect_model_name(m)) for m in model_filenames]) return model_numbers_names
def function[get_models, parameter[]]: constant[Finds all models, returning a list of model number and names sorted increasing. Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc] ] variable[all_models] assign[=] call[name[gfile].Glob, parameter[call[name[os].path.join, parameter[call[name[models_dir], parameter[]], constant[*.meta]]]]] variable[model_filenames] assign[=] <ast.ListComp object at 0x7da18c4cd420> variable[model_numbers_names] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da18c4cf7f0>]] return[name[model_numbers_names]]
keyword[def] identifier[get_models] (): literal[string] identifier[all_models] = identifier[gfile] . identifier[Glob] ( identifier[os] . identifier[path] . identifier[join] ( identifier[models_dir] (), literal[string] )) identifier[model_filenames] =[ identifier[os] . identifier[path] . identifier[basename] ( identifier[m] ) keyword[for] identifier[m] keyword[in] identifier[all_models] ] identifier[model_numbers_names] = identifier[sorted] ([ ( identifier[shipname] . identifier[detect_model_num] ( identifier[m] ), identifier[shipname] . identifier[detect_model_name] ( identifier[m] )) keyword[for] identifier[m] keyword[in] identifier[model_filenames] ]) keyword[return] identifier[model_numbers_names]
def get_models(): """Finds all models, returning a list of model number and names sorted increasing. Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc] """ all_models = gfile.Glob(os.path.join(models_dir(), '*.meta')) model_filenames = [os.path.basename(m) for m in all_models] model_numbers_names = sorted([(shipname.detect_model_num(m), shipname.detect_model_name(m)) for m in model_filenames]) return model_numbers_names
def item_set_goto(itemset, inputsymbol, productionset): """returns an itemset locate inside itemset every element with inputsymbol following cursor for every located item, append its itemclosure""" resultset = LR0ItemSet() for item in itemset.itemlist: if item.next_symbol() == inputsymbol: newitem = LR0Item(item.rule, item.position + 1) resultset.append_item(newitem) return _build_item_closure(resultset, productionset)
def function[item_set_goto, parameter[itemset, inputsymbol, productionset]]: constant[returns an itemset locate inside itemset every element with inputsymbol following cursor for every located item, append its itemclosure] variable[resultset] assign[=] call[name[LR0ItemSet], parameter[]] for taget[name[item]] in starred[name[itemset].itemlist] begin[:] if compare[call[name[item].next_symbol, parameter[]] equal[==] name[inputsymbol]] begin[:] variable[newitem] assign[=] call[name[LR0Item], parameter[name[item].rule, binary_operation[name[item].position + constant[1]]]] call[name[resultset].append_item, parameter[name[newitem]]] return[call[name[_build_item_closure], parameter[name[resultset], name[productionset]]]]
keyword[def] identifier[item_set_goto] ( identifier[itemset] , identifier[inputsymbol] , identifier[productionset] ): literal[string] identifier[resultset] = identifier[LR0ItemSet] () keyword[for] identifier[item] keyword[in] identifier[itemset] . identifier[itemlist] : keyword[if] identifier[item] . identifier[next_symbol] ()== identifier[inputsymbol] : identifier[newitem] = identifier[LR0Item] ( identifier[item] . identifier[rule] , identifier[item] . identifier[position] + literal[int] ) identifier[resultset] . identifier[append_item] ( identifier[newitem] ) keyword[return] identifier[_build_item_closure] ( identifier[resultset] , identifier[productionset] )
def item_set_goto(itemset, inputsymbol, productionset): """returns an itemset locate inside itemset every element with inputsymbol following cursor for every located item, append its itemclosure""" resultset = LR0ItemSet() for item in itemset.itemlist: if item.next_symbol() == inputsymbol: newitem = LR0Item(item.rule, item.position + 1) resultset.append_item(newitem) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] return _build_item_closure(resultset, productionset)
def harmonics_1d(harmonic_out, x, freqs, h_range, kind='linear', fill_value=0, axis=0): '''Populate a harmonic tensor from a time-frequency representation. Parameters ---------- harmonic_out : np.ndarray, shape=(len(h_range), X.shape) The output array to store harmonics X : np.ndarray The input energy freqs : np.ndarray, shape=(x.shape[axis]) The frequency values corresponding to x's elements along the chosen axis. h_range : list-like, non-negative Harmonics to compute. The first harmonic (1) corresponds to `x` itself. Values less than one (e.g., 1/2) correspond to sub-harmonics. kind : str Interpolation type. See `scipy.interpolate.interp1d`. fill_value : float The value to fill when extrapolating beyond the observed frequency range. axis : int The axis along which to compute harmonics See Also -------- harmonics scipy.interpolate.interp1d Examples -------- Estimate the harmonics of a time-averaged tempogram >>> y, sr = librosa.load(librosa.util.example_audio_file(), ... duration=15, offset=30) >>> # Compute the time-varying tempogram and average over time >>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1) >>> # We'll measure the first five harmonics >>> h_range = [1, 2, 3, 4, 5] >>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr) >>> # Build the harmonic tensor >>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range) >>> print(t_harmonics.shape) (5, 384) >>> # And plot the results >>> import matplotlib.pyplot as plt >>> plt.figure() >>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr) >>> plt.yticks(0.5 + np.arange(len(h_range)), ... ['{:.3g}'.format(_) for _ in h_range]) >>> plt.ylabel('Harmonic') >>> plt.xlabel('Tempo (BPM)') >>> plt.tight_layout() We can also compute frequency harmonics for spectrograms. To calculate subharmonic energy, use values < 1. >>> h_range = [1./3, 1./2, 1, 2, 3, 4] >>> S = np.abs(librosa.stft(y)) >>> fft_freqs = librosa.fft_frequencies(sr=sr) >>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0) >>> print(S_harm.shape) (6, 1025, 646) >>> plt.figure() >>> for i, _sh in enumerate(S_harm, 1): ... plt.subplot(3,2,i) ... librosa.display.specshow(librosa.amplitude_to_db(_sh, ... ref=S.max()), ... sr=sr, y_axis='log') ... plt.title('h={:.3g}'.format(h_range[i-1])) ... plt.yticks([]) >>> plt.tight_layout() ''' # Note: this only works for fixed-grid, 1d interpolation f_interp = scipy.interpolate.interp1d(freqs, x, kind=kind, axis=axis, copy=False, bounds_error=False, fill_value=fill_value) idx_out = [slice(None)] * harmonic_out.ndim # Compute the output index of the interpolated values interp_axis = 1 + (axis % x.ndim) # Iterate over the harmonics range for h_index, harmonic in enumerate(h_range): idx_out[0] = h_index # Iterate over frequencies for f_index, frequency in enumerate(freqs): # Offset the output axis by 1 to account for the harmonic index idx_out[interp_axis] = f_index # Estimate the harmonic energy at this frequency across time harmonic_out[tuple(idx_out)] = f_interp(harmonic * frequency)
def function[harmonics_1d, parameter[harmonic_out, x, freqs, h_range, kind, fill_value, axis]]: constant[Populate a harmonic tensor from a time-frequency representation. Parameters ---------- harmonic_out : np.ndarray, shape=(len(h_range), X.shape) The output array to store harmonics X : np.ndarray The input energy freqs : np.ndarray, shape=(x.shape[axis]) The frequency values corresponding to x's elements along the chosen axis. h_range : list-like, non-negative Harmonics to compute. The first harmonic (1) corresponds to `x` itself. Values less than one (e.g., 1/2) correspond to sub-harmonics. kind : str Interpolation type. See `scipy.interpolate.interp1d`. fill_value : float The value to fill when extrapolating beyond the observed frequency range. axis : int The axis along which to compute harmonics See Also -------- harmonics scipy.interpolate.interp1d Examples -------- Estimate the harmonics of a time-averaged tempogram >>> y, sr = librosa.load(librosa.util.example_audio_file(), ... duration=15, offset=30) >>> # Compute the time-varying tempogram and average over time >>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1) >>> # We'll measure the first five harmonics >>> h_range = [1, 2, 3, 4, 5] >>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr) >>> # Build the harmonic tensor >>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range) >>> print(t_harmonics.shape) (5, 384) >>> # And plot the results >>> import matplotlib.pyplot as plt >>> plt.figure() >>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr) >>> plt.yticks(0.5 + np.arange(len(h_range)), ... ['{:.3g}'.format(_) for _ in h_range]) >>> plt.ylabel('Harmonic') >>> plt.xlabel('Tempo (BPM)') >>> plt.tight_layout() We can also compute frequency harmonics for spectrograms. To calculate subharmonic energy, use values < 1. >>> h_range = [1./3, 1./2, 1, 2, 3, 4] >>> S = np.abs(librosa.stft(y)) >>> fft_freqs = librosa.fft_frequencies(sr=sr) >>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0) >>> print(S_harm.shape) (6, 1025, 646) >>> plt.figure() >>> for i, _sh in enumerate(S_harm, 1): ... plt.subplot(3,2,i) ... librosa.display.specshow(librosa.amplitude_to_db(_sh, ... ref=S.max()), ... sr=sr, y_axis='log') ... plt.title('h={:.3g}'.format(h_range[i-1])) ... plt.yticks([]) >>> plt.tight_layout() ] variable[f_interp] assign[=] call[name[scipy].interpolate.interp1d, parameter[name[freqs], name[x]]] variable[idx_out] assign[=] binary_operation[list[[<ast.Call object at 0x7da18f813850>]] * name[harmonic_out].ndim] variable[interp_axis] assign[=] binary_operation[constant[1] + binary_operation[name[axis] <ast.Mod object at 0x7da2590d6920> name[x].ndim]] for taget[tuple[[<ast.Name object at 0x7da18f8100a0>, <ast.Name object at 0x7da18f811f30>]]] in starred[call[name[enumerate], parameter[name[h_range]]]] begin[:] call[name[idx_out]][constant[0]] assign[=] name[h_index] for taget[tuple[[<ast.Name object at 0x7da18f8124d0>, <ast.Name object at 0x7da18f811030>]]] in starred[call[name[enumerate], parameter[name[freqs]]]] begin[:] call[name[idx_out]][name[interp_axis]] assign[=] name[f_index] call[name[harmonic_out]][call[name[tuple], parameter[name[idx_out]]]] assign[=] call[name[f_interp], parameter[binary_operation[name[harmonic] * name[frequency]]]]
keyword[def] identifier[harmonics_1d] ( identifier[harmonic_out] , identifier[x] , identifier[freqs] , identifier[h_range] , identifier[kind] = literal[string] , identifier[fill_value] = literal[int] , identifier[axis] = literal[int] ): literal[string] identifier[f_interp] = identifier[scipy] . identifier[interpolate] . identifier[interp1d] ( identifier[freqs] , identifier[x] , identifier[kind] = identifier[kind] , identifier[axis] = identifier[axis] , identifier[copy] = keyword[False] , identifier[bounds_error] = keyword[False] , identifier[fill_value] = identifier[fill_value] ) identifier[idx_out] =[ identifier[slice] ( keyword[None] )]* identifier[harmonic_out] . identifier[ndim] identifier[interp_axis] = literal[int] +( identifier[axis] % identifier[x] . identifier[ndim] ) keyword[for] identifier[h_index] , identifier[harmonic] keyword[in] identifier[enumerate] ( identifier[h_range] ): identifier[idx_out] [ literal[int] ]= identifier[h_index] keyword[for] identifier[f_index] , identifier[frequency] keyword[in] identifier[enumerate] ( identifier[freqs] ): identifier[idx_out] [ identifier[interp_axis] ]= identifier[f_index] identifier[harmonic_out] [ identifier[tuple] ( identifier[idx_out] )]= identifier[f_interp] ( identifier[harmonic] * identifier[frequency] )
def harmonics_1d(harmonic_out, x, freqs, h_range, kind='linear', fill_value=0, axis=0): """Populate a harmonic tensor from a time-frequency representation. Parameters ---------- harmonic_out : np.ndarray, shape=(len(h_range), X.shape) The output array to store harmonics X : np.ndarray The input energy freqs : np.ndarray, shape=(x.shape[axis]) The frequency values corresponding to x's elements along the chosen axis. h_range : list-like, non-negative Harmonics to compute. The first harmonic (1) corresponds to `x` itself. Values less than one (e.g., 1/2) correspond to sub-harmonics. kind : str Interpolation type. See `scipy.interpolate.interp1d`. fill_value : float The value to fill when extrapolating beyond the observed frequency range. axis : int The axis along which to compute harmonics See Also -------- harmonics scipy.interpolate.interp1d Examples -------- Estimate the harmonics of a time-averaged tempogram >>> y, sr = librosa.load(librosa.util.example_audio_file(), ... duration=15, offset=30) >>> # Compute the time-varying tempogram and average over time >>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1) >>> # We'll measure the first five harmonics >>> h_range = [1, 2, 3, 4, 5] >>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr) >>> # Build the harmonic tensor >>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range) >>> print(t_harmonics.shape) (5, 384) >>> # And plot the results >>> import matplotlib.pyplot as plt >>> plt.figure() >>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr) >>> plt.yticks(0.5 + np.arange(len(h_range)), ... ['{:.3g}'.format(_) for _ in h_range]) >>> plt.ylabel('Harmonic') >>> plt.xlabel('Tempo (BPM)') >>> plt.tight_layout() We can also compute frequency harmonics for spectrograms. To calculate subharmonic energy, use values < 1. >>> h_range = [1./3, 1./2, 1, 2, 3, 4] >>> S = np.abs(librosa.stft(y)) >>> fft_freqs = librosa.fft_frequencies(sr=sr) >>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0) >>> print(S_harm.shape) (6, 1025, 646) >>> plt.figure() >>> for i, _sh in enumerate(S_harm, 1): ... plt.subplot(3,2,i) ... librosa.display.specshow(librosa.amplitude_to_db(_sh, ... ref=S.max()), ... sr=sr, y_axis='log') ... plt.title('h={:.3g}'.format(h_range[i-1])) ... plt.yticks([]) >>> plt.tight_layout() """ # Note: this only works for fixed-grid, 1d interpolation f_interp = scipy.interpolate.interp1d(freqs, x, kind=kind, axis=axis, copy=False, bounds_error=False, fill_value=fill_value) idx_out = [slice(None)] * harmonic_out.ndim # Compute the output index of the interpolated values interp_axis = 1 + axis % x.ndim # Iterate over the harmonics range for (h_index, harmonic) in enumerate(h_range): idx_out[0] = h_index # Iterate over frequencies for (f_index, frequency) in enumerate(freqs): # Offset the output axis by 1 to account for the harmonic index idx_out[interp_axis] = f_index # Estimate the harmonic energy at this frequency across time harmonic_out[tuple(idx_out)] = f_interp(harmonic * frequency) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
def parse_csv_response(data, unit_handler): """Handle CSV-formatted HTTP responses.""" return squish([parse_csv_dataset(d, unit_handler) for d in data.split(b'\n\n')])
def function[parse_csv_response, parameter[data, unit_handler]]: constant[Handle CSV-formatted HTTP responses.] return[call[name[squish], parameter[<ast.ListComp object at 0x7da1b11c01c0>]]]
keyword[def] identifier[parse_csv_response] ( identifier[data] , identifier[unit_handler] ): literal[string] keyword[return] identifier[squish] ([ identifier[parse_csv_dataset] ( identifier[d] , identifier[unit_handler] ) keyword[for] identifier[d] keyword[in] identifier[data] . identifier[split] ( literal[string] )])
def parse_csv_response(data, unit_handler): """Handle CSV-formatted HTTP responses.""" return squish([parse_csv_dataset(d, unit_handler) for d in data.split(b'\n\n')])
def done(self, msg_handle): """acknowledge completion of message""" self.sqs_client.delete_message( QueueUrl=self.queue_url, ReceiptHandle=msg_handle.handle, )
def function[done, parameter[self, msg_handle]]: constant[acknowledge completion of message] call[name[self].sqs_client.delete_message, parameter[]]
keyword[def] identifier[done] ( identifier[self] , identifier[msg_handle] ): literal[string] identifier[self] . identifier[sqs_client] . identifier[delete_message] ( identifier[QueueUrl] = identifier[self] . identifier[queue_url] , identifier[ReceiptHandle] = identifier[msg_handle] . identifier[handle] , )
def done(self, msg_handle): """acknowledge completion of message""" self.sqs_client.delete_message(QueueUrl=self.queue_url, ReceiptHandle=msg_handle.handle)
def mark(request): """ Handles marking of individual notifications as read or unread. Takes ``notification id`` and mark ``action`` as POST data. :param request: HTTP request context. :returns: Response to mark action of supplied notification ID. """ notification_id = request.POST.get('id', None) action = request.POST.get('action', None) success = True if notification_id: try: notification = Notification.objects.get(pk=notification_id, recipient=request.user) if action == 'read': notification.mark_as_read() msg = _("Marked as read") elif action == 'unread': notification.mark_as_unread() msg = _("Marked as unread") else: success = False msg = _("Invalid mark action.") except Notification.DoesNotExist: success = False msg = _("Notification does not exists.") else: success = False msg = _("Invalid Notification ID") ctx = {'msg': msg, 'success': success, 'action': action} return notification_redirect(request, ctx)
def function[mark, parameter[request]]: constant[ Handles marking of individual notifications as read or unread. Takes ``notification id`` and mark ``action`` as POST data. :param request: HTTP request context. :returns: Response to mark action of supplied notification ID. ] variable[notification_id] assign[=] call[name[request].POST.get, parameter[constant[id], constant[None]]] variable[action] assign[=] call[name[request].POST.get, parameter[constant[action], constant[None]]] variable[success] assign[=] constant[True] if name[notification_id] begin[:] <ast.Try object at 0x7da1b0f38b20> variable[ctx] assign[=] dictionary[[<ast.Constant object at 0x7da1b0d0cee0>, <ast.Constant object at 0x7da1b0d0f7c0>, <ast.Constant object at 0x7da1b0d0e500>], [<ast.Name object at 0x7da1b0d0e470>, <ast.Name object at 0x7da1b0d0ea10>, <ast.Name object at 0x7da1b0d0e980>]] return[call[name[notification_redirect], parameter[name[request], name[ctx]]]]
keyword[def] identifier[mark] ( identifier[request] ): literal[string] identifier[notification_id] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] , keyword[None] ) identifier[action] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] , keyword[None] ) identifier[success] = keyword[True] keyword[if] identifier[notification_id] : keyword[try] : identifier[notification] = identifier[Notification] . identifier[objects] . identifier[get] ( identifier[pk] = identifier[notification_id] , identifier[recipient] = identifier[request] . identifier[user] ) keyword[if] identifier[action] == literal[string] : identifier[notification] . identifier[mark_as_read] () identifier[msg] = identifier[_] ( literal[string] ) keyword[elif] identifier[action] == literal[string] : identifier[notification] . identifier[mark_as_unread] () identifier[msg] = identifier[_] ( literal[string] ) keyword[else] : identifier[success] = keyword[False] identifier[msg] = identifier[_] ( literal[string] ) keyword[except] identifier[Notification] . identifier[DoesNotExist] : identifier[success] = keyword[False] identifier[msg] = identifier[_] ( literal[string] ) keyword[else] : identifier[success] = keyword[False] identifier[msg] = identifier[_] ( literal[string] ) identifier[ctx] ={ literal[string] : identifier[msg] , literal[string] : identifier[success] , literal[string] : identifier[action] } keyword[return] identifier[notification_redirect] ( identifier[request] , identifier[ctx] )
def mark(request): """ Handles marking of individual notifications as read or unread. Takes ``notification id`` and mark ``action`` as POST data. :param request: HTTP request context. :returns: Response to mark action of supplied notification ID. """ notification_id = request.POST.get('id', None) action = request.POST.get('action', None) success = True if notification_id: try: notification = Notification.objects.get(pk=notification_id, recipient=request.user) if action == 'read': notification.mark_as_read() msg = _('Marked as read') # depends on [control=['if'], data=[]] elif action == 'unread': notification.mark_as_unread() msg = _('Marked as unread') # depends on [control=['if'], data=[]] else: success = False msg = _('Invalid mark action.') # depends on [control=['try'], data=[]] except Notification.DoesNotExist: success = False msg = _('Notification does not exists.') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: success = False msg = _('Invalid Notification ID') ctx = {'msg': msg, 'success': success, 'action': action} return notification_redirect(request, ctx)
def _energy_minimize_openbabel(self, tmp_dir, steps=1000, algorithm='cg', forcefield='UFF'): """Perform an energy minimization on a Compound Utilizes Open Babel (http://openbabel.org/docs/dev/) to perform an energy minimization/geometry optimization on a Compound by applying a generic force field. This function is primarily intended to be used on smaller components, with sizes on the order of 10's to 100's of particles, as the energy minimization scales poorly with the number of particles. Parameters ---------- steps : int, optionl, default=1000 The number of optimization iterations algorithm : str, optional, default='cg' The energy minimization algorithm. Valid options are 'steep', 'cg', and 'md', corresponding to steepest descent, conjugate gradient, and equilibrium molecular dynamics respectively. forcefield : str, optional, default='UFF' The generic force field to apply to the Compound for minimization. Valid options are 'MMFF94', 'MMFF94s', ''UFF', 'GAFF', and 'Ghemical'. Please refer to the Open Babel documentation (http://open-babel. readthedocs.io/en/latest/Forcefields/Overview.html) when considering your choice of force field. References ---------- .. [1] O'Boyle, N.M.; Banck, M.; James, C.A.; Morley, C.; Vandermeersch, T.; Hutchison, G.R. "Open Babel: An open chemical toolbox." (2011) J. Cheminf. 3, 33 .. [2] Open Babel, version X.X.X http://openbabel.org, (installed Month Year) If using the 'MMFF94' force field please also cite the following: .. [3] T.A. Halgren, "Merck molecular force field. I. Basis, form, scope, parameterization, and performance of MMFF94." (1996) J. Comput. Chem. 17, 490-519 .. [4] T.A. Halgren, "Merck molecular force field. II. MMFF94 van der Waals and electrostatic parameters for intermolecular interactions." (1996) J. Comput. Chem. 17, 520-552 .. [5] T.A. Halgren, "Merck molecular force field. III. Molecular geometries and vibrational frequencies for MMFF94." (1996) J. Comput. Chem. 17, 553-586 .. [6] T.A. Halgren and R.B. Nachbar, "Merck molecular force field. IV. Conformational energies and geometries for MMFF94." (1996) J. Comput. Chem. 17, 587-615 .. [7] T.A. Halgren, "Merck molecular force field. V. Extension of MMFF94 using experimental data, additional computational data, and empirical rules." (1996) J. Comput. Chem. 17, 616-641 If using the 'MMFF94s' force field please cite the above along with: .. [8] T.A. Halgren, "MMFF VI. MMFF94s option for energy minimization studies." (1999) J. Comput. Chem. 20, 720-729 If using the 'UFF' force field please cite the following: .. [3] Rappe, A.K., Casewit, C.J., Colwell, K.S., Goddard, W.A. III, Skiff, W.M. "UFF, a full periodic table force field for molecular mechanics and molecular dynamics simulations." (1992) J. Am. Chem. Soc. 114, 10024-10039 If using the 'GAFF' force field please cite the following: .. [3] Wang, J., Wolf, R.M., Caldwell, J.W., Kollman, P.A., Case, D.A. "Development and testing of a general AMBER force field" (2004) J. Comput. Chem. 25, 1157-1174 If using the 'Ghemical' force field please cite the following: .. [3] T. Hassinen and M. Perakyla, "New energy terms for reduced protein models implemented in an off-lattice force field" (2001) J. Comput. Chem. 22, 1229-1242 """ openbabel = import_('openbabel') for particle in self.particles(): try: get_by_symbol(particle.name) except KeyError: raise MBuildError("Element name {} not recognized. Cannot " "perform minimization." "".format(particle.name)) obConversion = openbabel.OBConversion() obConversion.SetInAndOutFormats("mol2", "pdb") mol = openbabel.OBMol() obConversion.ReadFile(mol, os.path.join(tmp_dir, "un-minimized.mol2")) ff = openbabel.OBForceField.FindForceField(forcefield) if ff is None: raise MBuildError("Force field '{}' not supported for energy " "minimization. Valid force fields are 'MMFF94', " "'MMFF94s', 'UFF', 'GAFF', and 'Ghemical'." "".format(forcefield)) warn( "Performing energy minimization using the Open Babel package. Please " "refer to the documentation to find the appropriate citations for " "Open Babel and the {} force field".format(forcefield)) ff.Setup(mol) if algorithm == 'steep': ff.SteepestDescent(steps) elif algorithm == 'md': ff.MolecularDynamicsTakeNSteps(steps, 300) elif algorithm == 'cg': ff.ConjugateGradients(steps) else: raise MBuildError("Invalid minimization algorithm. Valid options " "are 'steep', 'cg', and 'md'.") ff.UpdateCoordinates(mol) obConversion.WriteFile(mol, os.path.join(tmp_dir, 'minimized.pdb'))
def function[_energy_minimize_openbabel, parameter[self, tmp_dir, steps, algorithm, forcefield]]: constant[Perform an energy minimization on a Compound Utilizes Open Babel (http://openbabel.org/docs/dev/) to perform an energy minimization/geometry optimization on a Compound by applying a generic force field. This function is primarily intended to be used on smaller components, with sizes on the order of 10's to 100's of particles, as the energy minimization scales poorly with the number of particles. Parameters ---------- steps : int, optionl, default=1000 The number of optimization iterations algorithm : str, optional, default='cg' The energy minimization algorithm. Valid options are 'steep', 'cg', and 'md', corresponding to steepest descent, conjugate gradient, and equilibrium molecular dynamics respectively. forcefield : str, optional, default='UFF' The generic force field to apply to the Compound for minimization. Valid options are 'MMFF94', 'MMFF94s', ''UFF', 'GAFF', and 'Ghemical'. Please refer to the Open Babel documentation (http://open-babel. readthedocs.io/en/latest/Forcefields/Overview.html) when considering your choice of force field. References ---------- .. [1] O'Boyle, N.M.; Banck, M.; James, C.A.; Morley, C.; Vandermeersch, T.; Hutchison, G.R. "Open Babel: An open chemical toolbox." (2011) J. Cheminf. 3, 33 .. [2] Open Babel, version X.X.X http://openbabel.org, (installed Month Year) If using the 'MMFF94' force field please also cite the following: .. [3] T.A. Halgren, "Merck molecular force field. I. Basis, form, scope, parameterization, and performance of MMFF94." (1996) J. Comput. Chem. 17, 490-519 .. [4] T.A. Halgren, "Merck molecular force field. II. MMFF94 van der Waals and electrostatic parameters for intermolecular interactions." (1996) J. Comput. Chem. 17, 520-552 .. [5] T.A. Halgren, "Merck molecular force field. III. Molecular geometries and vibrational frequencies for MMFF94." (1996) J. Comput. Chem. 17, 553-586 .. [6] T.A. Halgren and R.B. Nachbar, "Merck molecular force field. IV. Conformational energies and geometries for MMFF94." (1996) J. Comput. Chem. 17, 587-615 .. [7] T.A. Halgren, "Merck molecular force field. V. Extension of MMFF94 using experimental data, additional computational data, and empirical rules." (1996) J. Comput. Chem. 17, 616-641 If using the 'MMFF94s' force field please cite the above along with: .. [8] T.A. Halgren, "MMFF VI. MMFF94s option for energy minimization studies." (1999) J. Comput. Chem. 20, 720-729 If using the 'UFF' force field please cite the following: .. [3] Rappe, A.K., Casewit, C.J., Colwell, K.S., Goddard, W.A. III, Skiff, W.M. "UFF, a full periodic table force field for molecular mechanics and molecular dynamics simulations." (1992) J. Am. Chem. Soc. 114, 10024-10039 If using the 'GAFF' force field please cite the following: .. [3] Wang, J., Wolf, R.M., Caldwell, J.W., Kollman, P.A., Case, D.A. "Development and testing of a general AMBER force field" (2004) J. Comput. Chem. 25, 1157-1174 If using the 'Ghemical' force field please cite the following: .. [3] T. Hassinen and M. Perakyla, "New energy terms for reduced protein models implemented in an off-lattice force field" (2001) J. Comput. Chem. 22, 1229-1242 ] variable[openbabel] assign[=] call[name[import_], parameter[constant[openbabel]]] for taget[name[particle]] in starred[call[name[self].particles, parameter[]]] begin[:] <ast.Try object at 0x7da1b1d4ba90> variable[obConversion] assign[=] call[name[openbabel].OBConversion, parameter[]] call[name[obConversion].SetInAndOutFormats, parameter[constant[mol2], constant[pdb]]] variable[mol] assign[=] call[name[openbabel].OBMol, parameter[]] call[name[obConversion].ReadFile, parameter[name[mol], call[name[os].path.join, parameter[name[tmp_dir], constant[un-minimized.mol2]]]]] variable[ff] assign[=] call[name[openbabel].OBForceField.FindForceField, parameter[name[forcefield]]] if compare[name[ff] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1d48070> call[name[warn], parameter[call[constant[Performing energy minimization using the Open Babel package. Please refer to the documentation to find the appropriate citations for Open Babel and the {} force field].format, parameter[name[forcefield]]]]] call[name[ff].Setup, parameter[name[mol]]] if compare[name[algorithm] equal[==] constant[steep]] begin[:] call[name[ff].SteepestDescent, parameter[name[steps]]] call[name[ff].UpdateCoordinates, parameter[name[mol]]] call[name[obConversion].WriteFile, parameter[name[mol], call[name[os].path.join, parameter[name[tmp_dir], constant[minimized.pdb]]]]]
keyword[def] identifier[_energy_minimize_openbabel] ( identifier[self] , identifier[tmp_dir] , identifier[steps] = literal[int] , identifier[algorithm] = literal[string] , identifier[forcefield] = literal[string] ): literal[string] identifier[openbabel] = identifier[import_] ( literal[string] ) keyword[for] identifier[particle] keyword[in] identifier[self] . identifier[particles] (): keyword[try] : identifier[get_by_symbol] ( identifier[particle] . identifier[name] ) keyword[except] identifier[KeyError] : keyword[raise] identifier[MBuildError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[particle] . identifier[name] )) identifier[obConversion] = identifier[openbabel] . identifier[OBConversion] () identifier[obConversion] . identifier[SetInAndOutFormats] ( literal[string] , literal[string] ) identifier[mol] = identifier[openbabel] . identifier[OBMol] () identifier[obConversion] . identifier[ReadFile] ( identifier[mol] , identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_dir] , literal[string] )) identifier[ff] = identifier[openbabel] . identifier[OBForceField] . identifier[FindForceField] ( identifier[forcefield] ) keyword[if] identifier[ff] keyword[is] keyword[None] : keyword[raise] identifier[MBuildError] ( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[forcefield] )) identifier[warn] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[forcefield] )) identifier[ff] . identifier[Setup] ( identifier[mol] ) keyword[if] identifier[algorithm] == literal[string] : identifier[ff] . identifier[SteepestDescent] ( identifier[steps] ) keyword[elif] identifier[algorithm] == literal[string] : identifier[ff] . identifier[MolecularDynamicsTakeNSteps] ( identifier[steps] , literal[int] ) keyword[elif] identifier[algorithm] == literal[string] : identifier[ff] . identifier[ConjugateGradients] ( identifier[steps] ) keyword[else] : keyword[raise] identifier[MBuildError] ( literal[string] literal[string] ) identifier[ff] . identifier[UpdateCoordinates] ( identifier[mol] ) identifier[obConversion] . identifier[WriteFile] ( identifier[mol] , identifier[os] . identifier[path] . identifier[join] ( identifier[tmp_dir] , literal[string] ))
def _energy_minimize_openbabel(self, tmp_dir, steps=1000, algorithm='cg', forcefield='UFF'): """Perform an energy minimization on a Compound Utilizes Open Babel (http://openbabel.org/docs/dev/) to perform an energy minimization/geometry optimization on a Compound by applying a generic force field. This function is primarily intended to be used on smaller components, with sizes on the order of 10's to 100's of particles, as the energy minimization scales poorly with the number of particles. Parameters ---------- steps : int, optionl, default=1000 The number of optimization iterations algorithm : str, optional, default='cg' The energy minimization algorithm. Valid options are 'steep', 'cg', and 'md', corresponding to steepest descent, conjugate gradient, and equilibrium molecular dynamics respectively. forcefield : str, optional, default='UFF' The generic force field to apply to the Compound for minimization. Valid options are 'MMFF94', 'MMFF94s', ''UFF', 'GAFF', and 'Ghemical'. Please refer to the Open Babel documentation (http://open-babel. readthedocs.io/en/latest/Forcefields/Overview.html) when considering your choice of force field. References ---------- .. [1] O'Boyle, N.M.; Banck, M.; James, C.A.; Morley, C.; Vandermeersch, T.; Hutchison, G.R. "Open Babel: An open chemical toolbox." (2011) J. Cheminf. 3, 33 .. [2] Open Babel, version X.X.X http://openbabel.org, (installed Month Year) If using the 'MMFF94' force field please also cite the following: .. [3] T.A. Halgren, "Merck molecular force field. I. Basis, form, scope, parameterization, and performance of MMFF94." (1996) J. Comput. Chem. 17, 490-519 .. [4] T.A. Halgren, "Merck molecular force field. II. MMFF94 van der Waals and electrostatic parameters for intermolecular interactions." (1996) J. Comput. Chem. 17, 520-552 .. [5] T.A. Halgren, "Merck molecular force field. III. Molecular geometries and vibrational frequencies for MMFF94." (1996) J. Comput. Chem. 17, 553-586 .. [6] T.A. Halgren and R.B. Nachbar, "Merck molecular force field. IV. Conformational energies and geometries for MMFF94." (1996) J. Comput. Chem. 17, 587-615 .. [7] T.A. Halgren, "Merck molecular force field. V. Extension of MMFF94 using experimental data, additional computational data, and empirical rules." (1996) J. Comput. Chem. 17, 616-641 If using the 'MMFF94s' force field please cite the above along with: .. [8] T.A. Halgren, "MMFF VI. MMFF94s option for energy minimization studies." (1999) J. Comput. Chem. 20, 720-729 If using the 'UFF' force field please cite the following: .. [3] Rappe, A.K., Casewit, C.J., Colwell, K.S., Goddard, W.A. III, Skiff, W.M. "UFF, a full periodic table force field for molecular mechanics and molecular dynamics simulations." (1992) J. Am. Chem. Soc. 114, 10024-10039 If using the 'GAFF' force field please cite the following: .. [3] Wang, J., Wolf, R.M., Caldwell, J.W., Kollman, P.A., Case, D.A. "Development and testing of a general AMBER force field" (2004) J. Comput. Chem. 25, 1157-1174 If using the 'Ghemical' force field please cite the following: .. [3] T. Hassinen and M. Perakyla, "New energy terms for reduced protein models implemented in an off-lattice force field" (2001) J. Comput. Chem. 22, 1229-1242 """ openbabel = import_('openbabel') for particle in self.particles(): try: get_by_symbol(particle.name) # depends on [control=['try'], data=[]] except KeyError: raise MBuildError('Element name {} not recognized. Cannot perform minimization.'.format(particle.name)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['particle']] obConversion = openbabel.OBConversion() obConversion.SetInAndOutFormats('mol2', 'pdb') mol = openbabel.OBMol() obConversion.ReadFile(mol, os.path.join(tmp_dir, 'un-minimized.mol2')) ff = openbabel.OBForceField.FindForceField(forcefield) if ff is None: raise MBuildError("Force field '{}' not supported for energy minimization. Valid force fields are 'MMFF94', 'MMFF94s', 'UFF', 'GAFF', and 'Ghemical'.".format(forcefield)) # depends on [control=['if'], data=[]] warn('Performing energy minimization using the Open Babel package. Please refer to the documentation to find the appropriate citations for Open Babel and the {} force field'.format(forcefield)) ff.Setup(mol) if algorithm == 'steep': ff.SteepestDescent(steps) # depends on [control=['if'], data=[]] elif algorithm == 'md': ff.MolecularDynamicsTakeNSteps(steps, 300) # depends on [control=['if'], data=[]] elif algorithm == 'cg': ff.ConjugateGradients(steps) # depends on [control=['if'], data=[]] else: raise MBuildError("Invalid minimization algorithm. Valid options are 'steep', 'cg', and 'md'.") ff.UpdateCoordinates(mol) obConversion.WriteFile(mol, os.path.join(tmp_dir, 'minimized.pdb'))
def modifyReject(LowLayerCompatibility_presence=0, HighLayerCompatibility_presence=0): """MODIFY REJECT Section 9.3.15""" a = TpPd(pd=0x3) b = MessageType(mesType=0x13) # 00010011 c = BearerCapability() d = Cause() packet = a / b / c / d if LowLayerCompatibility_presence is 1: e = LowLayerCompatibilityHdr(ieiLLC=0x7C, eightBitLLC=0x0) packet = packet / e if HighLayerCompatibility_presence is 1: f = HighLayerCompatibilityHdr(ieiHLC=0x7D, eightBitHLC=0x0) packet = packet / f return packet
def function[modifyReject, parameter[LowLayerCompatibility_presence, HighLayerCompatibility_presence]]: constant[MODIFY REJECT Section 9.3.15] variable[a] assign[=] call[name[TpPd], parameter[]] variable[b] assign[=] call[name[MessageType], parameter[]] variable[c] assign[=] call[name[BearerCapability], parameter[]] variable[d] assign[=] call[name[Cause], parameter[]] variable[packet] assign[=] binary_operation[binary_operation[binary_operation[name[a] / name[b]] / name[c]] / name[d]] if compare[name[LowLayerCompatibility_presence] is constant[1]] begin[:] variable[e] assign[=] call[name[LowLayerCompatibilityHdr], parameter[]] variable[packet] assign[=] binary_operation[name[packet] / name[e]] if compare[name[HighLayerCompatibility_presence] is constant[1]] begin[:] variable[f] assign[=] call[name[HighLayerCompatibilityHdr], parameter[]] variable[packet] assign[=] binary_operation[name[packet] / name[f]] return[name[packet]]
keyword[def] identifier[modifyReject] ( identifier[LowLayerCompatibility_presence] = literal[int] , identifier[HighLayerCompatibility_presence] = literal[int] ): literal[string] identifier[a] = identifier[TpPd] ( identifier[pd] = literal[int] ) identifier[b] = identifier[MessageType] ( identifier[mesType] = literal[int] ) identifier[c] = identifier[BearerCapability] () identifier[d] = identifier[Cause] () identifier[packet] = identifier[a] / identifier[b] / identifier[c] / identifier[d] keyword[if] identifier[LowLayerCompatibility_presence] keyword[is] literal[int] : identifier[e] = identifier[LowLayerCompatibilityHdr] ( identifier[ieiLLC] = literal[int] , identifier[eightBitLLC] = literal[int] ) identifier[packet] = identifier[packet] / identifier[e] keyword[if] identifier[HighLayerCompatibility_presence] keyword[is] literal[int] : identifier[f] = identifier[HighLayerCompatibilityHdr] ( identifier[ieiHLC] = literal[int] , identifier[eightBitHLC] = literal[int] ) identifier[packet] = identifier[packet] / identifier[f] keyword[return] identifier[packet]
def modifyReject(LowLayerCompatibility_presence=0, HighLayerCompatibility_presence=0): """MODIFY REJECT Section 9.3.15""" a = TpPd(pd=3) b = MessageType(mesType=19) # 00010011 c = BearerCapability() d = Cause() packet = a / b / c / d if LowLayerCompatibility_presence is 1: e = LowLayerCompatibilityHdr(ieiLLC=124, eightBitLLC=0) packet = packet / e # depends on [control=['if'], data=[]] if HighLayerCompatibility_presence is 1: f = HighLayerCompatibilityHdr(ieiHLC=125, eightBitHLC=0) packet = packet / f # depends on [control=['if'], data=[]] return packet
def create_database(self, database): """Create a database on the InfluxDB server. :param database: the name of the database to create :type database: string :rtype: boolean """ url = "db" data = {'name': database} self.request( url=url, method='POST', data=data, expected_response_code=201 ) return True
def function[create_database, parameter[self, database]]: constant[Create a database on the InfluxDB server. :param database: the name of the database to create :type database: string :rtype: boolean ] variable[url] assign[=] constant[db] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b1883a60>], [<ast.Name object at 0x7da1b1881300>]] call[name[self].request, parameter[]] return[constant[True]]
keyword[def] identifier[create_database] ( identifier[self] , identifier[database] ): literal[string] identifier[url] = literal[string] identifier[data] ={ literal[string] : identifier[database] } identifier[self] . identifier[request] ( identifier[url] = identifier[url] , identifier[method] = literal[string] , identifier[data] = identifier[data] , identifier[expected_response_code] = literal[int] ) keyword[return] keyword[True]
def create_database(self, database): """Create a database on the InfluxDB server. :param database: the name of the database to create :type database: string :rtype: boolean """ url = 'db' data = {'name': database} self.request(url=url, method='POST', data=data, expected_response_code=201) return True
def addUnexpectedSuccess(self, test): """ registers a test as an unexpected success :param test: test to register """ super().addUnexpectedSuccess(test) self.test_info(test) self._call_test_results('addUnexpectedSuccess', test)
def function[addUnexpectedSuccess, parameter[self, test]]: constant[ registers a test as an unexpected success :param test: test to register ] call[call[name[super], parameter[]].addUnexpectedSuccess, parameter[name[test]]] call[name[self].test_info, parameter[name[test]]] call[name[self]._call_test_results, parameter[constant[addUnexpectedSuccess], name[test]]]
keyword[def] identifier[addUnexpectedSuccess] ( identifier[self] , identifier[test] ): literal[string] identifier[super] (). identifier[addUnexpectedSuccess] ( identifier[test] ) identifier[self] . identifier[test_info] ( identifier[test] ) identifier[self] . identifier[_call_test_results] ( literal[string] , identifier[test] )
def addUnexpectedSuccess(self, test): """ registers a test as an unexpected success :param test: test to register """ super().addUnexpectedSuccess(test) self.test_info(test) self._call_test_results('addUnexpectedSuccess', test)
def _send_textmetrics(metrics): ''' Format metrics for the carbon plaintext protocol ''' data = [' '.join(map(six.text_type, metric)) for metric in metrics] + [''] return '\n'.join(data)
def function[_send_textmetrics, parameter[metrics]]: constant[ Format metrics for the carbon plaintext protocol ] variable[data] assign[=] binary_operation[<ast.ListComp object at 0x7da18f58df30> + list[[<ast.Constant object at 0x7da18f58db10>]]] return[call[constant[ ].join, parameter[name[data]]]]
keyword[def] identifier[_send_textmetrics] ( identifier[metrics] ): literal[string] identifier[data] =[ literal[string] . identifier[join] ( identifier[map] ( identifier[six] . identifier[text_type] , identifier[metric] )) keyword[for] identifier[metric] keyword[in] identifier[metrics] ]+[ literal[string] ] keyword[return] literal[string] . identifier[join] ( identifier[data] )
def _send_textmetrics(metrics): """ Format metrics for the carbon plaintext protocol """ data = [' '.join(map(six.text_type, metric)) for metric in metrics] + [''] return '\n'.join(data)
def is_mouse_over(self, event, include_label=True, width_modifier=0): """ Check if the specified mouse event is over this widget. :param event: The MouseEvent to check. :param include_label: Include space reserved for the label when checking. :param width_modifier: Adjustement to width (e.g. for scroll bars). :returns: True if the mouse is over the active parts of the widget. """ # Disabled widgets should not react to the mouse. logger.debug("Widget: %s (%d, %d) (%d, %d)", self, self._x, self._y, self._w, self._h) if self._is_disabled: return False # Check for any overlap if self._y <= event.y < self._y + self._h: if ((include_label and self._x <= event.x < self._x + self._w - width_modifier) or (self._x + self._offset <= event.x < self._x + self._w - width_modifier)): return True return False
def function[is_mouse_over, parameter[self, event, include_label, width_modifier]]: constant[ Check if the specified mouse event is over this widget. :param event: The MouseEvent to check. :param include_label: Include space reserved for the label when checking. :param width_modifier: Adjustement to width (e.g. for scroll bars). :returns: True if the mouse is over the active parts of the widget. ] call[name[logger].debug, parameter[constant[Widget: %s (%d, %d) (%d, %d)], name[self], name[self]._x, name[self]._y, name[self]._w, name[self]._h]] if name[self]._is_disabled begin[:] return[constant[False]] if compare[name[self]._y less_or_equal[<=] name[event].y] begin[:] if <ast.BoolOp object at 0x7da1b1d52cb0> begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[is_mouse_over] ( identifier[self] , identifier[event] , identifier[include_label] = keyword[True] , identifier[width_modifier] = literal[int] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] , identifier[self] , identifier[self] . identifier[_x] , identifier[self] . identifier[_y] , identifier[self] . identifier[_w] , identifier[self] . identifier[_h] ) keyword[if] identifier[self] . identifier[_is_disabled] : keyword[return] keyword[False] keyword[if] identifier[self] . identifier[_y] <= identifier[event] . identifier[y] < identifier[self] . identifier[_y] + identifier[self] . identifier[_h] : keyword[if] (( identifier[include_label] keyword[and] identifier[self] . identifier[_x] <= identifier[event] . identifier[x] < identifier[self] . identifier[_x] + identifier[self] . identifier[_w] - identifier[width_modifier] ) keyword[or] ( identifier[self] . identifier[_x] + identifier[self] . identifier[_offset] <= identifier[event] . identifier[x] < identifier[self] . identifier[_x] + identifier[self] . identifier[_w] - identifier[width_modifier] )): keyword[return] keyword[True] keyword[return] keyword[False]
def is_mouse_over(self, event, include_label=True, width_modifier=0): """ Check if the specified mouse event is over this widget. :param event: The MouseEvent to check. :param include_label: Include space reserved for the label when checking. :param width_modifier: Adjustement to width (e.g. for scroll bars). :returns: True if the mouse is over the active parts of the widget. """ # Disabled widgets should not react to the mouse. logger.debug('Widget: %s (%d, %d) (%d, %d)', self, self._x, self._y, self._w, self._h) if self._is_disabled: return False # depends on [control=['if'], data=[]] # Check for any overlap if self._y <= event.y < self._y + self._h: if include_label and self._x <= event.x < self._x + self._w - width_modifier or self._x + self._offset <= event.x < self._x + self._w - width_modifier: return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return False
def remove(name=None, pkgs=None, **kwargs): ''' Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, **kwargs )[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret
def function[remove, parameter[name, pkgs]]: constant[ Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ] <ast.Try object at 0x7da1b1f8ecb0> variable[old] assign[=] call[name[list_pkgs], parameter[]] variable[targets] assign[=] <ast.ListComp object at 0x7da1b1f66950> if <ast.UnaryOp object at 0x7da1b1f64280> begin[:] return[dictionary[[], []]] variable[cmd] assign[=] call[constant[uninstall {0}].format, parameter[call[constant[ ].join, parameter[name[targets]]]]] variable[out] assign[=] call[name[_call_brew], parameter[name[cmd]]] if <ast.BoolOp object at 0x7da1b1f65bd0> begin[:] variable[errors] assign[=] list[[<ast.Subscript object at 0x7da1b1f665c0>]] call[name[__context__].pop, parameter[constant[pkg.list_pkgs], constant[None]]] variable[new] assign[=] call[name[list_pkgs], parameter[]] variable[ret] assign[=] call[name[salt].utils.data.compare_dicts, parameter[name[old], name[new]]] if name[errors] begin[:] <ast.Raise object at 0x7da1b1f67340> return[name[ret]]
keyword[def] identifier[remove] ( identifier[name] = keyword[None] , identifier[pkgs] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[try] : identifier[pkg_params] = identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[pkgs] ,** identifier[kwargs] )[ literal[int] ] keyword[except] identifier[MinionError] keyword[as] identifier[exc] : keyword[raise] identifier[CommandExecutionError] ( identifier[exc] ) identifier[old] = identifier[list_pkgs] () identifier[targets] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[pkg_params] keyword[if] identifier[x] keyword[in] identifier[old] ] keyword[if] keyword[not] identifier[targets] : keyword[return] {} identifier[cmd] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[targets] )) identifier[out] = identifier[_call_brew] ( identifier[cmd] ) keyword[if] identifier[out] [ literal[string] ]!= literal[int] keyword[and] identifier[out] [ literal[string] ]: identifier[errors] =[ identifier[out] [ literal[string] ]] keyword[else] : identifier[errors] =[] identifier[__context__] . identifier[pop] ( literal[string] , keyword[None] ) identifier[new] = identifier[list_pkgs] () identifier[ret] = identifier[salt] . identifier[utils] . identifier[data] . identifier[compare_dicts] ( identifier[old] , identifier[new] ) keyword[if] identifier[errors] : keyword[raise] identifier[CommandExecutionError] ( literal[string] , identifier[info] ={ literal[string] : identifier[errors] , literal[string] : identifier[ret] } ) keyword[return] identifier[ret]
def remove(name=None, pkgs=None, **kwargs): """ Removes packages with ``brew uninstall``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' """ try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs, **kwargs)[0] # depends on [control=['try'], data=[]] except MinionError as exc: raise CommandExecutionError(exc) # depends on [control=['except'], data=['exc']] old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} # depends on [control=['if'], data=[]] cmd = 'uninstall {0}'.format(' '.join(targets)) out = _call_brew(cmd) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] # depends on [control=['if'], data=[]] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError('Problem encountered removing package(s)', info={'errors': errors, 'changes': ret}) # depends on [control=['if'], data=[]] return ret
def _clipboard(self, pri=False): """ C """ # Copy highlighted url to clipboard fpo = self.top.body.focus_position url_idx = len([i for i in self.items[:fpo + 1] if isinstance(i, urwid.Columns)]) - 1 if self.compact is False and fpo <= 1: return url = self.urls[url_idx] if pri is True: cmds = ("xsel -i", "xclip -i") else: cmds = ("xsel -ib", "xclip -i -selection clipboard") for cmd in cmds: try: proc = Popen(shlex.split(cmd), stdin=PIPE) proc.communicate(input=url.encode(sys.getdefaultencoding())) self._footer_start_thread("Copied url to {} selection".format( "primary" if pri is True else "clipboard"), 5) except OSError: continue break
def function[_clipboard, parameter[self, pri]]: constant[ C ] variable[fpo] assign[=] name[self].top.body.focus_position variable[url_idx] assign[=] binary_operation[call[name[len], parameter[<ast.ListComp object at 0x7da20c76fb80>]] - constant[1]] if <ast.BoolOp object at 0x7da20c76c7f0> begin[:] return[None] variable[url] assign[=] call[name[self].urls][name[url_idx]] if compare[name[pri] is constant[True]] begin[:] variable[cmds] assign[=] tuple[[<ast.Constant object at 0x7da20c76fd90>, <ast.Constant object at 0x7da20c76e3b0>]] for taget[name[cmd]] in starred[name[cmds]] begin[:] <ast.Try object at 0x7da20c76e0e0> break
keyword[def] identifier[_clipboard] ( identifier[self] , identifier[pri] = keyword[False] ): literal[string] identifier[fpo] = identifier[self] . identifier[top] . identifier[body] . identifier[focus_position] identifier[url_idx] = identifier[len] ([ identifier[i] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[items] [: identifier[fpo] + literal[int] ] keyword[if] identifier[isinstance] ( identifier[i] , identifier[urwid] . identifier[Columns] )])- literal[int] keyword[if] identifier[self] . identifier[compact] keyword[is] keyword[False] keyword[and] identifier[fpo] <= literal[int] : keyword[return] identifier[url] = identifier[self] . identifier[urls] [ identifier[url_idx] ] keyword[if] identifier[pri] keyword[is] keyword[True] : identifier[cmds] =( literal[string] , literal[string] ) keyword[else] : identifier[cmds] =( literal[string] , literal[string] ) keyword[for] identifier[cmd] keyword[in] identifier[cmds] : keyword[try] : identifier[proc] = identifier[Popen] ( identifier[shlex] . identifier[split] ( identifier[cmd] ), identifier[stdin] = identifier[PIPE] ) identifier[proc] . identifier[communicate] ( identifier[input] = identifier[url] . identifier[encode] ( identifier[sys] . identifier[getdefaultencoding] ())) identifier[self] . identifier[_footer_start_thread] ( literal[string] . identifier[format] ( literal[string] keyword[if] identifier[pri] keyword[is] keyword[True] keyword[else] literal[string] ), literal[int] ) keyword[except] identifier[OSError] : keyword[continue] keyword[break]
def _clipboard(self, pri=False): """ C """ # Copy highlighted url to clipboard fpo = self.top.body.focus_position url_idx = len([i for i in self.items[:fpo + 1] if isinstance(i, urwid.Columns)]) - 1 if self.compact is False and fpo <= 1: return # depends on [control=['if'], data=[]] url = self.urls[url_idx] if pri is True: cmds = ('xsel -i', 'xclip -i') # depends on [control=['if'], data=[]] else: cmds = ('xsel -ib', 'xclip -i -selection clipboard') for cmd in cmds: try: proc = Popen(shlex.split(cmd), stdin=PIPE) proc.communicate(input=url.encode(sys.getdefaultencoding())) self._footer_start_thread('Copied url to {} selection'.format('primary' if pri is True else 'clipboard'), 5) # depends on [control=['try'], data=[]] except OSError: continue # depends on [control=['except'], data=[]] break # depends on [control=['for'], data=['cmd']]
def search(path: str, data: typing.Union[list, dict], default=_UNSET, smart_unique: bool=True) -> typing.Union[int, float, bool, str, list, dict, None]: """ when not found: if raise_not_found is true, raise NotFoundError, else return default value. """ expr = parse(path) resp = expr.find(data) if not resp: if default is _UNSET: raise NotFoundError("Can't find by path: {}".format(path)) else: return default if len(resp) == 1 and smart_unique: return resp[0].value else: return [match.value for match in resp]
def function[search, parameter[path, data, default, smart_unique]]: constant[ when not found: if raise_not_found is true, raise NotFoundError, else return default value. ] variable[expr] assign[=] call[name[parse], parameter[name[path]]] variable[resp] assign[=] call[name[expr].find, parameter[name[data]]] if <ast.UnaryOp object at 0x7da1b0a493c0> begin[:] if compare[name[default] is name[_UNSET]] begin[:] <ast.Raise object at 0x7da1b0a4b700> if <ast.BoolOp object at 0x7da1b0a48e80> begin[:] return[call[name[resp]][constant[0]].value]
keyword[def] identifier[search] ( identifier[path] : identifier[str] , identifier[data] : identifier[typing] . identifier[Union] [ identifier[list] , identifier[dict] ], identifier[default] = identifier[_UNSET] , identifier[smart_unique] : identifier[bool] = keyword[True] )-> identifier[typing] . identifier[Union] [ identifier[int] , identifier[float] , identifier[bool] , identifier[str] , identifier[list] , identifier[dict] , keyword[None] ]: literal[string] identifier[expr] = identifier[parse] ( identifier[path] ) identifier[resp] = identifier[expr] . identifier[find] ( identifier[data] ) keyword[if] keyword[not] identifier[resp] : keyword[if] identifier[default] keyword[is] identifier[_UNSET] : keyword[raise] identifier[NotFoundError] ( literal[string] . identifier[format] ( identifier[path] )) keyword[else] : keyword[return] identifier[default] keyword[if] identifier[len] ( identifier[resp] )== literal[int] keyword[and] identifier[smart_unique] : keyword[return] identifier[resp] [ literal[int] ]. identifier[value] keyword[else] : keyword[return] [ identifier[match] . identifier[value] keyword[for] identifier[match] keyword[in] identifier[resp] ]
def search(path: str, data: typing.Union[list, dict], default=_UNSET, smart_unique: bool=True) -> typing.Union[int, float, bool, str, list, dict, None]: """ when not found: if raise_not_found is true, raise NotFoundError, else return default value. """ expr = parse(path) resp = expr.find(data) if not resp: if default is _UNSET: raise NotFoundError("Can't find by path: {}".format(path)) # depends on [control=['if'], data=[]] else: return default # depends on [control=['if'], data=[]] if len(resp) == 1 and smart_unique: return resp[0].value # depends on [control=['if'], data=[]] else: return [match.value for match in resp]
def _parser_fsm(self): """An FSM implemented as a coroutine. This generator is not the most beautiful, but it is as performant as possible. When a process generates a lot of output, then this will be the bottleneck, because it processes just one character at a time. Don't change anything without profiling first. """ basic = self.basic listener = self.listener draw = listener.draw debug = listener.debug ESC, CSI_C1 = ctrl.ESC, ctrl.CSI_C1 OSC_C1 = ctrl.OSC_C1 SP_OR_GT = ctrl.SP + ">" NUL_OR_DEL = ctrl.NUL + ctrl.DEL CAN_OR_SUB = ctrl.CAN + ctrl.SUB ALLOWED_IN_CSI = "".join([ctrl.BEL, ctrl.BS, ctrl.HT, ctrl.LF, ctrl.VT, ctrl.FF, ctrl.CR]) OSC_TERMINATORS = set([ctrl.ST_C0, ctrl.ST_C1, ctrl.BEL]) def create_dispatcher(mapping): return defaultdict(lambda: debug, dict( (event, getattr(listener, attr)) for event, attr in mapping.items())) basic_dispatch = create_dispatcher(basic) sharp_dispatch = create_dispatcher(self.sharp) escape_dispatch = create_dispatcher(self.escape) csi_dispatch = create_dispatcher(self.csi) while True: # ``True`` tells ``Screen.feed`` that it is allowed to send # chunks of plain text directly to the listener, instead # of this generator. char = yield True if char == ESC: # Most non-VT52 commands start with a left-bracket after the # escape and then a stream of parameters and a command; with # a single notable exception -- :data:`escape.DECOM` sequence, # which starts with a sharp. # # .. versionchanged:: 0.4.10 # # For compatibility with Linux terminal stream also # recognizes ``ESC % C`` sequences for selecting control # character set. However, in the current version these # are noop. char = yield if char == "[": char = CSI_C1 # Go to CSI. elif char == "]": char = OSC_C1 # Go to OSC. else: if char == "#": sharp_dispatch[(yield)]() elif char == "%": self.select_other_charset((yield)) elif char in "()": code = yield if self.use_utf8: continue # See http://www.cl.cam.ac.uk/~mgk25/unicode.html#term # for the why on the UTF-8 restriction. listener.define_charset(code, mode=char) else: escape_dispatch[char]() continue # Don't go to CSI. if char in basic: # Ignore shifts in UTF-8 mode. See # http://www.cl.cam.ac.uk/~mgk25/unicode.html#term for # the why on UTF-8 restriction. if (char == ctrl.SI or char == ctrl.SO) and self.use_utf8: continue basic_dispatch[char]() elif char == CSI_C1: # All parameters are unsigned, positive decimal integers, with # the most significant digit sent first. Any parameter greater # than 9999 is set to 9999. If you do not specify a value, a 0 # value is assumed. # # .. seealso:: # # `VT102 User Guide <http://vt100.net/docs/vt102-ug/>`_ # For details on the formatting of escape arguments. # # `VT220 Programmer Ref. <http://vt100.net/docs/vt220-rm/>`_ # For details on the characters valid for use as # arguments. params = [] current = "" private = False while True: char = yield if char == "?": private = True elif char in ALLOWED_IN_CSI: basic_dispatch[char]() elif char in SP_OR_GT: pass # Secondary DA is not supported atm. elif char in CAN_OR_SUB: # If CAN or SUB is received during a sequence, the # current sequence is aborted; terminal displays # the substitute character, followed by characters # in the sequence received after CAN or SUB. draw(char) break elif char.isdigit(): current += char elif char == "$": # XTerm-specific ESC]...$[a-z] sequences are not # currently supported. yield break else: params.append(min(int(current or 0), 9999)) if char == ";": current = "" else: if private: csi_dispatch[char](*params, private=True) else: csi_dispatch[char](*params) break # CSI is finished. elif char == OSC_C1: code = yield if code == "R": continue # Reset palette. Not implemented. elif code == "P": continue # Set palette. Not implemented. param = "" while True: char = yield if char == ESC: char += yield if char in OSC_TERMINATORS: break else: param += char param = param[1:] # Drop the ;. if code in "01": listener.set_icon_name(param) if code in "02": listener.set_title(param) elif char not in NUL_OR_DEL: draw(char)
def function[_parser_fsm, parameter[self]]: constant[An FSM implemented as a coroutine. This generator is not the most beautiful, but it is as performant as possible. When a process generates a lot of output, then this will be the bottleneck, because it processes just one character at a time. Don't change anything without profiling first. ] variable[basic] assign[=] name[self].basic variable[listener] assign[=] name[self].listener variable[draw] assign[=] name[listener].draw variable[debug] assign[=] name[listener].debug <ast.Tuple object at 0x7da1b07a9840> assign[=] tuple[[<ast.Attribute object at 0x7da1b07a8ac0>, <ast.Attribute object at 0x7da1b07a95a0>]] variable[OSC_C1] assign[=] name[ctrl].OSC_C1 variable[SP_OR_GT] assign[=] binary_operation[name[ctrl].SP + constant[>]] variable[NUL_OR_DEL] assign[=] binary_operation[name[ctrl].NUL + name[ctrl].DEL] variable[CAN_OR_SUB] assign[=] binary_operation[name[ctrl].CAN + name[ctrl].SUB] variable[ALLOWED_IN_CSI] assign[=] call[constant[].join, parameter[list[[<ast.Attribute object at 0x7da1b07abb50>, <ast.Attribute object at 0x7da1b07a9480>, <ast.Attribute object at 0x7da1b07a9a80>, <ast.Attribute object at 0x7da1b07a9c90>, <ast.Attribute object at 0x7da1b07aabc0>, <ast.Attribute object at 0x7da1b07a8bb0>, <ast.Attribute object at 0x7da1b07ab0d0>]]]] variable[OSC_TERMINATORS] assign[=] call[name[set], parameter[list[[<ast.Attribute object at 0x7da1b07a9750>, <ast.Attribute object at 0x7da1b07ab580>, <ast.Attribute object at 0x7da1b07a8760>]]]] def function[create_dispatcher, parameter[mapping]]: return[call[name[defaultdict], parameter[<ast.Lambda object at 0x7da1b07aaa10>, call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b07abdf0>]]]]] variable[basic_dispatch] assign[=] call[name[create_dispatcher], parameter[name[basic]]] variable[sharp_dispatch] assign[=] call[name[create_dispatcher], parameter[name[self].sharp]] variable[escape_dispatch] assign[=] call[name[create_dispatcher], parameter[name[self].escape]] variable[csi_dispatch] assign[=] call[name[create_dispatcher], parameter[name[self].csi]] while constant[True] begin[:] variable[char] assign[=] <ast.Yield object at 0x7da1b07b9660> if compare[name[char] equal[==] name[ESC]] begin[:] variable[char] assign[=] <ast.Yield object at 0x7da1b07ba260> if compare[name[char] equal[==] constant[[]] begin[:] variable[char] assign[=] name[CSI_C1] if compare[name[char] in name[basic]] begin[:] if <ast.BoolOp object at 0x7da1b07b8130> begin[:] continue call[call[name[basic_dispatch]][name[char]], parameter[]]
keyword[def] identifier[_parser_fsm] ( identifier[self] ): literal[string] identifier[basic] = identifier[self] . identifier[basic] identifier[listener] = identifier[self] . identifier[listener] identifier[draw] = identifier[listener] . identifier[draw] identifier[debug] = identifier[listener] . identifier[debug] identifier[ESC] , identifier[CSI_C1] = identifier[ctrl] . identifier[ESC] , identifier[ctrl] . identifier[CSI_C1] identifier[OSC_C1] = identifier[ctrl] . identifier[OSC_C1] identifier[SP_OR_GT] = identifier[ctrl] . identifier[SP] + literal[string] identifier[NUL_OR_DEL] = identifier[ctrl] . identifier[NUL] + identifier[ctrl] . identifier[DEL] identifier[CAN_OR_SUB] = identifier[ctrl] . identifier[CAN] + identifier[ctrl] . identifier[SUB] identifier[ALLOWED_IN_CSI] = literal[string] . identifier[join] ([ identifier[ctrl] . identifier[BEL] , identifier[ctrl] . identifier[BS] , identifier[ctrl] . identifier[HT] , identifier[ctrl] . identifier[LF] , identifier[ctrl] . identifier[VT] , identifier[ctrl] . identifier[FF] , identifier[ctrl] . identifier[CR] ]) identifier[OSC_TERMINATORS] = identifier[set] ([ identifier[ctrl] . identifier[ST_C0] , identifier[ctrl] . identifier[ST_C1] , identifier[ctrl] . identifier[BEL] ]) keyword[def] identifier[create_dispatcher] ( identifier[mapping] ): keyword[return] identifier[defaultdict] ( keyword[lambda] : identifier[debug] , identifier[dict] ( ( identifier[event] , identifier[getattr] ( identifier[listener] , identifier[attr] )) keyword[for] identifier[event] , identifier[attr] keyword[in] identifier[mapping] . identifier[items] ())) identifier[basic_dispatch] = identifier[create_dispatcher] ( identifier[basic] ) identifier[sharp_dispatch] = identifier[create_dispatcher] ( identifier[self] . identifier[sharp] ) identifier[escape_dispatch] = identifier[create_dispatcher] ( identifier[self] . identifier[escape] ) identifier[csi_dispatch] = identifier[create_dispatcher] ( identifier[self] . identifier[csi] ) keyword[while] keyword[True] : identifier[char] = keyword[yield] keyword[True] keyword[if] identifier[char] == identifier[ESC] : identifier[char] = keyword[yield] keyword[if] identifier[char] == literal[string] : identifier[char] = identifier[CSI_C1] keyword[elif] identifier[char] == literal[string] : identifier[char] = identifier[OSC_C1] keyword[else] : keyword[if] identifier[char] == literal[string] : identifier[sharp_dispatch] [( keyword[yield] )]() keyword[elif] identifier[char] == literal[string] : identifier[self] . identifier[select_other_charset] (( keyword[yield] )) keyword[elif] identifier[char] keyword[in] literal[string] : identifier[code] = keyword[yield] keyword[if] identifier[self] . identifier[use_utf8] : keyword[continue] identifier[listener] . identifier[define_charset] ( identifier[code] , identifier[mode] = identifier[char] ) keyword[else] : identifier[escape_dispatch] [ identifier[char] ]() keyword[continue] keyword[if] identifier[char] keyword[in] identifier[basic] : keyword[if] ( identifier[char] == identifier[ctrl] . identifier[SI] keyword[or] identifier[char] == identifier[ctrl] . identifier[SO] ) keyword[and] identifier[self] . identifier[use_utf8] : keyword[continue] identifier[basic_dispatch] [ identifier[char] ]() keyword[elif] identifier[char] == identifier[CSI_C1] : identifier[params] =[] identifier[current] = literal[string] identifier[private] = keyword[False] keyword[while] keyword[True] : identifier[char] = keyword[yield] keyword[if] identifier[char] == literal[string] : identifier[private] = keyword[True] keyword[elif] identifier[char] keyword[in] identifier[ALLOWED_IN_CSI] : identifier[basic_dispatch] [ identifier[char] ]() keyword[elif] identifier[char] keyword[in] identifier[SP_OR_GT] : keyword[pass] keyword[elif] identifier[char] keyword[in] identifier[CAN_OR_SUB] : identifier[draw] ( identifier[char] ) keyword[break] keyword[elif] identifier[char] . identifier[isdigit] (): identifier[current] += identifier[char] keyword[elif] identifier[char] == literal[string] : keyword[yield] keyword[break] keyword[else] : identifier[params] . identifier[append] ( identifier[min] ( identifier[int] ( identifier[current] keyword[or] literal[int] ), literal[int] )) keyword[if] identifier[char] == literal[string] : identifier[current] = literal[string] keyword[else] : keyword[if] identifier[private] : identifier[csi_dispatch] [ identifier[char] ](* identifier[params] , identifier[private] = keyword[True] ) keyword[else] : identifier[csi_dispatch] [ identifier[char] ](* identifier[params] ) keyword[break] keyword[elif] identifier[char] == identifier[OSC_C1] : identifier[code] = keyword[yield] keyword[if] identifier[code] == literal[string] : keyword[continue] keyword[elif] identifier[code] == literal[string] : keyword[continue] identifier[param] = literal[string] keyword[while] keyword[True] : identifier[char] = keyword[yield] keyword[if] identifier[char] == identifier[ESC] : identifier[char] += keyword[yield] keyword[if] identifier[char] keyword[in] identifier[OSC_TERMINATORS] : keyword[break] keyword[else] : identifier[param] += identifier[char] identifier[param] = identifier[param] [ literal[int] :] keyword[if] identifier[code] keyword[in] literal[string] : identifier[listener] . identifier[set_icon_name] ( identifier[param] ) keyword[if] identifier[code] keyword[in] literal[string] : identifier[listener] . identifier[set_title] ( identifier[param] ) keyword[elif] identifier[char] keyword[not] keyword[in] identifier[NUL_OR_DEL] : identifier[draw] ( identifier[char] )
def _parser_fsm(self): """An FSM implemented as a coroutine. This generator is not the most beautiful, but it is as performant as possible. When a process generates a lot of output, then this will be the bottleneck, because it processes just one character at a time. Don't change anything without profiling first. """ basic = self.basic listener = self.listener draw = listener.draw debug = listener.debug (ESC, CSI_C1) = (ctrl.ESC, ctrl.CSI_C1) OSC_C1 = ctrl.OSC_C1 SP_OR_GT = ctrl.SP + '>' NUL_OR_DEL = ctrl.NUL + ctrl.DEL CAN_OR_SUB = ctrl.CAN + ctrl.SUB ALLOWED_IN_CSI = ''.join([ctrl.BEL, ctrl.BS, ctrl.HT, ctrl.LF, ctrl.VT, ctrl.FF, ctrl.CR]) OSC_TERMINATORS = set([ctrl.ST_C0, ctrl.ST_C1, ctrl.BEL]) def create_dispatcher(mapping): return defaultdict(lambda : debug, dict(((event, getattr(listener, attr)) for (event, attr) in mapping.items()))) basic_dispatch = create_dispatcher(basic) sharp_dispatch = create_dispatcher(self.sharp) escape_dispatch = create_dispatcher(self.escape) csi_dispatch = create_dispatcher(self.csi) while True: # ``True`` tells ``Screen.feed`` that it is allowed to send # chunks of plain text directly to the listener, instead # of this generator. char = (yield True) if char == ESC: # Most non-VT52 commands start with a left-bracket after the # escape and then a stream of parameters and a command; with # a single notable exception -- :data:`escape.DECOM` sequence, # which starts with a sharp. # # .. versionchanged:: 0.4.10 # # For compatibility with Linux terminal stream also # recognizes ``ESC % C`` sequences for selecting control # character set. However, in the current version these # are noop. char = (yield) if char == '[': char = CSI_C1 # Go to CSI. # depends on [control=['if'], data=['char']] elif char == ']': char = OSC_C1 # Go to OSC. # depends on [control=['if'], data=['char']] else: if char == '#': sharp_dispatch[(yield)]() # depends on [control=['if'], data=[]] elif char == '%': self.select_other_charset((yield)) # depends on [control=['if'], data=[]] elif char in '()': code = (yield) if self.use_utf8: continue # depends on [control=['if'], data=[]] # See http://www.cl.cam.ac.uk/~mgk25/unicode.html#term # for the why on the UTF-8 restriction. listener.define_charset(code, mode=char) # depends on [control=['if'], data=['char']] else: escape_dispatch[char]() continue # Don't go to CSI. # depends on [control=['if'], data=['char']] if char in basic: # Ignore shifts in UTF-8 mode. See # http://www.cl.cam.ac.uk/~mgk25/unicode.html#term for # the why on UTF-8 restriction. if (char == ctrl.SI or char == ctrl.SO) and self.use_utf8: continue # depends on [control=['if'], data=[]] basic_dispatch[char]() # depends on [control=['if'], data=['char']] elif char == CSI_C1: # All parameters are unsigned, positive decimal integers, with # the most significant digit sent first. Any parameter greater # than 9999 is set to 9999. If you do not specify a value, a 0 # value is assumed. # # .. seealso:: # # `VT102 User Guide <http://vt100.net/docs/vt102-ug/>`_ # For details on the formatting of escape arguments. # # `VT220 Programmer Ref. <http://vt100.net/docs/vt220-rm/>`_ # For details on the characters valid for use as # arguments. params = [] current = '' private = False while True: char = (yield) if char == '?': private = True # depends on [control=['if'], data=[]] elif char in ALLOWED_IN_CSI: basic_dispatch[char]() # depends on [control=['if'], data=['char']] elif char in SP_OR_GT: pass # Secondary DA is not supported atm. # depends on [control=['if'], data=[]] elif char in CAN_OR_SUB: # If CAN or SUB is received during a sequence, the # current sequence is aborted; terminal displays # the substitute character, followed by characters # in the sequence received after CAN or SUB. draw(char) break # depends on [control=['if'], data=['char']] elif char.isdigit(): current += char # depends on [control=['if'], data=[]] elif char == '$': # XTerm-specific ESC]...$[a-z] sequences are not # currently supported. yield break # depends on [control=['if'], data=[]] else: params.append(min(int(current or 0), 9999)) if char == ';': current = '' # depends on [control=['if'], data=[]] else: if private: csi_dispatch[char](*params, private=True) # depends on [control=['if'], data=[]] else: csi_dispatch[char](*params) break # CSI is finished. # depends on [control=['while'], data=[]] # depends on [control=['if'], data=['char']] elif char == OSC_C1: code = (yield) if code == 'R': continue # Reset palette. Not implemented. # depends on [control=['if'], data=[]] elif code == 'P': continue # Set palette. Not implemented. # depends on [control=['if'], data=[]] param = '' while True: char = (yield) if char == ESC: char += (yield) # depends on [control=['if'], data=['char']] if char in OSC_TERMINATORS: break # depends on [control=['if'], data=[]] else: param += char # depends on [control=['while'], data=[]] param = param[1:] # Drop the ;. if code in '01': listener.set_icon_name(param) # depends on [control=['if'], data=[]] if code in '02': listener.set_title(param) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['char']] elif char not in NUL_OR_DEL: draw(char) # depends on [control=['if'], data=['char']] # depends on [control=['while'], data=[]]
def get_split_datasets(self, X, y=None, **fit_params): """Get internal train and validation datasets. The validation dataset can be None if ``self.train_split`` is set to None; then internal validation will be skipped. Override this if you want to change how the net splits incoming data into train and validation part. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. y : target data, compatible with skorch.dataset.Dataset The same data types as for ``X`` are supported. If your X is a Dataset that contains the target, ``y`` may be set to None. **fit_params : dict Additional parameters passed to the ``self.train_split`` call. Returns ------- dataset_train The initialized training dataset. dataset_valid The initialized validation dataset or None """ dataset = self.get_dataset(X, y) if self.train_split: dataset_train, dataset_valid = self.train_split( dataset, y, **fit_params) else: dataset_train, dataset_valid = dataset, None return dataset_train, dataset_valid
def function[get_split_datasets, parameter[self, X, y]]: constant[Get internal train and validation datasets. The validation dataset can be None if ``self.train_split`` is set to None; then internal validation will be skipped. Override this if you want to change how the net splits incoming data into train and validation part. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. y : target data, compatible with skorch.dataset.Dataset The same data types as for ``X`` are supported. If your X is a Dataset that contains the target, ``y`` may be set to None. **fit_params : dict Additional parameters passed to the ``self.train_split`` call. Returns ------- dataset_train The initialized training dataset. dataset_valid The initialized validation dataset or None ] variable[dataset] assign[=] call[name[self].get_dataset, parameter[name[X], name[y]]] if name[self].train_split begin[:] <ast.Tuple object at 0x7da1b0aba080> assign[=] call[name[self].train_split, parameter[name[dataset], name[y]]] return[tuple[[<ast.Name object at 0x7da1b0ab95a0>, <ast.Name object at 0x7da1b0abb8e0>]]]
keyword[def] identifier[get_split_datasets] ( identifier[self] , identifier[X] , identifier[y] = keyword[None] ,** identifier[fit_params] ): literal[string] identifier[dataset] = identifier[self] . identifier[get_dataset] ( identifier[X] , identifier[y] ) keyword[if] identifier[self] . identifier[train_split] : identifier[dataset_train] , identifier[dataset_valid] = identifier[self] . identifier[train_split] ( identifier[dataset] , identifier[y] ,** identifier[fit_params] ) keyword[else] : identifier[dataset_train] , identifier[dataset_valid] = identifier[dataset] , keyword[None] keyword[return] identifier[dataset_train] , identifier[dataset_valid]
def get_split_datasets(self, X, y=None, **fit_params): """Get internal train and validation datasets. The validation dataset can be None if ``self.train_split`` is set to None; then internal validation will be skipped. Override this if you want to change how the net splits incoming data into train and validation part. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. y : target data, compatible with skorch.dataset.Dataset The same data types as for ``X`` are supported. If your X is a Dataset that contains the target, ``y`` may be set to None. **fit_params : dict Additional parameters passed to the ``self.train_split`` call. Returns ------- dataset_train The initialized training dataset. dataset_valid The initialized validation dataset or None """ dataset = self.get_dataset(X, y) if self.train_split: (dataset_train, dataset_valid) = self.train_split(dataset, y, **fit_params) # depends on [control=['if'], data=[]] else: (dataset_train, dataset_valid) = (dataset, None) return (dataset_train, dataset_valid)
def _from_binary_secd_header(cls, binary_stream): """See base class.""" ''' Revision number - 1 Padding - 1 Control flags - 2 Reference to the owner SID - 4 (offset relative to the header) Reference to the group SID - 4 (offset relative to the header) Reference to the DACL - 4 (offset relative to the header) Reference to the SACL - 4 (offset relative to the header) ''' nw_obj = cls(cls._REPR.unpack(binary_stream)) nw_obj.control_flags = SecurityDescriptorFlags(nw_obj.control_flags) _MOD_LOGGER.debug("Attempted to unpack Security Descriptor Header from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj) return nw_obj
def function[_from_binary_secd_header, parameter[cls, binary_stream]]: constant[See base class.] constant[ Revision number - 1 Padding - 1 Control flags - 2 Reference to the owner SID - 4 (offset relative to the header) Reference to the group SID - 4 (offset relative to the header) Reference to the DACL - 4 (offset relative to the header) Reference to the SACL - 4 (offset relative to the header) ] variable[nw_obj] assign[=] call[name[cls], parameter[call[name[cls]._REPR.unpack, parameter[name[binary_stream]]]]] name[nw_obj].control_flags assign[=] call[name[SecurityDescriptorFlags], parameter[name[nw_obj].control_flags]] call[name[_MOD_LOGGER].debug, parameter[constant[Attempted to unpack Security Descriptor Header from "%s" Result: %s], call[name[binary_stream].tobytes, parameter[]], name[nw_obj]]] return[name[nw_obj]]
keyword[def] identifier[_from_binary_secd_header] ( identifier[cls] , identifier[binary_stream] ): literal[string] literal[string] identifier[nw_obj] = identifier[cls] ( identifier[cls] . identifier[_REPR] . identifier[unpack] ( identifier[binary_stream] )) identifier[nw_obj] . identifier[control_flags] = identifier[SecurityDescriptorFlags] ( identifier[nw_obj] . identifier[control_flags] ) identifier[_MOD_LOGGER] . identifier[debug] ( literal[string] , identifier[binary_stream] . identifier[tobytes] (), identifier[nw_obj] ) keyword[return] identifier[nw_obj]
def _from_binary_secd_header(cls, binary_stream): """See base class.""" ' Revision number - 1\n Padding - 1\n Control flags - 2\n Reference to the owner SID - 4 (offset relative to the header)\n Reference to the group SID - 4 (offset relative to the header)\n Reference to the DACL - 4 (offset relative to the header)\n Reference to the SACL - 4 (offset relative to the header)\n ' nw_obj = cls(cls._REPR.unpack(binary_stream)) nw_obj.control_flags = SecurityDescriptorFlags(nw_obj.control_flags) _MOD_LOGGER.debug('Attempted to unpack Security Descriptor Header from "%s"\nResult: %s', binary_stream.tobytes(), nw_obj) return nw_obj
def _conn_key(self, instance, db_key, db_name=None): ''' Return a key to use for the connection cache ''' dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name) return '{}:{}:{}:{}:{}:{}'.format(dsn, host, username, password, database, driver)
def function[_conn_key, parameter[self, instance, db_key, db_name]]: constant[ Return a key to use for the connection cache ] <ast.Tuple object at 0x7da18c4cdae0> assign[=] call[name[self]._get_access_info, parameter[name[instance], name[db_key], name[db_name]]] return[call[constant[{}:{}:{}:{}:{}:{}].format, parameter[name[dsn], name[host], name[username], name[password], name[database], name[driver]]]]
keyword[def] identifier[_conn_key] ( identifier[self] , identifier[instance] , identifier[db_key] , identifier[db_name] = keyword[None] ): literal[string] identifier[dsn] , identifier[host] , identifier[username] , identifier[password] , identifier[database] , identifier[driver] = identifier[self] . identifier[_get_access_info] ( identifier[instance] , identifier[db_key] , identifier[db_name] ) keyword[return] literal[string] . identifier[format] ( identifier[dsn] , identifier[host] , identifier[username] , identifier[password] , identifier[database] , identifier[driver] )
def _conn_key(self, instance, db_key, db_name=None): """ Return a key to use for the connection cache """ (dsn, host, username, password, database, driver) = self._get_access_info(instance, db_key, db_name) return '{}:{}:{}:{}:{}:{}'.format(dsn, host, username, password, database, driver)
def _delete_external_tool(self, context, context_id, external_tool_id): """ Delete the external tool identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy """ url = context.format(context_id) + "/external_tools/{}".format( external_tool_id) response = self._delete_resource(url) return True
def function[_delete_external_tool, parameter[self, context, context_id, external_tool_id]]: constant[ Delete the external tool identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy ] variable[url] assign[=] binary_operation[call[name[context].format, parameter[name[context_id]]] + call[constant[/external_tools/{}].format, parameter[name[external_tool_id]]]] variable[response] assign[=] call[name[self]._delete_resource, parameter[name[url]]] return[constant[True]]
keyword[def] identifier[_delete_external_tool] ( identifier[self] , identifier[context] , identifier[context_id] , identifier[external_tool_id] ): literal[string] identifier[url] = identifier[context] . identifier[format] ( identifier[context_id] )+ literal[string] . identifier[format] ( identifier[external_tool_id] ) identifier[response] = identifier[self] . identifier[_delete_resource] ( identifier[url] ) keyword[return] keyword[True]
def _delete_external_tool(self, context, context_id, external_tool_id): """ Delete the external tool identified by external_tool_id. context is either COURSES_API or ACCOUNTS_API. context_id is the course_id or account_id, depending on context https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy """ url = context.format(context_id) + '/external_tools/{}'.format(external_tool_id) response = self._delete_resource(url) return True
def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. """ # Restores original filename in case that cpplint is invoked from Emacs's # flymake. filename = re.sub(r'_flymake\.h$', '.h', filename) filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) # Replace 'c++' with 'cpp'. filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: suffix = os.sep # On Windows using directory separator will leave us with # "bogus escape error" unless we properly escape regex. if suffix == '\\': suffix += '\\' file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root) return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
def function[GetHeaderGuardCPPVariable, parameter[filename]]: constant[Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. ] variable[filename] assign[=] call[name[re].sub, parameter[constant[_flymake\.h$], constant[.h], name[filename]]] variable[filename] assign[=] call[name[re].sub, parameter[constant[/\.flymake/([^/]*)$], constant[/\1], name[filename]]] variable[filename] assign[=] call[call[name[filename].replace, parameter[constant[C++], constant[cpp]]].replace, parameter[constant[c++], constant[cpp]]] variable[fileinfo] assign[=] call[name[FileInfo], parameter[name[filename]]] variable[file_path_from_root] assign[=] call[name[fileinfo].RepositoryName, parameter[]] if name[_root] begin[:] variable[suffix] assign[=] name[os].sep if compare[name[suffix] equal[==] constant[\]] begin[:] <ast.AugAssign object at 0x7da20c6c6b00> variable[file_path_from_root] assign[=] call[name[re].sub, parameter[binary_operation[binary_operation[constant[^] + name[_root]] + name[suffix]], constant[], name[file_path_from_root]]] return[binary_operation[call[call[name[re].sub, parameter[constant[[^a-zA-Z0-9]], constant[_], name[file_path_from_root]]].upper, parameter[]] + constant[_]]]
keyword[def] identifier[GetHeaderGuardCPPVariable] ( identifier[filename] ): literal[string] identifier[filename] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[filename] ) identifier[filename] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[filename] ) identifier[filename] = identifier[filename] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) identifier[fileinfo] = identifier[FileInfo] ( identifier[filename] ) identifier[file_path_from_root] = identifier[fileinfo] . identifier[RepositoryName] () keyword[if] identifier[_root] : identifier[suffix] = identifier[os] . identifier[sep] keyword[if] identifier[suffix] == literal[string] : identifier[suffix] += literal[string] identifier[file_path_from_root] = identifier[re] . identifier[sub] ( literal[string] + identifier[_root] + identifier[suffix] , literal[string] , identifier[file_path_from_root] ) keyword[return] identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[file_path_from_root] ). identifier[upper] ()+ literal[string]
def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. """ # Restores original filename in case that cpplint is invoked from Emacs's # flymake. filename = re.sub('_flymake\\.h$', '.h', filename) filename = re.sub('/\\.flymake/([^/]*)$', '/\\1', filename) # Replace 'c++' with 'cpp'. filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: suffix = os.sep # On Windows using directory separator will leave us with # "bogus escape error" unless we properly escape regex. if suffix == '\\': suffix += '\\' # depends on [control=['if'], data=['suffix']] file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root) # depends on [control=['if'], data=[]] return re.sub('[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
def main(): """ if lk.py is run as a script, this function will run """ parser = build_parser() args = parser.parse_args() flags = re.LOCALE if args.dot_all: flags |= re.DOTALL if args.ignorecase: flags |= re.IGNORECASE if args.unicode: flags |= re.UNICODE if args.multiline: flags |= re.MULTILINE exclude_path_flags = re.UNICODE | re.LOCALE exclude_path_regexes = [ re.compile(pattern, exclude_path_flags) for pattern in args.exclude_path_patterns ] pattern = re.escape(args.pattern) if args.escape else args.pattern try: search_manager = SearchManager(regex=re.compile(pattern, flags), number_processes=args.number_processes, search_hidden=args.search_hidden, follow_links=args.follow_links, search_binary=args.search_binary, use_ansi_colors=args.use_ansi_colors, print_stats=args.print_stats, exclude_path_regexes=exclude_path_regexes, command_strings=args.command_strings) search_manager.enqueue_directory(args.directory) search_manager.process_queue() except (KeyboardInterruptError, KeyboardInterrupt): sys.stdout.write('\n') exit(1)
def function[main, parameter[]]: constant[ if lk.py is run as a script, this function will run ] variable[parser] assign[=] call[name[build_parser], parameter[]] variable[args] assign[=] call[name[parser].parse_args, parameter[]] variable[flags] assign[=] name[re].LOCALE if name[args].dot_all begin[:] <ast.AugAssign object at 0x7da1b15b12d0> if name[args].ignorecase begin[:] <ast.AugAssign object at 0x7da1b15b11b0> if name[args].unicode begin[:] <ast.AugAssign object at 0x7da1b15b3820> if name[args].multiline begin[:] <ast.AugAssign object at 0x7da1b15b34f0> variable[exclude_path_flags] assign[=] binary_operation[name[re].UNICODE <ast.BitOr object at 0x7da2590d6aa0> name[re].LOCALE] variable[exclude_path_regexes] assign[=] <ast.ListComp object at 0x7da1b15b1120> variable[pattern] assign[=] <ast.IfExp object at 0x7da1b15b2fb0> <ast.Try object at 0x7da1b15b26e0>
keyword[def] identifier[main] (): literal[string] identifier[parser] = identifier[build_parser] () identifier[args] = identifier[parser] . identifier[parse_args] () identifier[flags] = identifier[re] . identifier[LOCALE] keyword[if] identifier[args] . identifier[dot_all] : identifier[flags] |= identifier[re] . identifier[DOTALL] keyword[if] identifier[args] . identifier[ignorecase] : identifier[flags] |= identifier[re] . identifier[IGNORECASE] keyword[if] identifier[args] . identifier[unicode] : identifier[flags] |= identifier[re] . identifier[UNICODE] keyword[if] identifier[args] . identifier[multiline] : identifier[flags] |= identifier[re] . identifier[MULTILINE] identifier[exclude_path_flags] = identifier[re] . identifier[UNICODE] | identifier[re] . identifier[LOCALE] identifier[exclude_path_regexes] =[ identifier[re] . identifier[compile] ( identifier[pattern] , identifier[exclude_path_flags] ) keyword[for] identifier[pattern] keyword[in] identifier[args] . identifier[exclude_path_patterns] ] identifier[pattern] = identifier[re] . identifier[escape] ( identifier[args] . identifier[pattern] ) keyword[if] identifier[args] . identifier[escape] keyword[else] identifier[args] . identifier[pattern] keyword[try] : identifier[search_manager] = identifier[SearchManager] ( identifier[regex] = identifier[re] . identifier[compile] ( identifier[pattern] , identifier[flags] ), identifier[number_processes] = identifier[args] . identifier[number_processes] , identifier[search_hidden] = identifier[args] . identifier[search_hidden] , identifier[follow_links] = identifier[args] . identifier[follow_links] , identifier[search_binary] = identifier[args] . identifier[search_binary] , identifier[use_ansi_colors] = identifier[args] . identifier[use_ansi_colors] , identifier[print_stats] = identifier[args] . identifier[print_stats] , identifier[exclude_path_regexes] = identifier[exclude_path_regexes] , identifier[command_strings] = identifier[args] . identifier[command_strings] ) identifier[search_manager] . identifier[enqueue_directory] ( identifier[args] . identifier[directory] ) identifier[search_manager] . identifier[process_queue] () keyword[except] ( identifier[KeyboardInterruptError] , identifier[KeyboardInterrupt] ): identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] ) identifier[exit] ( literal[int] )
def main(): """ if lk.py is run as a script, this function will run """ parser = build_parser() args = parser.parse_args() flags = re.LOCALE if args.dot_all: flags |= re.DOTALL # depends on [control=['if'], data=[]] if args.ignorecase: flags |= re.IGNORECASE # depends on [control=['if'], data=[]] if args.unicode: flags |= re.UNICODE # depends on [control=['if'], data=[]] if args.multiline: flags |= re.MULTILINE # depends on [control=['if'], data=[]] exclude_path_flags = re.UNICODE | re.LOCALE exclude_path_regexes = [re.compile(pattern, exclude_path_flags) for pattern in args.exclude_path_patterns] pattern = re.escape(args.pattern) if args.escape else args.pattern try: search_manager = SearchManager(regex=re.compile(pattern, flags), number_processes=args.number_processes, search_hidden=args.search_hidden, follow_links=args.follow_links, search_binary=args.search_binary, use_ansi_colors=args.use_ansi_colors, print_stats=args.print_stats, exclude_path_regexes=exclude_path_regexes, command_strings=args.command_strings) search_manager.enqueue_directory(args.directory) search_manager.process_queue() # depends on [control=['try'], data=[]] except (KeyboardInterruptError, KeyboardInterrupt): sys.stdout.write('\n') exit(1) # depends on [control=['except'], data=[]]
def p_statement_foreach(p): 'statement : FOREACH LPAREN expr AS foreach_variable foreach_optional_arg RPAREN foreach_statement' if p[6] is None: p[0] = ast.Foreach(p[3], None, p[5], p[8], lineno=p.lineno(1)) else: p[0] = ast.Foreach(p[3], p[5], p[6], p[8], lineno=p.lineno(1))
def function[p_statement_foreach, parameter[p]]: constant[statement : FOREACH LPAREN expr AS foreach_variable foreach_optional_arg RPAREN foreach_statement] if compare[call[name[p]][constant[6]] is constant[None]] begin[:] call[name[p]][constant[0]] assign[=] call[name[ast].Foreach, parameter[call[name[p]][constant[3]], constant[None], call[name[p]][constant[5]], call[name[p]][constant[8]]]]
keyword[def] identifier[p_statement_foreach] ( identifier[p] ): literal[string] keyword[if] identifier[p] [ literal[int] ] keyword[is] keyword[None] : identifier[p] [ literal[int] ]= identifier[ast] . identifier[Foreach] ( identifier[p] [ literal[int] ], keyword[None] , identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) keyword[else] : identifier[p] [ literal[int] ]= identifier[ast] . identifier[Foreach] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
def p_statement_foreach(p): """statement : FOREACH LPAREN expr AS foreach_variable foreach_optional_arg RPAREN foreach_statement""" if p[6] is None: p[0] = ast.Foreach(p[3], None, p[5], p[8], lineno=p.lineno(1)) # depends on [control=['if'], data=[]] else: p[0] = ast.Foreach(p[3], p[5], p[6], p[8], lineno=p.lineno(1))
def runGetVariant(self, id_): """ Returns a variant with the given id """ compoundId = datamodel.VariantCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) gaVariant = variantSet.getVariant(compoundId) # TODO variant is a special case here, as it's returning a # protocol element rather than a datamodel object. We should # fix this for consistency. jsonString = protocol.toJson(gaVariant) return jsonString
def function[runGetVariant, parameter[self, id_]]: constant[ Returns a variant with the given id ] variable[compoundId] assign[=] call[name[datamodel].VariantCompoundId.parse, parameter[name[id_]]] variable[dataset] assign[=] call[call[name[self].getDataRepository, parameter[]].getDataset, parameter[name[compoundId].dataset_id]] variable[variantSet] assign[=] call[name[dataset].getVariantSet, parameter[name[compoundId].variant_set_id]] variable[gaVariant] assign[=] call[name[variantSet].getVariant, parameter[name[compoundId]]] variable[jsonString] assign[=] call[name[protocol].toJson, parameter[name[gaVariant]]] return[name[jsonString]]
keyword[def] identifier[runGetVariant] ( identifier[self] , identifier[id_] ): literal[string] identifier[compoundId] = identifier[datamodel] . identifier[VariantCompoundId] . identifier[parse] ( identifier[id_] ) identifier[dataset] = identifier[self] . identifier[getDataRepository] (). identifier[getDataset] ( identifier[compoundId] . identifier[dataset_id] ) identifier[variantSet] = identifier[dataset] . identifier[getVariantSet] ( identifier[compoundId] . identifier[variant_set_id] ) identifier[gaVariant] = identifier[variantSet] . identifier[getVariant] ( identifier[compoundId] ) identifier[jsonString] = identifier[protocol] . identifier[toJson] ( identifier[gaVariant] ) keyword[return] identifier[jsonString]
def runGetVariant(self, id_): """ Returns a variant with the given id """ compoundId = datamodel.VariantCompoundId.parse(id_) dataset = self.getDataRepository().getDataset(compoundId.dataset_id) variantSet = dataset.getVariantSet(compoundId.variant_set_id) gaVariant = variantSet.getVariant(compoundId) # TODO variant is a special case here, as it's returning a # protocol element rather than a datamodel object. We should # fix this for consistency. jsonString = protocol.toJson(gaVariant) return jsonString
def check(self): """ Run the setting checker against the setting raw value. Raises: AttributeError: if the setting is missing and required. ValueError: (or other Exception) if the raw value is invalid. """ super(NestedSetting, self).check() errors = [] for subsetting in self.settings.values(): try: subsetting.check() except ValidationError as error: errors.extend(error.messages) if errors: raise ValidationError(errors)
def function[check, parameter[self]]: constant[ Run the setting checker against the setting raw value. Raises: AttributeError: if the setting is missing and required. ValueError: (or other Exception) if the raw value is invalid. ] call[call[name[super], parameter[name[NestedSetting], name[self]]].check, parameter[]] variable[errors] assign[=] list[[]] for taget[name[subsetting]] in starred[call[name[self].settings.values, parameter[]]] begin[:] <ast.Try object at 0x7da2047ea710> if name[errors] begin[:] <ast.Raise object at 0x7da2047ea740>
keyword[def] identifier[check] ( identifier[self] ): literal[string] identifier[super] ( identifier[NestedSetting] , identifier[self] ). identifier[check] () identifier[errors] =[] keyword[for] identifier[subsetting] keyword[in] identifier[self] . identifier[settings] . identifier[values] (): keyword[try] : identifier[subsetting] . identifier[check] () keyword[except] identifier[ValidationError] keyword[as] identifier[error] : identifier[errors] . identifier[extend] ( identifier[error] . identifier[messages] ) keyword[if] identifier[errors] : keyword[raise] identifier[ValidationError] ( identifier[errors] )
def check(self): """ Run the setting checker against the setting raw value. Raises: AttributeError: if the setting is missing and required. ValueError: (or other Exception) if the raw value is invalid. """ super(NestedSetting, self).check() errors = [] for subsetting in self.settings.values(): try: subsetting.check() # depends on [control=['try'], data=[]] except ValidationError as error: errors.extend(error.messages) # depends on [control=['except'], data=['error']] # depends on [control=['for'], data=['subsetting']] if errors: raise ValidationError(errors) # depends on [control=['if'], data=[]]
def commit(self, sourcedir, targetdir, abs_config, abs_sourcedir, abs_targetdir): """ Commit project structure and configuration file Args: sourcedir (string): Source directory path. targetdir (string): Compiled files target directory path. abs_config (string): Configuration file absolute path. abs_sourcedir (string): ``sourcedir`` expanded as absolute path. abs_targetdir (string): ``targetdir`` expanded as absolute path. """ config_path, config_filename = os.path.split(abs_config) if not os.path.exists(config_path): os.makedirs(config_path) if not os.path.exists(abs_sourcedir): os.makedirs(abs_sourcedir) if not os.path.exists(abs_targetdir): os.makedirs(abs_targetdir) # Dump settings file self.backend_engine.dump({ 'SOURCES_PATH': sourcedir, 'TARGET_PATH': targetdir, "LIBRARY_PATHS": [], "OUTPUT_STYLES": "nested", "SOURCE_COMMENTS": False, "EXCLUDES": [] }, abs_config, indent=4)
def function[commit, parameter[self, sourcedir, targetdir, abs_config, abs_sourcedir, abs_targetdir]]: constant[ Commit project structure and configuration file Args: sourcedir (string): Source directory path. targetdir (string): Compiled files target directory path. abs_config (string): Configuration file absolute path. abs_sourcedir (string): ``sourcedir`` expanded as absolute path. abs_targetdir (string): ``targetdir`` expanded as absolute path. ] <ast.Tuple object at 0x7da1b0a2ea70> assign[=] call[name[os].path.split, parameter[name[abs_config]]] if <ast.UnaryOp object at 0x7da1b0a2c460> begin[:] call[name[os].makedirs, parameter[name[config_path]]] if <ast.UnaryOp object at 0x7da1b0a2c1f0> begin[:] call[name[os].makedirs, parameter[name[abs_sourcedir]]] if <ast.UnaryOp object at 0x7da1b0a2d6c0> begin[:] call[name[os].makedirs, parameter[name[abs_targetdir]]] call[name[self].backend_engine.dump, parameter[dictionary[[<ast.Constant object at 0x7da1b0a2ded0>, <ast.Constant object at 0x7da1b0a2fe80>, <ast.Constant object at 0x7da1b0a2fa90>, <ast.Constant object at 0x7da1b0a2fb20>, <ast.Constant object at 0x7da1b0a2dbd0>, <ast.Constant object at 0x7da1b0a2c0d0>], [<ast.Name object at 0x7da1b0a2cca0>, <ast.Name object at 0x7da1b0a2df90>, <ast.List object at 0x7da1b0a2dc60>, <ast.Constant object at 0x7da1b0a2c220>, <ast.Constant object at 0x7da1b0a2f880>, <ast.List object at 0x7da1b0a2fbe0>]], name[abs_config]]]
keyword[def] identifier[commit] ( identifier[self] , identifier[sourcedir] , identifier[targetdir] , identifier[abs_config] , identifier[abs_sourcedir] , identifier[abs_targetdir] ): literal[string] identifier[config_path] , identifier[config_filename] = identifier[os] . identifier[path] . identifier[split] ( identifier[abs_config] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[config_path] ): identifier[os] . identifier[makedirs] ( identifier[config_path] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[abs_sourcedir] ): identifier[os] . identifier[makedirs] ( identifier[abs_sourcedir] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[abs_targetdir] ): identifier[os] . identifier[makedirs] ( identifier[abs_targetdir] ) identifier[self] . identifier[backend_engine] . identifier[dump] ({ literal[string] : identifier[sourcedir] , literal[string] : identifier[targetdir] , literal[string] :[], literal[string] : literal[string] , literal[string] : keyword[False] , literal[string] :[] }, identifier[abs_config] , identifier[indent] = literal[int] )
def commit(self, sourcedir, targetdir, abs_config, abs_sourcedir, abs_targetdir): """ Commit project structure and configuration file Args: sourcedir (string): Source directory path. targetdir (string): Compiled files target directory path. abs_config (string): Configuration file absolute path. abs_sourcedir (string): ``sourcedir`` expanded as absolute path. abs_targetdir (string): ``targetdir`` expanded as absolute path. """ (config_path, config_filename) = os.path.split(abs_config) if not os.path.exists(config_path): os.makedirs(config_path) # depends on [control=['if'], data=[]] if not os.path.exists(abs_sourcedir): os.makedirs(abs_sourcedir) # depends on [control=['if'], data=[]] if not os.path.exists(abs_targetdir): os.makedirs(abs_targetdir) # depends on [control=['if'], data=[]] # Dump settings file self.backend_engine.dump({'SOURCES_PATH': sourcedir, 'TARGET_PATH': targetdir, 'LIBRARY_PATHS': [], 'OUTPUT_STYLES': 'nested', 'SOURCE_COMMENTS': False, 'EXCLUDES': []}, abs_config, indent=4)
def nodeids(self, ivs=None, quantifier=None): """ Return the list of nodeids given by *ivs*, or all nodeids. Args: ivs: the intrinsic variables of the predications to select; if `None`, return all nodeids (but see *quantifier*) quantifier: if `True`, only return nodeids of quantifiers; if `False`, only return non-quantifiers; if `None` (the default), return both """ if ivs is None: nids = list(self._nodeids) else: _vars = self._vars nids = [] for iv in ivs: if iv in _vars and IVARG_ROLE in _vars[iv]['refs']: nids.extend(_vars[iv]['refs'][IVARG_ROLE]) else: raise KeyError(iv) if quantifier is not None: nids = [n for n in nids if self.ep(n).is_quantifier()==quantifier] return nids
def function[nodeids, parameter[self, ivs, quantifier]]: constant[ Return the list of nodeids given by *ivs*, or all nodeids. Args: ivs: the intrinsic variables of the predications to select; if `None`, return all nodeids (but see *quantifier*) quantifier: if `True`, only return nodeids of quantifiers; if `False`, only return non-quantifiers; if `None` (the default), return both ] if compare[name[ivs] is constant[None]] begin[:] variable[nids] assign[=] call[name[list], parameter[name[self]._nodeids]] if compare[name[quantifier] is_not constant[None]] begin[:] variable[nids] assign[=] <ast.ListComp object at 0x7da1b04fdff0> return[name[nids]]
keyword[def] identifier[nodeids] ( identifier[self] , identifier[ivs] = keyword[None] , identifier[quantifier] = keyword[None] ): literal[string] keyword[if] identifier[ivs] keyword[is] keyword[None] : identifier[nids] = identifier[list] ( identifier[self] . identifier[_nodeids] ) keyword[else] : identifier[_vars] = identifier[self] . identifier[_vars] identifier[nids] =[] keyword[for] identifier[iv] keyword[in] identifier[ivs] : keyword[if] identifier[iv] keyword[in] identifier[_vars] keyword[and] identifier[IVARG_ROLE] keyword[in] identifier[_vars] [ identifier[iv] ][ literal[string] ]: identifier[nids] . identifier[extend] ( identifier[_vars] [ identifier[iv] ][ literal[string] ][ identifier[IVARG_ROLE] ]) keyword[else] : keyword[raise] identifier[KeyError] ( identifier[iv] ) keyword[if] identifier[quantifier] keyword[is] keyword[not] keyword[None] : identifier[nids] =[ identifier[n] keyword[for] identifier[n] keyword[in] identifier[nids] keyword[if] identifier[self] . identifier[ep] ( identifier[n] ). identifier[is_quantifier] ()== identifier[quantifier] ] keyword[return] identifier[nids]
def nodeids(self, ivs=None, quantifier=None): """ Return the list of nodeids given by *ivs*, or all nodeids. Args: ivs: the intrinsic variables of the predications to select; if `None`, return all nodeids (but see *quantifier*) quantifier: if `True`, only return nodeids of quantifiers; if `False`, only return non-quantifiers; if `None` (the default), return both """ if ivs is None: nids = list(self._nodeids) # depends on [control=['if'], data=[]] else: _vars = self._vars nids = [] for iv in ivs: if iv in _vars and IVARG_ROLE in _vars[iv]['refs']: nids.extend(_vars[iv]['refs'][IVARG_ROLE]) # depends on [control=['if'], data=[]] else: raise KeyError(iv) # depends on [control=['for'], data=['iv']] if quantifier is not None: nids = [n for n in nids if self.ep(n).is_quantifier() == quantifier] # depends on [control=['if'], data=['quantifier']] return nids
def submit_plain_query(self, operation): """ Sends a plain query to server. This call will transition session into pending state. If some operation is currently pending on the session, it will be cancelled before sending this request. Spec: http://msdn.microsoft.com/en-us/library/dd358575.aspx :param operation: A string representing sql statement. """ self.messages = [] self.cancel_if_pending() self.res_info = None logger.info("Sending query %s", operation[:100]) w = self._writer with self.querying_context(tds_base.PacketType.QUERY): if tds_base.IS_TDS72_PLUS(self): self._start_query() w.write_ucs2(operation)
def function[submit_plain_query, parameter[self, operation]]: constant[ Sends a plain query to server. This call will transition session into pending state. If some operation is currently pending on the session, it will be cancelled before sending this request. Spec: http://msdn.microsoft.com/en-us/library/dd358575.aspx :param operation: A string representing sql statement. ] name[self].messages assign[=] list[[]] call[name[self].cancel_if_pending, parameter[]] name[self].res_info assign[=] constant[None] call[name[logger].info, parameter[constant[Sending query %s], call[name[operation]][<ast.Slice object at 0x7da1b0317d00>]]] variable[w] assign[=] name[self]._writer with call[name[self].querying_context, parameter[name[tds_base].PacketType.QUERY]] begin[:] if call[name[tds_base].IS_TDS72_PLUS, parameter[name[self]]] begin[:] call[name[self]._start_query, parameter[]] call[name[w].write_ucs2, parameter[name[operation]]]
keyword[def] identifier[submit_plain_query] ( identifier[self] , identifier[operation] ): literal[string] identifier[self] . identifier[messages] =[] identifier[self] . identifier[cancel_if_pending] () identifier[self] . identifier[res_info] = keyword[None] identifier[logger] . identifier[info] ( literal[string] , identifier[operation] [: literal[int] ]) identifier[w] = identifier[self] . identifier[_writer] keyword[with] identifier[self] . identifier[querying_context] ( identifier[tds_base] . identifier[PacketType] . identifier[QUERY] ): keyword[if] identifier[tds_base] . identifier[IS_TDS72_PLUS] ( identifier[self] ): identifier[self] . identifier[_start_query] () identifier[w] . identifier[write_ucs2] ( identifier[operation] )
def submit_plain_query(self, operation): """ Sends a plain query to server. This call will transition session into pending state. If some operation is currently pending on the session, it will be cancelled before sending this request. Spec: http://msdn.microsoft.com/en-us/library/dd358575.aspx :param operation: A string representing sql statement. """ self.messages = [] self.cancel_if_pending() self.res_info = None logger.info('Sending query %s', operation[:100]) w = self._writer with self.querying_context(tds_base.PacketType.QUERY): if tds_base.IS_TDS72_PLUS(self): self._start_query() # depends on [control=['if'], data=[]] w.write_ucs2(operation) # depends on [control=['with'], data=[]]
def sensorpod_status_encode(self, timestamp, visensor_rate_1, visensor_rate_2, visensor_rate_3, visensor_rate_4, recording_nodes_count, cpu_temp, free_space): ''' Monitoring of sensorpod status timestamp : Timestamp in linuxtime [ms] (since 1.1.1970) (uint64_t) visensor_rate_1 : Rate of ROS topic 1 (uint8_t) visensor_rate_2 : Rate of ROS topic 2 (uint8_t) visensor_rate_3 : Rate of ROS topic 3 (uint8_t) visensor_rate_4 : Rate of ROS topic 4 (uint8_t) recording_nodes_count : Number of recording nodes (uint8_t) cpu_temp : Temperature of sensorpod CPU in [deg C] (uint8_t) free_space : Free space available in recordings directory in [Gb] * 1e2 (uint16_t) ''' return MAVLink_sensorpod_status_message(timestamp, visensor_rate_1, visensor_rate_2, visensor_rate_3, visensor_rate_4, recording_nodes_count, cpu_temp, free_space)
def function[sensorpod_status_encode, parameter[self, timestamp, visensor_rate_1, visensor_rate_2, visensor_rate_3, visensor_rate_4, recording_nodes_count, cpu_temp, free_space]]: constant[ Monitoring of sensorpod status timestamp : Timestamp in linuxtime [ms] (since 1.1.1970) (uint64_t) visensor_rate_1 : Rate of ROS topic 1 (uint8_t) visensor_rate_2 : Rate of ROS topic 2 (uint8_t) visensor_rate_3 : Rate of ROS topic 3 (uint8_t) visensor_rate_4 : Rate of ROS topic 4 (uint8_t) recording_nodes_count : Number of recording nodes (uint8_t) cpu_temp : Temperature of sensorpod CPU in [deg C] (uint8_t) free_space : Free space available in recordings directory in [Gb] * 1e2 (uint16_t) ] return[call[name[MAVLink_sensorpod_status_message], parameter[name[timestamp], name[visensor_rate_1], name[visensor_rate_2], name[visensor_rate_3], name[visensor_rate_4], name[recording_nodes_count], name[cpu_temp], name[free_space]]]]
keyword[def] identifier[sensorpod_status_encode] ( identifier[self] , identifier[timestamp] , identifier[visensor_rate_1] , identifier[visensor_rate_2] , identifier[visensor_rate_3] , identifier[visensor_rate_4] , identifier[recording_nodes_count] , identifier[cpu_temp] , identifier[free_space] ): literal[string] keyword[return] identifier[MAVLink_sensorpod_status_message] ( identifier[timestamp] , identifier[visensor_rate_1] , identifier[visensor_rate_2] , identifier[visensor_rate_3] , identifier[visensor_rate_4] , identifier[recording_nodes_count] , identifier[cpu_temp] , identifier[free_space] )
def sensorpod_status_encode(self, timestamp, visensor_rate_1, visensor_rate_2, visensor_rate_3, visensor_rate_4, recording_nodes_count, cpu_temp, free_space): """ Monitoring of sensorpod status timestamp : Timestamp in linuxtime [ms] (since 1.1.1970) (uint64_t) visensor_rate_1 : Rate of ROS topic 1 (uint8_t) visensor_rate_2 : Rate of ROS topic 2 (uint8_t) visensor_rate_3 : Rate of ROS topic 3 (uint8_t) visensor_rate_4 : Rate of ROS topic 4 (uint8_t) recording_nodes_count : Number of recording nodes (uint8_t) cpu_temp : Temperature of sensorpod CPU in [deg C] (uint8_t) free_space : Free space available in recordings directory in [Gb] * 1e2 (uint16_t) """ return MAVLink_sensorpod_status_message(timestamp, visensor_rate_1, visensor_rate_2, visensor_rate_3, visensor_rate_4, recording_nodes_count, cpu_temp, free_space)
def read_line(self, line): """Read a new line""" if self.ignore: return for i, char in enumerate(line): if char not in ['"', "'"]: continue # Is the char escaped? if line[i - 1:i] == '\\': continue if self.single == char: self.single = None continue if self.single is not None: continue if not self.python: continue if self.triple == char: if line[i - 2:i + 1] == 3 * char: self.triple = None continue if self.triple is not None: continue if line[i - 2:i + 1] == 3 * char: self.triple = char continue self.single = char # Line ended if self.python: self.single = None
def function[read_line, parameter[self, line]]: constant[Read a new line] if name[self].ignore begin[:] return[None] for taget[tuple[[<ast.Name object at 0x7da18f58f610>, <ast.Name object at 0x7da18f58db70>]]] in starred[call[name[enumerate], parameter[name[line]]]] begin[:] if compare[name[char] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18f58cee0>, <ast.Constant object at 0x7da18f58e0e0>]]] begin[:] continue if compare[call[name[line]][<ast.Slice object at 0x7da18f58e260>] equal[==] constant[\]] begin[:] continue if compare[name[self].single equal[==] name[char]] begin[:] name[self].single assign[=] constant[None] continue if compare[name[self].single is_not constant[None]] begin[:] continue if <ast.UnaryOp object at 0x7da18f58c280> begin[:] continue if compare[name[self].triple equal[==] name[char]] begin[:] if compare[call[name[line]][<ast.Slice object at 0x7da18f58fb20>] equal[==] binary_operation[constant[3] * name[char]]] begin[:] name[self].triple assign[=] constant[None] continue if compare[name[self].triple is_not constant[None]] begin[:] continue if compare[call[name[line]][<ast.Slice object at 0x7da18f58e410>] equal[==] binary_operation[constant[3] * name[char]]] begin[:] name[self].triple assign[=] name[char] continue name[self].single assign[=] name[char] if name[self].python begin[:] name[self].single assign[=] constant[None]
keyword[def] identifier[read_line] ( identifier[self] , identifier[line] ): literal[string] keyword[if] identifier[self] . identifier[ignore] : keyword[return] keyword[for] identifier[i] , identifier[char] keyword[in] identifier[enumerate] ( identifier[line] ): keyword[if] identifier[char] keyword[not] keyword[in] [ literal[string] , literal[string] ]: keyword[continue] keyword[if] identifier[line] [ identifier[i] - literal[int] : identifier[i] ]== literal[string] : keyword[continue] keyword[if] identifier[self] . identifier[single] == identifier[char] : identifier[self] . identifier[single] = keyword[None] keyword[continue] keyword[if] identifier[self] . identifier[single] keyword[is] keyword[not] keyword[None] : keyword[continue] keyword[if] keyword[not] identifier[self] . identifier[python] : keyword[continue] keyword[if] identifier[self] . identifier[triple] == identifier[char] : keyword[if] identifier[line] [ identifier[i] - literal[int] : identifier[i] + literal[int] ]== literal[int] * identifier[char] : identifier[self] . identifier[triple] = keyword[None] keyword[continue] keyword[if] identifier[self] . identifier[triple] keyword[is] keyword[not] keyword[None] : keyword[continue] keyword[if] identifier[line] [ identifier[i] - literal[int] : identifier[i] + literal[int] ]== literal[int] * identifier[char] : identifier[self] . identifier[triple] = identifier[char] keyword[continue] identifier[self] . identifier[single] = identifier[char] keyword[if] identifier[self] . identifier[python] : identifier[self] . identifier[single] = keyword[None]
def read_line(self, line): """Read a new line""" if self.ignore: return # depends on [control=['if'], data=[]] for (i, char) in enumerate(line): if char not in ['"', "'"]: continue # depends on [control=['if'], data=[]] # Is the char escaped? if line[i - 1:i] == '\\': continue # depends on [control=['if'], data=[]] if self.single == char: self.single = None continue # depends on [control=['if'], data=[]] if self.single is not None: continue # depends on [control=['if'], data=[]] if not self.python: continue # depends on [control=['if'], data=[]] if self.triple == char: if line[i - 2:i + 1] == 3 * char: self.triple = None continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['char']] if self.triple is not None: continue # depends on [control=['if'], data=[]] if line[i - 2:i + 1] == 3 * char: self.triple = char continue # depends on [control=['if'], data=[]] self.single = char # depends on [control=['for'], data=[]] # Line ended if self.python: self.single = None # depends on [control=['if'], data=[]]
def permute(self, permutations: List[mx.nd.NDArray]) -> 'ParallelDataSet': """ Permutes the data within each bucket. The permutation is received as an argument, allowing the data to be unpermuted (i.e., restored) later on. :param permutations: For each bucket, a permutation of the data within that bucket. :return: A new, permuted ParallelDataSet. """ assert len(self) == len(permutations) source = [] target = [] label = [] for buck_idx in range(len(self)): num_samples = self.source[buck_idx].shape[0] if num_samples: # not empty bucket permutation = permutations[buck_idx] if isinstance(self.source[buck_idx], np.ndarray): source.append(self.source[buck_idx].take(np.int64(permutation.asnumpy()))) else: source.append(self.source[buck_idx].take(permutation)) target.append(self.target[buck_idx].take(permutation)) label.append(self.label[buck_idx].take(permutation)) else: source.append(self.source[buck_idx]) target.append(self.target[buck_idx]) label.append(self.label[buck_idx]) return ParallelDataSet(source, target, label)
def function[permute, parameter[self, permutations]]: constant[ Permutes the data within each bucket. The permutation is received as an argument, allowing the data to be unpermuted (i.e., restored) later on. :param permutations: For each bucket, a permutation of the data within that bucket. :return: A new, permuted ParallelDataSet. ] assert[compare[call[name[len], parameter[name[self]]] equal[==] call[name[len], parameter[name[permutations]]]]] variable[source] assign[=] list[[]] variable[target] assign[=] list[[]] variable[label] assign[=] list[[]] for taget[name[buck_idx]] in starred[call[name[range], parameter[call[name[len], parameter[name[self]]]]]] begin[:] variable[num_samples] assign[=] call[call[name[self].source][name[buck_idx]].shape][constant[0]] if name[num_samples] begin[:] variable[permutation] assign[=] call[name[permutations]][name[buck_idx]] if call[name[isinstance], parameter[call[name[self].source][name[buck_idx]], name[np].ndarray]] begin[:] call[name[source].append, parameter[call[call[name[self].source][name[buck_idx]].take, parameter[call[name[np].int64, parameter[call[name[permutation].asnumpy, parameter[]]]]]]]] call[name[target].append, parameter[call[call[name[self].target][name[buck_idx]].take, parameter[name[permutation]]]]] call[name[label].append, parameter[call[call[name[self].label][name[buck_idx]].take, parameter[name[permutation]]]]] return[call[name[ParallelDataSet], parameter[name[source], name[target], name[label]]]]
keyword[def] identifier[permute] ( identifier[self] , identifier[permutations] : identifier[List] [ identifier[mx] . identifier[nd] . identifier[NDArray] ])-> literal[string] : literal[string] keyword[assert] identifier[len] ( identifier[self] )== identifier[len] ( identifier[permutations] ) identifier[source] =[] identifier[target] =[] identifier[label] =[] keyword[for] identifier[buck_idx] keyword[in] identifier[range] ( identifier[len] ( identifier[self] )): identifier[num_samples] = identifier[self] . identifier[source] [ identifier[buck_idx] ]. identifier[shape] [ literal[int] ] keyword[if] identifier[num_samples] : identifier[permutation] = identifier[permutations] [ identifier[buck_idx] ] keyword[if] identifier[isinstance] ( identifier[self] . identifier[source] [ identifier[buck_idx] ], identifier[np] . identifier[ndarray] ): identifier[source] . identifier[append] ( identifier[self] . identifier[source] [ identifier[buck_idx] ]. identifier[take] ( identifier[np] . identifier[int64] ( identifier[permutation] . identifier[asnumpy] ()))) keyword[else] : identifier[source] . identifier[append] ( identifier[self] . identifier[source] [ identifier[buck_idx] ]. identifier[take] ( identifier[permutation] )) identifier[target] . identifier[append] ( identifier[self] . identifier[target] [ identifier[buck_idx] ]. identifier[take] ( identifier[permutation] )) identifier[label] . identifier[append] ( identifier[self] . identifier[label] [ identifier[buck_idx] ]. identifier[take] ( identifier[permutation] )) keyword[else] : identifier[source] . identifier[append] ( identifier[self] . identifier[source] [ identifier[buck_idx] ]) identifier[target] . identifier[append] ( identifier[self] . identifier[target] [ identifier[buck_idx] ]) identifier[label] . identifier[append] ( identifier[self] . identifier[label] [ identifier[buck_idx] ]) keyword[return] identifier[ParallelDataSet] ( identifier[source] , identifier[target] , identifier[label] )
def permute(self, permutations: List[mx.nd.NDArray]) -> 'ParallelDataSet': """ Permutes the data within each bucket. The permutation is received as an argument, allowing the data to be unpermuted (i.e., restored) later on. :param permutations: For each bucket, a permutation of the data within that bucket. :return: A new, permuted ParallelDataSet. """ assert len(self) == len(permutations) source = [] target = [] label = [] for buck_idx in range(len(self)): num_samples = self.source[buck_idx].shape[0] if num_samples: # not empty bucket permutation = permutations[buck_idx] if isinstance(self.source[buck_idx], np.ndarray): source.append(self.source[buck_idx].take(np.int64(permutation.asnumpy()))) # depends on [control=['if'], data=[]] else: source.append(self.source[buck_idx].take(permutation)) target.append(self.target[buck_idx].take(permutation)) label.append(self.label[buck_idx].take(permutation)) # depends on [control=['if'], data=[]] else: source.append(self.source[buck_idx]) target.append(self.target[buck_idx]) label.append(self.label[buck_idx]) # depends on [control=['for'], data=['buck_idx']] return ParallelDataSet(source, target, label)
def _build(argv, config, versions, current_name, is_root): """Build Sphinx docs via multiprocessing for isolation. :param tuple argv: Arguments to pass to Sphinx. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :param str current_name: The ref name of the current version being built. :param bool is_root: Is this build in the web root? """ # Patch. application.Config = ConfigInject if config.show_banner: EventHandlers.BANNER_GREATEST_TAG = config.banner_greatest_tag EventHandlers.BANNER_MAIN_VERSION = config.banner_main_ref EventHandlers.BANNER_RECENT_TAG = config.banner_recent_tag EventHandlers.SHOW_BANNER = True EventHandlers.CURRENT_VERSION = current_name EventHandlers.IS_ROOT = is_root EventHandlers.VERSIONS = versions SC_VERSIONING_VERSIONS[:] = [p for r in versions.remotes for p in sorted(r.items()) if p[0] not in ('sha', 'date')] # Update argv. if config.verbose > 1: argv += ('-v',) * (config.verbose - 1) if config.no_colors: argv += ('-N',) if config.overflow: argv += config.overflow # Build. result = build_main(argv) if result != 0: raise SphinxError
def function[_build, parameter[argv, config, versions, current_name, is_root]]: constant[Build Sphinx docs via multiprocessing for isolation. :param tuple argv: Arguments to pass to Sphinx. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :param str current_name: The ref name of the current version being built. :param bool is_root: Is this build in the web root? ] name[application].Config assign[=] name[ConfigInject] if name[config].show_banner begin[:] name[EventHandlers].BANNER_GREATEST_TAG assign[=] name[config].banner_greatest_tag name[EventHandlers].BANNER_MAIN_VERSION assign[=] name[config].banner_main_ref name[EventHandlers].BANNER_RECENT_TAG assign[=] name[config].banner_recent_tag name[EventHandlers].SHOW_BANNER assign[=] constant[True] name[EventHandlers].CURRENT_VERSION assign[=] name[current_name] name[EventHandlers].IS_ROOT assign[=] name[is_root] name[EventHandlers].VERSIONS assign[=] name[versions] call[name[SC_VERSIONING_VERSIONS]][<ast.Slice object at 0x7da1b1a5d210>] assign[=] <ast.ListComp object at 0x7da1b1a5d870> if compare[name[config].verbose greater[>] constant[1]] begin[:] <ast.AugAssign object at 0x7da1b1b7e4d0> if name[config].no_colors begin[:] <ast.AugAssign object at 0x7da1b1b7f340> if name[config].overflow begin[:] <ast.AugAssign object at 0x7da1b1bedcc0> variable[result] assign[=] call[name[build_main], parameter[name[argv]]] if compare[name[result] not_equal[!=] constant[0]] begin[:] <ast.Raise object at 0x7da1b1beeec0>
keyword[def] identifier[_build] ( identifier[argv] , identifier[config] , identifier[versions] , identifier[current_name] , identifier[is_root] ): literal[string] identifier[application] . identifier[Config] = identifier[ConfigInject] keyword[if] identifier[config] . identifier[show_banner] : identifier[EventHandlers] . identifier[BANNER_GREATEST_TAG] = identifier[config] . identifier[banner_greatest_tag] identifier[EventHandlers] . identifier[BANNER_MAIN_VERSION] = identifier[config] . identifier[banner_main_ref] identifier[EventHandlers] . identifier[BANNER_RECENT_TAG] = identifier[config] . identifier[banner_recent_tag] identifier[EventHandlers] . identifier[SHOW_BANNER] = keyword[True] identifier[EventHandlers] . identifier[CURRENT_VERSION] = identifier[current_name] identifier[EventHandlers] . identifier[IS_ROOT] = identifier[is_root] identifier[EventHandlers] . identifier[VERSIONS] = identifier[versions] identifier[SC_VERSIONING_VERSIONS] [:]=[ identifier[p] keyword[for] identifier[r] keyword[in] identifier[versions] . identifier[remotes] keyword[for] identifier[p] keyword[in] identifier[sorted] ( identifier[r] . identifier[items] ()) keyword[if] identifier[p] [ literal[int] ] keyword[not] keyword[in] ( literal[string] , literal[string] )] keyword[if] identifier[config] . identifier[verbose] > literal[int] : identifier[argv] +=( literal[string] ,)*( identifier[config] . identifier[verbose] - literal[int] ) keyword[if] identifier[config] . identifier[no_colors] : identifier[argv] +=( literal[string] ,) keyword[if] identifier[config] . identifier[overflow] : identifier[argv] += identifier[config] . identifier[overflow] identifier[result] = identifier[build_main] ( identifier[argv] ) keyword[if] identifier[result] != literal[int] : keyword[raise] identifier[SphinxError]
def _build(argv, config, versions, current_name, is_root): """Build Sphinx docs via multiprocessing for isolation. :param tuple argv: Arguments to pass to Sphinx. :param sphinxcontrib.versioning.lib.Config config: Runtime configuration. :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance. :param str current_name: The ref name of the current version being built. :param bool is_root: Is this build in the web root? """ # Patch. application.Config = ConfigInject if config.show_banner: EventHandlers.BANNER_GREATEST_TAG = config.banner_greatest_tag EventHandlers.BANNER_MAIN_VERSION = config.banner_main_ref EventHandlers.BANNER_RECENT_TAG = config.banner_recent_tag EventHandlers.SHOW_BANNER = True # depends on [control=['if'], data=[]] EventHandlers.CURRENT_VERSION = current_name EventHandlers.IS_ROOT = is_root EventHandlers.VERSIONS = versions SC_VERSIONING_VERSIONS[:] = [p for r in versions.remotes for p in sorted(r.items()) if p[0] not in ('sha', 'date')] # Update argv. if config.verbose > 1: argv += ('-v',) * (config.verbose - 1) # depends on [control=['if'], data=[]] if config.no_colors: argv += ('-N',) # depends on [control=['if'], data=[]] if config.overflow: argv += config.overflow # depends on [control=['if'], data=[]] # Build. result = build_main(argv) if result != 0: raise SphinxError # depends on [control=['if'], data=[]]
def join_regex(regexes): """Combine a list of regexes into one that matches any of them.""" if len(regexes) > 1: return "|".join(["(%s)" % r for r in regexes]) elif regexes: return regexes[0] else: return ""
def function[join_regex, parameter[regexes]]: constant[Combine a list of regexes into one that matches any of them.] if compare[call[name[len], parameter[name[regexes]]] greater[>] constant[1]] begin[:] return[call[constant[|].join, parameter[<ast.ListComp object at 0x7da20c991f90>]]]
keyword[def] identifier[join_regex] ( identifier[regexes] ): literal[string] keyword[if] identifier[len] ( identifier[regexes] )> literal[int] : keyword[return] literal[string] . identifier[join] ([ literal[string] % identifier[r] keyword[for] identifier[r] keyword[in] identifier[regexes] ]) keyword[elif] identifier[regexes] : keyword[return] identifier[regexes] [ literal[int] ] keyword[else] : keyword[return] literal[string]
def join_regex(regexes): """Combine a list of regexes into one that matches any of them.""" if len(regexes) > 1: return '|'.join(['(%s)' % r for r in regexes]) # depends on [control=['if'], data=[]] elif regexes: return regexes[0] # depends on [control=['if'], data=[]] else: return ''
def ReadHuntLogEntries(self, hunt_id, offset, count, with_substring=None): """Reads hunt log entries of a given hunt using given query options.""" all_entries = [] for flow_obj in self._GetHuntFlows(hunt_id): for entry in self.ReadFlowLogEntries( flow_obj.client_id, flow_obj.flow_id, 0, sys.maxsize, with_substring=with_substring): all_entries.append( rdf_flow_objects.FlowLogEntry( hunt_id=hunt_id, client_id=flow_obj.client_id, flow_id=flow_obj.flow_id, timestamp=entry.timestamp, message=entry.message)) return sorted(all_entries, key=lambda x: x.timestamp)[offset:offset + count]
def function[ReadHuntLogEntries, parameter[self, hunt_id, offset, count, with_substring]]: constant[Reads hunt log entries of a given hunt using given query options.] variable[all_entries] assign[=] list[[]] for taget[name[flow_obj]] in starred[call[name[self]._GetHuntFlows, parameter[name[hunt_id]]]] begin[:] for taget[name[entry]] in starred[call[name[self].ReadFlowLogEntries, parameter[name[flow_obj].client_id, name[flow_obj].flow_id, constant[0], name[sys].maxsize]]] begin[:] call[name[all_entries].append, parameter[call[name[rdf_flow_objects].FlowLogEntry, parameter[]]]] return[call[call[name[sorted], parameter[name[all_entries]]]][<ast.Slice object at 0x7da1b1c1bc40>]]
keyword[def] identifier[ReadHuntLogEntries] ( identifier[self] , identifier[hunt_id] , identifier[offset] , identifier[count] , identifier[with_substring] = keyword[None] ): literal[string] identifier[all_entries] =[] keyword[for] identifier[flow_obj] keyword[in] identifier[self] . identifier[_GetHuntFlows] ( identifier[hunt_id] ): keyword[for] identifier[entry] keyword[in] identifier[self] . identifier[ReadFlowLogEntries] ( identifier[flow_obj] . identifier[client_id] , identifier[flow_obj] . identifier[flow_id] , literal[int] , identifier[sys] . identifier[maxsize] , identifier[with_substring] = identifier[with_substring] ): identifier[all_entries] . identifier[append] ( identifier[rdf_flow_objects] . identifier[FlowLogEntry] ( identifier[hunt_id] = identifier[hunt_id] , identifier[client_id] = identifier[flow_obj] . identifier[client_id] , identifier[flow_id] = identifier[flow_obj] . identifier[flow_id] , identifier[timestamp] = identifier[entry] . identifier[timestamp] , identifier[message] = identifier[entry] . identifier[message] )) keyword[return] identifier[sorted] ( identifier[all_entries] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[timestamp] )[ identifier[offset] : identifier[offset] + identifier[count] ]
def ReadHuntLogEntries(self, hunt_id, offset, count, with_substring=None): """Reads hunt log entries of a given hunt using given query options.""" all_entries = [] for flow_obj in self._GetHuntFlows(hunt_id): for entry in self.ReadFlowLogEntries(flow_obj.client_id, flow_obj.flow_id, 0, sys.maxsize, with_substring=with_substring): all_entries.append(rdf_flow_objects.FlowLogEntry(hunt_id=hunt_id, client_id=flow_obj.client_id, flow_id=flow_obj.flow_id, timestamp=entry.timestamp, message=entry.message)) # depends on [control=['for'], data=['entry']] # depends on [control=['for'], data=['flow_obj']] return sorted(all_entries, key=lambda x: x.timestamp)[offset:offset + count]
def clear_graph(identifier=None): """ Clean up a graph by removing it :param identifier: Root identifier of the graph :return: """ graph = get_graph() if identifier: graph.destroy(identifier) try: graph.close() except: warn("Unable to close the Graph")
def function[clear_graph, parameter[identifier]]: constant[ Clean up a graph by removing it :param identifier: Root identifier of the graph :return: ] variable[graph] assign[=] call[name[get_graph], parameter[]] if name[identifier] begin[:] call[name[graph].destroy, parameter[name[identifier]]] <ast.Try object at 0x7da20c6e4370>
keyword[def] identifier[clear_graph] ( identifier[identifier] = keyword[None] ): literal[string] identifier[graph] = identifier[get_graph] () keyword[if] identifier[identifier] : identifier[graph] . identifier[destroy] ( identifier[identifier] ) keyword[try] : identifier[graph] . identifier[close] () keyword[except] : identifier[warn] ( literal[string] )
def clear_graph(identifier=None): """ Clean up a graph by removing it :param identifier: Root identifier of the graph :return: """ graph = get_graph() if identifier: graph.destroy(identifier) # depends on [control=['if'], data=[]] try: graph.close() # depends on [control=['try'], data=[]] except: warn('Unable to close the Graph') # depends on [control=['except'], data=[]]
def __type_matches(self, obj: Any, type_: Type) -> bool: """Checks that the object matches the given type. Like isinstance(), but will work with union types using Union, \ Dict and List. Args: obj: The object to check type_: The type to check against Returns: True iff obj is of type type_ """ if is_generic_union(type_): for t in generic_type_args(type_): if self.__type_matches(obj, t): return True return False elif is_generic_list(type_): if not isinstance(obj, list): return False for item in obj: if not self.__type_matches(item, generic_type_args(type_)[0]): return False return True elif is_generic_dict(type_): if not isinstance(obj, OrderedDict): return False for key, value in obj: if not isinstance(key, generic_type_args(type_)[0]): return False if not self.__type_matches(value, generic_type_args(type_)[1]): return False return True else: return isinstance(obj, type_)
def function[__type_matches, parameter[self, obj, type_]]: constant[Checks that the object matches the given type. Like isinstance(), but will work with union types using Union, Dict and List. Args: obj: The object to check type_: The type to check against Returns: True iff obj is of type type_ ] if call[name[is_generic_union], parameter[name[type_]]] begin[:] for taget[name[t]] in starred[call[name[generic_type_args], parameter[name[type_]]]] begin[:] if call[name[self].__type_matches, parameter[name[obj], name[t]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[__type_matches] ( identifier[self] , identifier[obj] : identifier[Any] , identifier[type_] : identifier[Type] )-> identifier[bool] : literal[string] keyword[if] identifier[is_generic_union] ( identifier[type_] ): keyword[for] identifier[t] keyword[in] identifier[generic_type_args] ( identifier[type_] ): keyword[if] identifier[self] . identifier[__type_matches] ( identifier[obj] , identifier[t] ): keyword[return] keyword[True] keyword[return] keyword[False] keyword[elif] identifier[is_generic_list] ( identifier[type_] ): keyword[if] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[list] ): keyword[return] keyword[False] keyword[for] identifier[item] keyword[in] identifier[obj] : keyword[if] keyword[not] identifier[self] . identifier[__type_matches] ( identifier[item] , identifier[generic_type_args] ( identifier[type_] )[ literal[int] ]): keyword[return] keyword[False] keyword[return] keyword[True] keyword[elif] identifier[is_generic_dict] ( identifier[type_] ): keyword[if] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[OrderedDict] ): keyword[return] keyword[False] keyword[for] identifier[key] , identifier[value] keyword[in] identifier[obj] : keyword[if] keyword[not] identifier[isinstance] ( identifier[key] , identifier[generic_type_args] ( identifier[type_] )[ literal[int] ]): keyword[return] keyword[False] keyword[if] keyword[not] identifier[self] . identifier[__type_matches] ( identifier[value] , identifier[generic_type_args] ( identifier[type_] )[ literal[int] ]): keyword[return] keyword[False] keyword[return] keyword[True] keyword[else] : keyword[return] identifier[isinstance] ( identifier[obj] , identifier[type_] )
def __type_matches(self, obj: Any, type_: Type) -> bool: """Checks that the object matches the given type. Like isinstance(), but will work with union types using Union, Dict and List. Args: obj: The object to check type_: The type to check against Returns: True iff obj is of type type_ """ if is_generic_union(type_): for t in generic_type_args(type_): if self.__type_matches(obj, t): return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['t']] return False # depends on [control=['if'], data=[]] elif is_generic_list(type_): if not isinstance(obj, list): return False # depends on [control=['if'], data=[]] for item in obj: if not self.__type_matches(item, generic_type_args(type_)[0]): return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] return True # depends on [control=['if'], data=[]] elif is_generic_dict(type_): if not isinstance(obj, OrderedDict): return False # depends on [control=['if'], data=[]] for (key, value) in obj: if not isinstance(key, generic_type_args(type_)[0]): return False # depends on [control=['if'], data=[]] if not self.__type_matches(value, generic_type_args(type_)[1]): return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return True # depends on [control=['if'], data=[]] else: return isinstance(obj, type_)
def navigation_info(request): '''Expose whether to display the navigation header and footer''' if request.GET.get('wafer_hide_navigation') == "1": nav_class = "wafer-invisible" else: nav_class = "wafer-visible" context = { 'WAFER_NAVIGATION_VISIBILITY': nav_class, } return context
def function[navigation_info, parameter[request]]: constant[Expose whether to display the navigation header and footer] if compare[call[name[request].GET.get, parameter[constant[wafer_hide_navigation]]] equal[==] constant[1]] begin[:] variable[nav_class] assign[=] constant[wafer-invisible] variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e4c100>], [<ast.Name object at 0x7da1b0e4fca0>]] return[name[context]]
keyword[def] identifier[navigation_info] ( identifier[request] ): literal[string] keyword[if] identifier[request] . identifier[GET] . identifier[get] ( literal[string] )== literal[string] : identifier[nav_class] = literal[string] keyword[else] : identifier[nav_class] = literal[string] identifier[context] ={ literal[string] : identifier[nav_class] , } keyword[return] identifier[context]
def navigation_info(request): """Expose whether to display the navigation header and footer""" if request.GET.get('wafer_hide_navigation') == '1': nav_class = 'wafer-invisible' # depends on [control=['if'], data=[]] else: nav_class = 'wafer-visible' context = {'WAFER_NAVIGATION_VISIBILITY': nav_class} return context
def _preprocess_input(self, input): ''' Preprocesses the input before it's split into a list. ''' if not re.search(preprocess_chars, input): # No characters that we need to preprocess, so continue without. return input input = self._add_punctuation_spacing(input) return input
def function[_preprocess_input, parameter[self, input]]: constant[ Preprocesses the input before it's split into a list. ] if <ast.UnaryOp object at 0x7da2054a4850> begin[:] return[name[input]] variable[input] assign[=] call[name[self]._add_punctuation_spacing, parameter[name[input]]] return[name[input]]
keyword[def] identifier[_preprocess_input] ( identifier[self] , identifier[input] ): literal[string] keyword[if] keyword[not] identifier[re] . identifier[search] ( identifier[preprocess_chars] , identifier[input] ): keyword[return] identifier[input] identifier[input] = identifier[self] . identifier[_add_punctuation_spacing] ( identifier[input] ) keyword[return] identifier[input]
def _preprocess_input(self, input): """ Preprocesses the input before it's split into a list. """ if not re.search(preprocess_chars, input): # No characters that we need to preprocess, so continue without. return input # depends on [control=['if'], data=[]] input = self._add_punctuation_spacing(input) return input
def GetDecoder(cls, encoding_method): """Retrieves the decoder object for a specific encoding method. Args: encoding_method (str): encoding method identifier. Returns: Decoder: decoder or None if the encoding method does not exists. """ encoding_method = encoding_method.lower() decoder = cls._decoders.get(encoding_method, None) if not decoder: return None return decoder()
def function[GetDecoder, parameter[cls, encoding_method]]: constant[Retrieves the decoder object for a specific encoding method. Args: encoding_method (str): encoding method identifier. Returns: Decoder: decoder or None if the encoding method does not exists. ] variable[encoding_method] assign[=] call[name[encoding_method].lower, parameter[]] variable[decoder] assign[=] call[name[cls]._decoders.get, parameter[name[encoding_method], constant[None]]] if <ast.UnaryOp object at 0x7da1b065bd00> begin[:] return[constant[None]] return[call[name[decoder], parameter[]]]
keyword[def] identifier[GetDecoder] ( identifier[cls] , identifier[encoding_method] ): literal[string] identifier[encoding_method] = identifier[encoding_method] . identifier[lower] () identifier[decoder] = identifier[cls] . identifier[_decoders] . identifier[get] ( identifier[encoding_method] , keyword[None] ) keyword[if] keyword[not] identifier[decoder] : keyword[return] keyword[None] keyword[return] identifier[decoder] ()
def GetDecoder(cls, encoding_method): """Retrieves the decoder object for a specific encoding method. Args: encoding_method (str): encoding method identifier. Returns: Decoder: decoder or None if the encoding method does not exists. """ encoding_method = encoding_method.lower() decoder = cls._decoders.get(encoding_method, None) if not decoder: return None # depends on [control=['if'], data=[]] return decoder()
def calacs(input_file, exec_path=None, time_stamps=False, temp_files=False, verbose=False, debug=False, quiet=False, single_core=False, exe_args=None): """ Run the calacs.e executable as from the shell. By default this will run the calacs given by 'calacs.e'. Parameters ---------- input_file : str Name of input file. exec_path : str, optional The complete path to a calacs executable. time_stamps : bool, optional Set to True to turn on the printing of time stamps. temp_files : bool, optional Set to True to have CALACS save temporary files. verbose : bool, optional Set to True for verbose output. debug : bool, optional Set to True to turn on debugging output. quiet : bool, optional Set to True for quiet output. single_core : bool, optional CTE correction in CALACS will by default try to use all available CPUs on your computer. Set this to True to force the use of just one CPU. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) call_list = [exec_path] else: call_list = ['calacs.e'] if time_stamps: call_list.append('-t') if temp_files: call_list.append('-s') if verbose: call_list.append('-v') if debug: call_list.append('-d') if quiet: call_list.append('-q') if single_core: call_list.append('-1') if not os.path.exists(input_file): raise IOError('Input file not found: ' + input_file) call_list.append(input_file) if exe_args: call_list.extend(exe_args) subprocess.check_call(call_list)
def function[calacs, parameter[input_file, exec_path, time_stamps, temp_files, verbose, debug, quiet, single_core, exe_args]]: constant[ Run the calacs.e executable as from the shell. By default this will run the calacs given by 'calacs.e'. Parameters ---------- input_file : str Name of input file. exec_path : str, optional The complete path to a calacs executable. time_stamps : bool, optional Set to True to turn on the printing of time stamps. temp_files : bool, optional Set to True to have CALACS save temporary files. verbose : bool, optional Set to True for verbose output. debug : bool, optional Set to True to turn on debugging output. quiet : bool, optional Set to True for quiet output. single_core : bool, optional CTE correction in CALACS will by default try to use all available CPUs on your computer. Set this to True to force the use of just one CPU. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] ] if name[exec_path] begin[:] if <ast.UnaryOp object at 0x7da20e960700> begin[:] <ast.Raise object at 0x7da20e9602e0> variable[call_list] assign[=] list[[<ast.Name object at 0x7da20c6e7d30>]] if name[time_stamps] begin[:] call[name[call_list].append, parameter[constant[-t]]] if name[temp_files] begin[:] call[name[call_list].append, parameter[constant[-s]]] if name[verbose] begin[:] call[name[call_list].append, parameter[constant[-v]]] if name[debug] begin[:] call[name[call_list].append, parameter[constant[-d]]] if name[quiet] begin[:] call[name[call_list].append, parameter[constant[-q]]] if name[single_core] begin[:] call[name[call_list].append, parameter[constant[-1]]] if <ast.UnaryOp object at 0x7da20c991840> begin[:] <ast.Raise object at 0x7da20c993f10> call[name[call_list].append, parameter[name[input_file]]] if name[exe_args] begin[:] call[name[call_list].extend, parameter[name[exe_args]]] call[name[subprocess].check_call, parameter[name[call_list]]]
keyword[def] identifier[calacs] ( identifier[input_file] , identifier[exec_path] = keyword[None] , identifier[time_stamps] = keyword[False] , identifier[temp_files] = keyword[False] , identifier[verbose] = keyword[False] , identifier[debug] = keyword[False] , identifier[quiet] = keyword[False] , identifier[single_core] = keyword[False] , identifier[exe_args] = keyword[None] ): literal[string] keyword[if] identifier[exec_path] : keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[exec_path] ): keyword[raise] identifier[OSError] ( literal[string] + identifier[exec_path] ) identifier[call_list] =[ identifier[exec_path] ] keyword[else] : identifier[call_list] =[ literal[string] ] keyword[if] identifier[time_stamps] : identifier[call_list] . identifier[append] ( literal[string] ) keyword[if] identifier[temp_files] : identifier[call_list] . identifier[append] ( literal[string] ) keyword[if] identifier[verbose] : identifier[call_list] . identifier[append] ( literal[string] ) keyword[if] identifier[debug] : identifier[call_list] . identifier[append] ( literal[string] ) keyword[if] identifier[quiet] : identifier[call_list] . identifier[append] ( literal[string] ) keyword[if] identifier[single_core] : identifier[call_list] . identifier[append] ( literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[input_file] ): keyword[raise] identifier[IOError] ( literal[string] + identifier[input_file] ) identifier[call_list] . identifier[append] ( identifier[input_file] ) keyword[if] identifier[exe_args] : identifier[call_list] . identifier[extend] ( identifier[exe_args] ) identifier[subprocess] . identifier[check_call] ( identifier[call_list] )
def calacs(input_file, exec_path=None, time_stamps=False, temp_files=False, verbose=False, debug=False, quiet=False, single_core=False, exe_args=None): """ Run the calacs.e executable as from the shell. By default this will run the calacs given by 'calacs.e'. Parameters ---------- input_file : str Name of input file. exec_path : str, optional The complete path to a calacs executable. time_stamps : bool, optional Set to True to turn on the printing of time stamps. temp_files : bool, optional Set to True to have CALACS save temporary files. verbose : bool, optional Set to True for verbose output. debug : bool, optional Set to True to turn on debugging output. quiet : bool, optional Set to True for quiet output. single_core : bool, optional CTE correction in CALACS will by default try to use all available CPUs on your computer. Set this to True to force the use of just one CPU. exe_args : list, optional Arbitrary arguments passed to underlying executable call. Note: Implementation uses subprocess.call and whitespace is not permitted. E.g. use exe_args=['--nThreads', '1'] """ if exec_path: if not os.path.exists(exec_path): raise OSError('Executable not found: ' + exec_path) # depends on [control=['if'], data=[]] call_list = [exec_path] # depends on [control=['if'], data=[]] else: call_list = ['calacs.e'] if time_stamps: call_list.append('-t') # depends on [control=['if'], data=[]] if temp_files: call_list.append('-s') # depends on [control=['if'], data=[]] if verbose: call_list.append('-v') # depends on [control=['if'], data=[]] if debug: call_list.append('-d') # depends on [control=['if'], data=[]] if quiet: call_list.append('-q') # depends on [control=['if'], data=[]] if single_core: call_list.append('-1') # depends on [control=['if'], data=[]] if not os.path.exists(input_file): raise IOError('Input file not found: ' + input_file) # depends on [control=['if'], data=[]] call_list.append(input_file) if exe_args: call_list.extend(exe_args) # depends on [control=['if'], data=[]] subprocess.check_call(call_list)
def register_system_role(self, system_role): """Register a system role. .. note:: A system role can't be registered two times. If it happens, then an assert exception will be raised. :param system_role: The system role to be registered. """ assert system_role.value not in self.system_roles self.system_roles[system_role.value] = system_role
def function[register_system_role, parameter[self, system_role]]: constant[Register a system role. .. note:: A system role can't be registered two times. If it happens, then an assert exception will be raised. :param system_role: The system role to be registered. ] assert[compare[name[system_role].value <ast.NotIn object at 0x7da2590d7190> name[self].system_roles]] call[name[self].system_roles][name[system_role].value] assign[=] name[system_role]
keyword[def] identifier[register_system_role] ( identifier[self] , identifier[system_role] ): literal[string] keyword[assert] identifier[system_role] . identifier[value] keyword[not] keyword[in] identifier[self] . identifier[system_roles] identifier[self] . identifier[system_roles] [ identifier[system_role] . identifier[value] ]= identifier[system_role]
def register_system_role(self, system_role): """Register a system role. .. note:: A system role can't be registered two times. If it happens, then an assert exception will be raised. :param system_role: The system role to be registered. """ assert system_role.value not in self.system_roles self.system_roles[system_role.value] = system_role
def isfortran(env, source): """Return 1 if any of code in source has fortran files in it, 0 otherwise.""" try: fsuffixes = env['FORTRANSUFFIXES'] except KeyError: # If no FORTRANSUFFIXES, no fortran tool, so there is no need to look # for fortran sources. return 0 if not source: # Source might be None for unusual cases like SConf. return 0 for s in source: if s.sources: ext = os.path.splitext(str(s.sources[0]))[1] if ext in fsuffixes: return 1 return 0
def function[isfortran, parameter[env, source]]: constant[Return 1 if any of code in source has fortran files in it, 0 otherwise.] <ast.Try object at 0x7da20e9b0160> if <ast.UnaryOp object at 0x7da20e9b3610> begin[:] return[constant[0]] for taget[name[s]] in starred[name[source]] begin[:] if name[s].sources begin[:] variable[ext] assign[=] call[call[name[os].path.splitext, parameter[call[name[str], parameter[call[name[s].sources][constant[0]]]]]]][constant[1]] if compare[name[ext] in name[fsuffixes]] begin[:] return[constant[1]] return[constant[0]]
keyword[def] identifier[isfortran] ( identifier[env] , identifier[source] ): literal[string] keyword[try] : identifier[fsuffixes] = identifier[env] [ literal[string] ] keyword[except] identifier[KeyError] : keyword[return] literal[int] keyword[if] keyword[not] identifier[source] : keyword[return] literal[int] keyword[for] identifier[s] keyword[in] identifier[source] : keyword[if] identifier[s] . identifier[sources] : identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[str] ( identifier[s] . identifier[sources] [ literal[int] ]))[ literal[int] ] keyword[if] identifier[ext] keyword[in] identifier[fsuffixes] : keyword[return] literal[int] keyword[return] literal[int]
def isfortran(env, source): """Return 1 if any of code in source has fortran files in it, 0 otherwise.""" try: fsuffixes = env['FORTRANSUFFIXES'] # depends on [control=['try'], data=[]] except KeyError: # If no FORTRANSUFFIXES, no fortran tool, so there is no need to look # for fortran sources. return 0 # depends on [control=['except'], data=[]] if not source: # Source might be None for unusual cases like SConf. return 0 # depends on [control=['if'], data=[]] for s in source: if s.sources: ext = os.path.splitext(str(s.sources[0]))[1] if ext in fsuffixes: return 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']] return 0
def get(self, call_sid): """ Constructs a ParticipantContext :param call_sid: The Call SID of the resource to fetch :returns: twilio.rest.api.v2010.account.conference.participant.ParticipantContext :rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext """ return ParticipantContext( self._version, account_sid=self._solution['account_sid'], conference_sid=self._solution['conference_sid'], call_sid=call_sid, )
def function[get, parameter[self, call_sid]]: constant[ Constructs a ParticipantContext :param call_sid: The Call SID of the resource to fetch :returns: twilio.rest.api.v2010.account.conference.participant.ParticipantContext :rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext ] return[call[name[ParticipantContext], parameter[name[self]._version]]]
keyword[def] identifier[get] ( identifier[self] , identifier[call_sid] ): literal[string] keyword[return] identifier[ParticipantContext] ( identifier[self] . identifier[_version] , identifier[account_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[conference_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[call_sid] = identifier[call_sid] , )
def get(self, call_sid): """ Constructs a ParticipantContext :param call_sid: The Call SID of the resource to fetch :returns: twilio.rest.api.v2010.account.conference.participant.ParticipantContext :rtype: twilio.rest.api.v2010.account.conference.participant.ParticipantContext """ return ParticipantContext(self._version, account_sid=self._solution['account_sid'], conference_sid=self._solution['conference_sid'], call_sid=call_sid)
def frameify(self, state, data): """Yield chunk data as a single frame, and buffer the rest.""" # If we've pulled in all the chunk data, buffer the data if state.chunk_remaining <= 0: state.recv_buf += data return # Pull in any partially-processed data data = state.recv_buf + data # Determine how much belongs to the chunk if len(data) <= state.chunk_remaining: chunk = data data = '' else: # Pull out only what's part of the chunk chunk = data[:state.chunk_remaining] data = data[state.chunk_remaining:] # Update the state state.recv_buf = data state.chunk_remaining -= len(chunk) # Yield the chunk try: yield chunk except FrameSwitch: pass
def function[frameify, parameter[self, state, data]]: constant[Yield chunk data as a single frame, and buffer the rest.] if compare[name[state].chunk_remaining less_or_equal[<=] constant[0]] begin[:] <ast.AugAssign object at 0x7da20cabe7d0> return[None] variable[data] assign[=] binary_operation[name[state].recv_buf + name[data]] if compare[call[name[len], parameter[name[data]]] less_or_equal[<=] name[state].chunk_remaining] begin[:] variable[chunk] assign[=] name[data] variable[data] assign[=] constant[] name[state].recv_buf assign[=] name[data] <ast.AugAssign object at 0x7da20cabdd20> <ast.Try object at 0x7da20cabcd90>
keyword[def] identifier[frameify] ( identifier[self] , identifier[state] , identifier[data] ): literal[string] keyword[if] identifier[state] . identifier[chunk_remaining] <= literal[int] : identifier[state] . identifier[recv_buf] += identifier[data] keyword[return] identifier[data] = identifier[state] . identifier[recv_buf] + identifier[data] keyword[if] identifier[len] ( identifier[data] )<= identifier[state] . identifier[chunk_remaining] : identifier[chunk] = identifier[data] identifier[data] = literal[string] keyword[else] : identifier[chunk] = identifier[data] [: identifier[state] . identifier[chunk_remaining] ] identifier[data] = identifier[data] [ identifier[state] . identifier[chunk_remaining] :] identifier[state] . identifier[recv_buf] = identifier[data] identifier[state] . identifier[chunk_remaining] -= identifier[len] ( identifier[chunk] ) keyword[try] : keyword[yield] identifier[chunk] keyword[except] identifier[FrameSwitch] : keyword[pass]
def frameify(self, state, data): """Yield chunk data as a single frame, and buffer the rest.""" # If we've pulled in all the chunk data, buffer the data if state.chunk_remaining <= 0: state.recv_buf += data return # depends on [control=['if'], data=[]] # Pull in any partially-processed data data = state.recv_buf + data # Determine how much belongs to the chunk if len(data) <= state.chunk_remaining: chunk = data data = '' # depends on [control=['if'], data=[]] else: # Pull out only what's part of the chunk chunk = data[:state.chunk_remaining] data = data[state.chunk_remaining:] # Update the state state.recv_buf = data state.chunk_remaining -= len(chunk) # Yield the chunk try: yield chunk # depends on [control=['try'], data=[]] except FrameSwitch: pass # depends on [control=['except'], data=[]]
def create(self, host_list=[], serial=None, instance_name=None, use_mgmt_port=False, interval=None, bandwidth_base=None, bandwidth_unrestricted=None): """Creates a license manager entry Keyword arguments: instance_name -- license manager instance name host_list -- list(dict) a list of dictionaries of the format: {'ip': '127.0.0.1', 'port': 443} serial - (str) appliance serial number use_mgmt_port - (bool) use management for license interactions interval - (int) 1=Monthly, 2=Daily, 3=Hourly bandwidth_base - (int) Configure feature bandwidth base (Mb) Valid range - 10-102400 bandwidth_unrestricted - (bool) Set the bandwidth to maximum """ payload = self._build_payload(host_list=host_list, serial=serial, instance_name=instance_name, use_mgmt_port=use_mgmt_port, interval=interval, bandwidth_base=bandwidth_base, bandwidth_unrestricted=bandwidth_unrestricted) return self._post(self.url_base, payload)
def function[create, parameter[self, host_list, serial, instance_name, use_mgmt_port, interval, bandwidth_base, bandwidth_unrestricted]]: constant[Creates a license manager entry Keyword arguments: instance_name -- license manager instance name host_list -- list(dict) a list of dictionaries of the format: {'ip': '127.0.0.1', 'port': 443} serial - (str) appliance serial number use_mgmt_port - (bool) use management for license interactions interval - (int) 1=Monthly, 2=Daily, 3=Hourly bandwidth_base - (int) Configure feature bandwidth base (Mb) Valid range - 10-102400 bandwidth_unrestricted - (bool) Set the bandwidth to maximum ] variable[payload] assign[=] call[name[self]._build_payload, parameter[]] return[call[name[self]._post, parameter[name[self].url_base, name[payload]]]]
keyword[def] identifier[create] ( identifier[self] , identifier[host_list] =[], identifier[serial] = keyword[None] , identifier[instance_name] = keyword[None] , identifier[use_mgmt_port] = keyword[False] , identifier[interval] = keyword[None] , identifier[bandwidth_base] = keyword[None] , identifier[bandwidth_unrestricted] = keyword[None] ): literal[string] identifier[payload] = identifier[self] . identifier[_build_payload] ( identifier[host_list] = identifier[host_list] , identifier[serial] = identifier[serial] , identifier[instance_name] = identifier[instance_name] , identifier[use_mgmt_port] = identifier[use_mgmt_port] , identifier[interval] = identifier[interval] , identifier[bandwidth_base] = identifier[bandwidth_base] , identifier[bandwidth_unrestricted] = identifier[bandwidth_unrestricted] ) keyword[return] identifier[self] . identifier[_post] ( identifier[self] . identifier[url_base] , identifier[payload] )
def create(self, host_list=[], serial=None, instance_name=None, use_mgmt_port=False, interval=None, bandwidth_base=None, bandwidth_unrestricted=None): """Creates a license manager entry Keyword arguments: instance_name -- license manager instance name host_list -- list(dict) a list of dictionaries of the format: {'ip': '127.0.0.1', 'port': 443} serial - (str) appliance serial number use_mgmt_port - (bool) use management for license interactions interval - (int) 1=Monthly, 2=Daily, 3=Hourly bandwidth_base - (int) Configure feature bandwidth base (Mb) Valid range - 10-102400 bandwidth_unrestricted - (bool) Set the bandwidth to maximum """ payload = self._build_payload(host_list=host_list, serial=serial, instance_name=instance_name, use_mgmt_port=use_mgmt_port, interval=interval, bandwidth_base=bandwidth_base, bandwidth_unrestricted=bandwidth_unrestricted) return self._post(self.url_base, payload)
def visit_nonlocal(self, node, parent): """visit a Nonlocal node and return a new instance of it""" return nodes.Nonlocal( node.names, getattr(node, "lineno", None), getattr(node, "col_offset", None), parent, )
def function[visit_nonlocal, parameter[self, node, parent]]: constant[visit a Nonlocal node and return a new instance of it] return[call[name[nodes].Nonlocal, parameter[name[node].names, call[name[getattr], parameter[name[node], constant[lineno], constant[None]]], call[name[getattr], parameter[name[node], constant[col_offset], constant[None]]], name[parent]]]]
keyword[def] identifier[visit_nonlocal] ( identifier[self] , identifier[node] , identifier[parent] ): literal[string] keyword[return] identifier[nodes] . identifier[Nonlocal] ( identifier[node] . identifier[names] , identifier[getattr] ( identifier[node] , literal[string] , keyword[None] ), identifier[getattr] ( identifier[node] , literal[string] , keyword[None] ), identifier[parent] , )
def visit_nonlocal(self, node, parent): """visit a Nonlocal node and return a new instance of it""" return nodes.Nonlocal(node.names, getattr(node, 'lineno', None), getattr(node, 'col_offset', None), parent)
def wait_for_save(filename, timeout=5): """Waits for FILENAME to update, waiting up to TIMEOUT seconds. Returns True if a save was detected, and False otherwise. """ modification_time = os.path.getmtime(filename) start_time = time.time() while time.time() < start_time + timeout: if (os.path.getmtime(filename) > modification_time and os.path.getsize(filename) > 0): return True time.sleep(0.2) return False
def function[wait_for_save, parameter[filename, timeout]]: constant[Waits for FILENAME to update, waiting up to TIMEOUT seconds. Returns True if a save was detected, and False otherwise. ] variable[modification_time] assign[=] call[name[os].path.getmtime, parameter[name[filename]]] variable[start_time] assign[=] call[name[time].time, parameter[]] while compare[call[name[time].time, parameter[]] less[<] binary_operation[name[start_time] + name[timeout]]] begin[:] if <ast.BoolOp object at 0x7da204961e40> begin[:] return[constant[True]] call[name[time].sleep, parameter[constant[0.2]]] return[constant[False]]
keyword[def] identifier[wait_for_save] ( identifier[filename] , identifier[timeout] = literal[int] ): literal[string] identifier[modification_time] = identifier[os] . identifier[path] . identifier[getmtime] ( identifier[filename] ) identifier[start_time] = identifier[time] . identifier[time] () keyword[while] identifier[time] . identifier[time] ()< identifier[start_time] + identifier[timeout] : keyword[if] ( identifier[os] . identifier[path] . identifier[getmtime] ( identifier[filename] )> identifier[modification_time] keyword[and] identifier[os] . identifier[path] . identifier[getsize] ( identifier[filename] )> literal[int] ): keyword[return] keyword[True] identifier[time] . identifier[sleep] ( literal[int] ) keyword[return] keyword[False]
def wait_for_save(filename, timeout=5): """Waits for FILENAME to update, waiting up to TIMEOUT seconds. Returns True if a save was detected, and False otherwise. """ modification_time = os.path.getmtime(filename) start_time = time.time() while time.time() < start_time + timeout: if os.path.getmtime(filename) > modification_time and os.path.getsize(filename) > 0: return True # depends on [control=['if'], data=[]] time.sleep(0.2) # depends on [control=['while'], data=[]] return False
def resolve_and_validate_document(loadingContext, workflowobj, uri, preprocess_only=False, # type: bool skip_schemas=None, # type: bool ): # type: (...) -> Tuple[LoadingContext, Text] """Validate a CWL document.""" loadingContext = loadingContext.copy() if not isinstance(workflowobj, MutableMapping): raise ValueError("workflowjobj must be a dict, got '{}': {}".format( type(workflowobj), workflowobj)) jobobj = None if "cwl:tool" in workflowobj: jobobj, _ = loadingContext.loader.resolve_all(workflowobj, uri, checklinks=loadingContext.do_validate) uri = urllib.parse.urljoin(uri, workflowobj["https://w3id.org/cwl/cwl#tool"]) del cast(dict, jobobj)["https://w3id.org/cwl/cwl#tool"] workflowobj = fetch_document(uri, loadingContext)[1] fileuri = urllib.parse.urldefrag(uri)[0] cwlVersion = loadingContext.metadata.get("cwlVersion") if not cwlVersion: cwlVersion = workflowobj.get("cwlVersion") if not cwlVersion: raise ValidationException( "No cwlVersion found. " "Use the following syntax in your CWL document to declare " "the version: cwlVersion: <version>.\n" "Note: if this is a CWL draft-2 (pre v1.0) document then it " "will need to be upgraded first.") if not isinstance(cwlVersion, string_types): with SourceLine(workflowobj, "cwlVersion", ValidationException): raise ValidationException("'cwlVersion' must be a string, " "got {}".format( type(cwlVersion))) # strip out version cwlVersion = re.sub( r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "", cwlVersion) if cwlVersion not in list(ALLUPDATES): # print out all the Supported Versions of cwlVersion versions = [] for version in list(ALLUPDATES): if "dev" in version: version += " (with --enable-dev flag only)" versions.append(version) versions.sort() raise ValidationException( "The CWL reference runner no longer supports pre CWL v1.0 " "documents. Supported versions are: " "\n{}".format("\n".join(versions))) if isinstance(jobobj, CommentedMap) and "http://commonwl.org/cwltool#overrides" in jobobj: loadingContext.overrides_list.extend(resolve_overrides(jobobj, uri, uri)) del jobobj["http://commonwl.org/cwltool#overrides"] if isinstance(jobobj, CommentedMap) and "https://w3id.org/cwl/cwl#requirements" in jobobj: if cwlVersion not in ("v1.1.0-dev1",): raise ValidationException( "`cwl:requirements` in the input object is not part of CWL " "v1.0. You can adjust to use `cwltool:overrides` instead; or you " "can set the cwlVersion to v1.1.0-dev1 or greater and re-run with " "--enable-dev.") loadingContext.overrides_list.append({"overrideTarget": uri, "requirements": jobobj["https://w3id.org/cwl/cwl#requirements"]}) del jobobj["https://w3id.org/cwl/cwl#requirements"] (sch_document_loader, avsc_names) = \ process.get_schema(cwlVersion)[:2] if isinstance(avsc_names, Exception): raise avsc_names processobj = None # type: Union[CommentedMap, CommentedSeq, Text, None] document_loader = Loader(sch_document_loader.ctx, schemagraph=sch_document_loader.graph, idx=loadingContext.loader.idx, cache=sch_document_loader.cache, fetcher_constructor=loadingContext.fetcher_constructor, skip_schemas=skip_schemas) if cwlVersion == "v1.0": _add_blank_ids(workflowobj) workflowobj["id"] = fileuri processobj, metadata = document_loader.resolve_all( workflowobj, fileuri, checklinks=loadingContext.do_validate) if loadingContext.metadata: metadata = loadingContext.metadata if not isinstance(processobj, (CommentedMap, CommentedSeq)): raise ValidationException("Workflow must be a CommentedMap or CommentedSeq.") if not isinstance(metadata, CommentedMap): raise ValidationException("metadata must be a CommentedMap, was %s" % type(metadata)) _convert_stdstreams_to_files(workflowobj) if preprocess_only: return loadingContext, uri if loadingContext.do_validate: schema.validate_doc(avsc_names, processobj, document_loader, loadingContext.strict) # None means default behavior (do update) if loadingContext.do_update in (True, None): processobj = cast(CommentedMap, cmap(update.update( processobj, document_loader, fileuri, loadingContext.enable_dev, metadata))) if isinstance(processobj, MutableMapping): document_loader.idx[processobj["id"]] = processobj elif isinstance(processobj, MutableSequence): document_loader.idx[metadata["id"]] = metadata for po in processobj: document_loader.idx[po["id"]] = po if jobobj is not None: loadingContext.jobdefaults = jobobj loadingContext.loader = document_loader loadingContext.avsc_names = avsc_names loadingContext.metadata = metadata return loadingContext, uri
def function[resolve_and_validate_document, parameter[loadingContext, workflowobj, uri, preprocess_only, skip_schemas]]: constant[Validate a CWL document.] variable[loadingContext] assign[=] call[name[loadingContext].copy, parameter[]] if <ast.UnaryOp object at 0x7da204566c50> begin[:] <ast.Raise object at 0x7da204566350> variable[jobobj] assign[=] constant[None] if compare[constant[cwl:tool] in name[workflowobj]] begin[:] <ast.Tuple object at 0x7da2045672b0> assign[=] call[name[loadingContext].loader.resolve_all, parameter[name[workflowobj], name[uri]]] variable[uri] assign[=] call[name[urllib].parse.urljoin, parameter[name[uri], call[name[workflowobj]][constant[https://w3id.org/cwl/cwl#tool]]]] <ast.Delete object at 0x7da204566bf0> variable[workflowobj] assign[=] call[call[name[fetch_document], parameter[name[uri], name[loadingContext]]]][constant[1]] variable[fileuri] assign[=] call[call[name[urllib].parse.urldefrag, parameter[name[uri]]]][constant[0]] variable[cwlVersion] assign[=] call[name[loadingContext].metadata.get, parameter[constant[cwlVersion]]] if <ast.UnaryOp object at 0x7da204564760> begin[:] variable[cwlVersion] assign[=] call[name[workflowobj].get, parameter[constant[cwlVersion]]] if <ast.UnaryOp object at 0x7da2045659c0> begin[:] <ast.Raise object at 0x7da2045675e0> if <ast.UnaryOp object at 0x7da204564a60> begin[:] with call[name[SourceLine], parameter[name[workflowobj], constant[cwlVersion], name[ValidationException]]] begin[:] <ast.Raise object at 0x7da204567a30> variable[cwlVersion] assign[=] call[name[re].sub, parameter[constant[^(?:cwl:|https://w3id.org/cwl/cwl#)], constant[], name[cwlVersion]]] if compare[name[cwlVersion] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[name[ALLUPDATES]]]] begin[:] variable[versions] assign[=] list[[]] for taget[name[version]] in starred[call[name[list], parameter[name[ALLUPDATES]]]] begin[:] if compare[constant[dev] in name[version]] begin[:] <ast.AugAssign object at 0x7da204567f70> call[name[versions].append, parameter[name[version]]] call[name[versions].sort, parameter[]] <ast.Raise object at 0x7da204566590> if <ast.BoolOp object at 0x7da2045656c0> begin[:] call[name[loadingContext].overrides_list.extend, parameter[call[name[resolve_overrides], parameter[name[jobobj], name[uri], name[uri]]]]] <ast.Delete object at 0x7da204566a10> if <ast.BoolOp object at 0x7da2045669e0> begin[:] if compare[name[cwlVersion] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da204566e60>]]] begin[:] <ast.Raise object at 0x7da204567f10> call[name[loadingContext].overrides_list.append, parameter[dictionary[[<ast.Constant object at 0x7da204566cb0>, <ast.Constant object at 0x7da204565d80>], [<ast.Name object at 0x7da204567a00>, <ast.Subscript object at 0x7da204566830>]]]] <ast.Delete object at 0x7da204564940> <ast.Tuple object at 0x7da204565ab0> assign[=] call[call[name[process].get_schema, parameter[name[cwlVersion]]]][<ast.Slice object at 0x7da2045654e0>] if call[name[isinstance], parameter[name[avsc_names], name[Exception]]] begin[:] <ast.Raise object at 0x7da204564220> variable[processobj] assign[=] constant[None] variable[document_loader] assign[=] call[name[Loader], parameter[name[sch_document_loader].ctx]] if compare[name[cwlVersion] equal[==] constant[v1.0]] begin[:] call[name[_add_blank_ids], parameter[name[workflowobj]]] call[name[workflowobj]][constant[id]] assign[=] name[fileuri] <ast.Tuple object at 0x7da2044c0910> assign[=] call[name[document_loader].resolve_all, parameter[name[workflowobj], name[fileuri]]] if name[loadingContext].metadata begin[:] variable[metadata] assign[=] name[loadingContext].metadata if <ast.UnaryOp object at 0x7da2044c3ee0> begin[:] <ast.Raise object at 0x7da2044c2230> if <ast.UnaryOp object at 0x7da2044c2380> begin[:] <ast.Raise object at 0x7da2044c3490> call[name[_convert_stdstreams_to_files], parameter[name[workflowobj]]] if name[preprocess_only] begin[:] return[tuple[[<ast.Name object at 0x7da2044c36a0>, <ast.Name object at 0x7da2044c20e0>]]] if name[loadingContext].do_validate begin[:] call[name[schema].validate_doc, parameter[name[avsc_names], name[processobj], name[document_loader], name[loadingContext].strict]] if compare[name[loadingContext].do_update in tuple[[<ast.Constant object at 0x7da2044c1ab0>, <ast.Constant object at 0x7da2044c25f0>]]] begin[:] variable[processobj] assign[=] call[name[cast], parameter[name[CommentedMap], call[name[cmap], parameter[call[name[update].update, parameter[name[processobj], name[document_loader], name[fileuri], name[loadingContext].enable_dev, name[metadata]]]]]]] if call[name[isinstance], parameter[name[processobj], name[MutableMapping]]] begin[:] call[name[document_loader].idx][call[name[processobj]][constant[id]]] assign[=] name[processobj] if compare[name[jobobj] is_not constant[None]] begin[:] name[loadingContext].jobdefaults assign[=] name[jobobj] name[loadingContext].loader assign[=] name[document_loader] name[loadingContext].avsc_names assign[=] name[avsc_names] name[loadingContext].metadata assign[=] name[metadata] return[tuple[[<ast.Name object at 0x7da2044c2650>, <ast.Name object at 0x7da2044c1870>]]]
keyword[def] identifier[resolve_and_validate_document] ( identifier[loadingContext] , identifier[workflowobj] , identifier[uri] , identifier[preprocess_only] = keyword[False] , identifier[skip_schemas] = keyword[None] , ): literal[string] identifier[loadingContext] = identifier[loadingContext] . identifier[copy] () keyword[if] keyword[not] identifier[isinstance] ( identifier[workflowobj] , identifier[MutableMapping] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[type] ( identifier[workflowobj] ), identifier[workflowobj] )) identifier[jobobj] = keyword[None] keyword[if] literal[string] keyword[in] identifier[workflowobj] : identifier[jobobj] , identifier[_] = identifier[loadingContext] . identifier[loader] . identifier[resolve_all] ( identifier[workflowobj] , identifier[uri] , identifier[checklinks] = identifier[loadingContext] . identifier[do_validate] ) identifier[uri] = identifier[urllib] . identifier[parse] . identifier[urljoin] ( identifier[uri] , identifier[workflowobj] [ literal[string] ]) keyword[del] identifier[cast] ( identifier[dict] , identifier[jobobj] )[ literal[string] ] identifier[workflowobj] = identifier[fetch_document] ( identifier[uri] , identifier[loadingContext] )[ literal[int] ] identifier[fileuri] = identifier[urllib] . identifier[parse] . identifier[urldefrag] ( identifier[uri] )[ literal[int] ] identifier[cwlVersion] = identifier[loadingContext] . identifier[metadata] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[cwlVersion] : identifier[cwlVersion] = identifier[workflowobj] . identifier[get] ( literal[string] ) keyword[if] keyword[not] identifier[cwlVersion] : keyword[raise] identifier[ValidationException] ( literal[string] literal[string] literal[string] literal[string] literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[cwlVersion] , identifier[string_types] ): keyword[with] identifier[SourceLine] ( identifier[workflowobj] , literal[string] , identifier[ValidationException] ): keyword[raise] identifier[ValidationException] ( literal[string] literal[string] . identifier[format] ( identifier[type] ( identifier[cwlVersion] ))) identifier[cwlVersion] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[cwlVersion] ) keyword[if] identifier[cwlVersion] keyword[not] keyword[in] identifier[list] ( identifier[ALLUPDATES] ): identifier[versions] =[] keyword[for] identifier[version] keyword[in] identifier[list] ( identifier[ALLUPDATES] ): keyword[if] literal[string] keyword[in] identifier[version] : identifier[version] += literal[string] identifier[versions] . identifier[append] ( identifier[version] ) identifier[versions] . identifier[sort] () keyword[raise] identifier[ValidationException] ( literal[string] literal[string] literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[versions] ))) keyword[if] identifier[isinstance] ( identifier[jobobj] , identifier[CommentedMap] ) keyword[and] literal[string] keyword[in] identifier[jobobj] : identifier[loadingContext] . identifier[overrides_list] . identifier[extend] ( identifier[resolve_overrides] ( identifier[jobobj] , identifier[uri] , identifier[uri] )) keyword[del] identifier[jobobj] [ literal[string] ] keyword[if] identifier[isinstance] ( identifier[jobobj] , identifier[CommentedMap] ) keyword[and] literal[string] keyword[in] identifier[jobobj] : keyword[if] identifier[cwlVersion] keyword[not] keyword[in] ( literal[string] ,): keyword[raise] identifier[ValidationException] ( literal[string] literal[string] literal[string] literal[string] ) identifier[loadingContext] . identifier[overrides_list] . identifier[append] ({ literal[string] : identifier[uri] , literal[string] : identifier[jobobj] [ literal[string] ]}) keyword[del] identifier[jobobj] [ literal[string] ] ( identifier[sch_document_loader] , identifier[avsc_names] )= identifier[process] . identifier[get_schema] ( identifier[cwlVersion] )[: literal[int] ] keyword[if] identifier[isinstance] ( identifier[avsc_names] , identifier[Exception] ): keyword[raise] identifier[avsc_names] identifier[processobj] = keyword[None] identifier[document_loader] = identifier[Loader] ( identifier[sch_document_loader] . identifier[ctx] , identifier[schemagraph] = identifier[sch_document_loader] . identifier[graph] , identifier[idx] = identifier[loadingContext] . identifier[loader] . identifier[idx] , identifier[cache] = identifier[sch_document_loader] . identifier[cache] , identifier[fetcher_constructor] = identifier[loadingContext] . identifier[fetcher_constructor] , identifier[skip_schemas] = identifier[skip_schemas] ) keyword[if] identifier[cwlVersion] == literal[string] : identifier[_add_blank_ids] ( identifier[workflowobj] ) identifier[workflowobj] [ literal[string] ]= identifier[fileuri] identifier[processobj] , identifier[metadata] = identifier[document_loader] . identifier[resolve_all] ( identifier[workflowobj] , identifier[fileuri] , identifier[checklinks] = identifier[loadingContext] . identifier[do_validate] ) keyword[if] identifier[loadingContext] . identifier[metadata] : identifier[metadata] = identifier[loadingContext] . identifier[metadata] keyword[if] keyword[not] identifier[isinstance] ( identifier[processobj] ,( identifier[CommentedMap] , identifier[CommentedSeq] )): keyword[raise] identifier[ValidationException] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[metadata] , identifier[CommentedMap] ): keyword[raise] identifier[ValidationException] ( literal[string] % identifier[type] ( identifier[metadata] )) identifier[_convert_stdstreams_to_files] ( identifier[workflowobj] ) keyword[if] identifier[preprocess_only] : keyword[return] identifier[loadingContext] , identifier[uri] keyword[if] identifier[loadingContext] . identifier[do_validate] : identifier[schema] . identifier[validate_doc] ( identifier[avsc_names] , identifier[processobj] , identifier[document_loader] , identifier[loadingContext] . identifier[strict] ) keyword[if] identifier[loadingContext] . identifier[do_update] keyword[in] ( keyword[True] , keyword[None] ): identifier[processobj] = identifier[cast] ( identifier[CommentedMap] , identifier[cmap] ( identifier[update] . identifier[update] ( identifier[processobj] , identifier[document_loader] , identifier[fileuri] , identifier[loadingContext] . identifier[enable_dev] , identifier[metadata] ))) keyword[if] identifier[isinstance] ( identifier[processobj] , identifier[MutableMapping] ): identifier[document_loader] . identifier[idx] [ identifier[processobj] [ literal[string] ]]= identifier[processobj] keyword[elif] identifier[isinstance] ( identifier[processobj] , identifier[MutableSequence] ): identifier[document_loader] . identifier[idx] [ identifier[metadata] [ literal[string] ]]= identifier[metadata] keyword[for] identifier[po] keyword[in] identifier[processobj] : identifier[document_loader] . identifier[idx] [ identifier[po] [ literal[string] ]]= identifier[po] keyword[if] identifier[jobobj] keyword[is] keyword[not] keyword[None] : identifier[loadingContext] . identifier[jobdefaults] = identifier[jobobj] identifier[loadingContext] . identifier[loader] = identifier[document_loader] identifier[loadingContext] . identifier[avsc_names] = identifier[avsc_names] identifier[loadingContext] . identifier[metadata] = identifier[metadata] keyword[return] identifier[loadingContext] , identifier[uri]
def resolve_and_validate_document(loadingContext, workflowobj, uri, preprocess_only=False, skip_schemas=None): # type: bool # type: bool # type: (...) -> Tuple[LoadingContext, Text] 'Validate a CWL document.' loadingContext = loadingContext.copy() if not isinstance(workflowobj, MutableMapping): raise ValueError("workflowjobj must be a dict, got '{}': {}".format(type(workflowobj), workflowobj)) # depends on [control=['if'], data=[]] jobobj = None if 'cwl:tool' in workflowobj: (jobobj, _) = loadingContext.loader.resolve_all(workflowobj, uri, checklinks=loadingContext.do_validate) uri = urllib.parse.urljoin(uri, workflowobj['https://w3id.org/cwl/cwl#tool']) del cast(dict, jobobj)['https://w3id.org/cwl/cwl#tool'] workflowobj = fetch_document(uri, loadingContext)[1] # depends on [control=['if'], data=['workflowobj']] fileuri = urllib.parse.urldefrag(uri)[0] cwlVersion = loadingContext.metadata.get('cwlVersion') if not cwlVersion: cwlVersion = workflowobj.get('cwlVersion') # depends on [control=['if'], data=[]] if not cwlVersion: raise ValidationException('No cwlVersion found. Use the following syntax in your CWL document to declare the version: cwlVersion: <version>.\nNote: if this is a CWL draft-2 (pre v1.0) document then it will need to be upgraded first.') # depends on [control=['if'], data=[]] if not isinstance(cwlVersion, string_types): with SourceLine(workflowobj, 'cwlVersion', ValidationException): raise ValidationException("'cwlVersion' must be a string, got {}".format(type(cwlVersion))) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] # strip out version cwlVersion = re.sub('^(?:cwl:|https://w3id.org/cwl/cwl#)', '', cwlVersion) if cwlVersion not in list(ALLUPDATES): # print out all the Supported Versions of cwlVersion versions = [] for version in list(ALLUPDATES): if 'dev' in version: version += ' (with --enable-dev flag only)' # depends on [control=['if'], data=['version']] versions.append(version) # depends on [control=['for'], data=['version']] versions.sort() raise ValidationException('The CWL reference runner no longer supports pre CWL v1.0 documents. Supported versions are: \n{}'.format('\n'.join(versions))) # depends on [control=['if'], data=[]] if isinstance(jobobj, CommentedMap) and 'http://commonwl.org/cwltool#overrides' in jobobj: loadingContext.overrides_list.extend(resolve_overrides(jobobj, uri, uri)) del jobobj['http://commonwl.org/cwltool#overrides'] # depends on [control=['if'], data=[]] if isinstance(jobobj, CommentedMap) and 'https://w3id.org/cwl/cwl#requirements' in jobobj: if cwlVersion not in ('v1.1.0-dev1',): raise ValidationException('`cwl:requirements` in the input object is not part of CWL v1.0. You can adjust to use `cwltool:overrides` instead; or you can set the cwlVersion to v1.1.0-dev1 or greater and re-run with --enable-dev.') # depends on [control=['if'], data=[]] loadingContext.overrides_list.append({'overrideTarget': uri, 'requirements': jobobj['https://w3id.org/cwl/cwl#requirements']}) del jobobj['https://w3id.org/cwl/cwl#requirements'] # depends on [control=['if'], data=[]] (sch_document_loader, avsc_names) = process.get_schema(cwlVersion)[:2] if isinstance(avsc_names, Exception): raise avsc_names # depends on [control=['if'], data=[]] processobj = None # type: Union[CommentedMap, CommentedSeq, Text, None] document_loader = Loader(sch_document_loader.ctx, schemagraph=sch_document_loader.graph, idx=loadingContext.loader.idx, cache=sch_document_loader.cache, fetcher_constructor=loadingContext.fetcher_constructor, skip_schemas=skip_schemas) if cwlVersion == 'v1.0': _add_blank_ids(workflowobj) # depends on [control=['if'], data=[]] workflowobj['id'] = fileuri (processobj, metadata) = document_loader.resolve_all(workflowobj, fileuri, checklinks=loadingContext.do_validate) if loadingContext.metadata: metadata = loadingContext.metadata # depends on [control=['if'], data=[]] if not isinstance(processobj, (CommentedMap, CommentedSeq)): raise ValidationException('Workflow must be a CommentedMap or CommentedSeq.') # depends on [control=['if'], data=[]] if not isinstance(metadata, CommentedMap): raise ValidationException('metadata must be a CommentedMap, was %s' % type(metadata)) # depends on [control=['if'], data=[]] _convert_stdstreams_to_files(workflowobj) if preprocess_only: return (loadingContext, uri) # depends on [control=['if'], data=[]] if loadingContext.do_validate: schema.validate_doc(avsc_names, processobj, document_loader, loadingContext.strict) # depends on [control=['if'], data=[]] # None means default behavior (do update) if loadingContext.do_update in (True, None): processobj = cast(CommentedMap, cmap(update.update(processobj, document_loader, fileuri, loadingContext.enable_dev, metadata))) if isinstance(processobj, MutableMapping): document_loader.idx[processobj['id']] = processobj # depends on [control=['if'], data=[]] elif isinstance(processobj, MutableSequence): document_loader.idx[metadata['id']] = metadata for po in processobj: document_loader.idx[po['id']] = po # depends on [control=['for'], data=['po']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if jobobj is not None: loadingContext.jobdefaults = jobobj # depends on [control=['if'], data=['jobobj']] loadingContext.loader = document_loader loadingContext.avsc_names = avsc_names loadingContext.metadata = metadata return (loadingContext, uri)
def upsert_keys(session, key_table, keys): """Bulk add annotation keys to the specified table. :param key_table: The sqlalchemy class to insert into. :param keys: A map of {name: [candidate_classes]}. """ # Do nothing if empty if not keys: return for key_batch in _batch_postgres_query( key_table, [{"name": k[0], "candidate_classes": k[1]} for k in keys.items()] ): stmt = insert(key_table.__table__) stmt = stmt.on_conflict_do_update( constraint=key_table.__table__.primary_key, set_={ "name": stmt.excluded.get("name"), "candidate_classes": stmt.excluded.get("candidate_classes"), }, ) while True: try: session.execute(stmt, key_batch) session.commit() break except Exception as e: logger.debug(e)
def function[upsert_keys, parameter[session, key_table, keys]]: constant[Bulk add annotation keys to the specified table. :param key_table: The sqlalchemy class to insert into. :param keys: A map of {name: [candidate_classes]}. ] if <ast.UnaryOp object at 0x7da1b23447c0> begin[:] return[None] for taget[name[key_batch]] in starred[call[name[_batch_postgres_query], parameter[name[key_table], <ast.ListComp object at 0x7da1b2346110>]]] begin[:] variable[stmt] assign[=] call[name[insert], parameter[name[key_table].__table__]] variable[stmt] assign[=] call[name[stmt].on_conflict_do_update, parameter[]] while constant[True] begin[:] <ast.Try object at 0x7da1b2347370>
keyword[def] identifier[upsert_keys] ( identifier[session] , identifier[key_table] , identifier[keys] ): literal[string] keyword[if] keyword[not] identifier[keys] : keyword[return] keyword[for] identifier[key_batch] keyword[in] identifier[_batch_postgres_query] ( identifier[key_table] ,[{ literal[string] : identifier[k] [ literal[int] ], literal[string] : identifier[k] [ literal[int] ]} keyword[for] identifier[k] keyword[in] identifier[keys] . identifier[items] ()] ): identifier[stmt] = identifier[insert] ( identifier[key_table] . identifier[__table__] ) identifier[stmt] = identifier[stmt] . identifier[on_conflict_do_update] ( identifier[constraint] = identifier[key_table] . identifier[__table__] . identifier[primary_key] , identifier[set_] ={ literal[string] : identifier[stmt] . identifier[excluded] . identifier[get] ( literal[string] ), literal[string] : identifier[stmt] . identifier[excluded] . identifier[get] ( literal[string] ), }, ) keyword[while] keyword[True] : keyword[try] : identifier[session] . identifier[execute] ( identifier[stmt] , identifier[key_batch] ) identifier[session] . identifier[commit] () keyword[break] keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[logger] . identifier[debug] ( identifier[e] )
def upsert_keys(session, key_table, keys): """Bulk add annotation keys to the specified table. :param key_table: The sqlalchemy class to insert into. :param keys: A map of {name: [candidate_classes]}. """ # Do nothing if empty if not keys: return # depends on [control=['if'], data=[]] for key_batch in _batch_postgres_query(key_table, [{'name': k[0], 'candidate_classes': k[1]} for k in keys.items()]): stmt = insert(key_table.__table__) stmt = stmt.on_conflict_do_update(constraint=key_table.__table__.primary_key, set_={'name': stmt.excluded.get('name'), 'candidate_classes': stmt.excluded.get('candidate_classes')}) while True: try: session.execute(stmt, key_batch) session.commit() break # depends on [control=['try'], data=[]] except Exception as e: logger.debug(e) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['key_batch']]
def wait(self, cadence=2, sendspec=None): """Does not return until all background commands are completed. """ shutit = self.shutit shutit.log('In wait.', level=logging.DEBUG) if sendspec: cadence = sendspec.wait_cadence shutit.log('Login stack is:\n' + str(self.login_stack), level=logging.DEBUG) while True: # go through each background child checking whether they've finished res, res_str, background_object = self.login_stack.get_current_login_item().check_background_commands_complete() shutit.log('Checking: ' + str(background_object) + '\nres: ' + str(res) + '\nres_str' + str(res_str), level=logging.DEBUG) if res: # When all have completed, break return the background command objects. break elif res_str in ('S','N'): # Do nothing, this is an started or not-running task. pass elif res_str == 'F': assert background_object is not None, shutit_util.print_debug() assert isinstance(background_object, ShutItBackgroundCommand), shutit_util.print_debug() shutit.log('Failure in: ' + str(self.login_stack), level=logging.DEBUG) self.pause_point('Background task: ' + background_object.sendspec.original_send + ' :failed.') return False else: self.shutit.fail('Un-handled exit code: ' + res_str) # pragma: no cover time.sleep(cadence) shutit.log('Wait complete.', level=logging.DEBUG) return True
def function[wait, parameter[self, cadence, sendspec]]: constant[Does not return until all background commands are completed. ] variable[shutit] assign[=] name[self].shutit call[name[shutit].log, parameter[constant[In wait.]]] if name[sendspec] begin[:] variable[cadence] assign[=] name[sendspec].wait_cadence call[name[shutit].log, parameter[binary_operation[constant[Login stack is: ] + call[name[str], parameter[name[self].login_stack]]]]] while constant[True] begin[:] <ast.Tuple object at 0x7da1b26ac700> assign[=] call[call[name[self].login_stack.get_current_login_item, parameter[]].check_background_commands_complete, parameter[]] call[name[shutit].log, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[Checking: ] + call[name[str], parameter[name[background_object]]]] + constant[ res: ]] + call[name[str], parameter[name[res]]]] + constant[ res_str]] + call[name[str], parameter[name[res_str]]]]]] if name[res] begin[:] break call[name[time].sleep, parameter[name[cadence]]] call[name[shutit].log, parameter[constant[Wait complete.]]] return[constant[True]]
keyword[def] identifier[wait] ( identifier[self] , identifier[cadence] = literal[int] , identifier[sendspec] = keyword[None] ): literal[string] identifier[shutit] = identifier[self] . identifier[shutit] identifier[shutit] . identifier[log] ( literal[string] , identifier[level] = identifier[logging] . identifier[DEBUG] ) keyword[if] identifier[sendspec] : identifier[cadence] = identifier[sendspec] . identifier[wait_cadence] identifier[shutit] . identifier[log] ( literal[string] + identifier[str] ( identifier[self] . identifier[login_stack] ), identifier[level] = identifier[logging] . identifier[DEBUG] ) keyword[while] keyword[True] : identifier[res] , identifier[res_str] , identifier[background_object] = identifier[self] . identifier[login_stack] . identifier[get_current_login_item] (). identifier[check_background_commands_complete] () identifier[shutit] . identifier[log] ( literal[string] + identifier[str] ( identifier[background_object] )+ literal[string] + identifier[str] ( identifier[res] )+ literal[string] + identifier[str] ( identifier[res_str] ), identifier[level] = identifier[logging] . identifier[DEBUG] ) keyword[if] identifier[res] : keyword[break] keyword[elif] identifier[res_str] keyword[in] ( literal[string] , literal[string] ): keyword[pass] keyword[elif] identifier[res_str] == literal[string] : keyword[assert] identifier[background_object] keyword[is] keyword[not] keyword[None] , identifier[shutit_util] . identifier[print_debug] () keyword[assert] identifier[isinstance] ( identifier[background_object] , identifier[ShutItBackgroundCommand] ), identifier[shutit_util] . identifier[print_debug] () identifier[shutit] . identifier[log] ( literal[string] + identifier[str] ( identifier[self] . identifier[login_stack] ), identifier[level] = identifier[logging] . identifier[DEBUG] ) identifier[self] . identifier[pause_point] ( literal[string] + identifier[background_object] . identifier[sendspec] . identifier[original_send] + literal[string] ) keyword[return] keyword[False] keyword[else] : identifier[self] . identifier[shutit] . identifier[fail] ( literal[string] + identifier[res_str] ) identifier[time] . identifier[sleep] ( identifier[cadence] ) identifier[shutit] . identifier[log] ( literal[string] , identifier[level] = identifier[logging] . identifier[DEBUG] ) keyword[return] keyword[True]
def wait(self, cadence=2, sendspec=None): """Does not return until all background commands are completed. """ shutit = self.shutit shutit.log('In wait.', level=logging.DEBUG) if sendspec: cadence = sendspec.wait_cadence # depends on [control=['if'], data=[]] shutit.log('Login stack is:\n' + str(self.login_stack), level=logging.DEBUG) while True: # go through each background child checking whether they've finished (res, res_str, background_object) = self.login_stack.get_current_login_item().check_background_commands_complete() shutit.log('Checking: ' + str(background_object) + '\nres: ' + str(res) + '\nres_str' + str(res_str), level=logging.DEBUG) if res: # When all have completed, break return the background command objects. break # depends on [control=['if'], data=[]] elif res_str in ('S', 'N'): # Do nothing, this is an started or not-running task. pass # depends on [control=['if'], data=[]] elif res_str == 'F': assert background_object is not None, shutit_util.print_debug() assert isinstance(background_object, ShutItBackgroundCommand), shutit_util.print_debug() shutit.log('Failure in: ' + str(self.login_stack), level=logging.DEBUG) self.pause_point('Background task: ' + background_object.sendspec.original_send + ' :failed.') return False # depends on [control=['if'], data=[]] else: self.shutit.fail('Un-handled exit code: ' + res_str) # pragma: no cover time.sleep(cadence) # depends on [control=['while'], data=[]] shutit.log('Wait complete.', level=logging.DEBUG) return True
def msg_curse(self, args=None, max_width=None): """Return the dict to display in the curse interface.""" # Init the return message ret = [] # Only available on GNU/Linux # Only process if stats exist and display plugin enable... if not LINUX or not self.stats or not self.args.enable_irq: return ret # Max size for the interface name name_max_width = max_width - 7 # Build the string message # Header msg = '{:{width}}'.format('IRQ', width=name_max_width) ret.append(self.curse_add_line(msg, "TITLE")) msg = '{:>9}'.format('Rate/s') ret.append(self.curse_add_line(msg)) for i in self.stats: ret.append(self.curse_new_line()) msg = '{:{width}}'.format(i['irq_line'][:name_max_width], width=name_max_width) ret.append(self.curse_add_line(msg)) msg = '{:>9}'.format(str(i['irq_rate'])) ret.append(self.curse_add_line(msg)) return ret
def function[msg_curse, parameter[self, args, max_width]]: constant[Return the dict to display in the curse interface.] variable[ret] assign[=] list[[]] if <ast.BoolOp object at 0x7da20e956920> begin[:] return[name[ret]] variable[name_max_width] assign[=] binary_operation[name[max_width] - constant[7]] variable[msg] assign[=] call[constant[{:{width}}].format, parameter[constant[IRQ]]] call[name[ret].append, parameter[call[name[self].curse_add_line, parameter[name[msg], constant[TITLE]]]]] variable[msg] assign[=] call[constant[{:>9}].format, parameter[constant[Rate/s]]] call[name[ret].append, parameter[call[name[self].curse_add_line, parameter[name[msg]]]]] for taget[name[i]] in starred[name[self].stats] begin[:] call[name[ret].append, parameter[call[name[self].curse_new_line, parameter[]]]] variable[msg] assign[=] call[constant[{:{width}}].format, parameter[call[call[name[i]][constant[irq_line]]][<ast.Slice object at 0x7da18c4ce320>]]] call[name[ret].append, parameter[call[name[self].curse_add_line, parameter[name[msg]]]]] variable[msg] assign[=] call[constant[{:>9}].format, parameter[call[name[str], parameter[call[name[i]][constant[irq_rate]]]]]] call[name[ret].append, parameter[call[name[self].curse_add_line, parameter[name[msg]]]]] return[name[ret]]
keyword[def] identifier[msg_curse] ( identifier[self] , identifier[args] = keyword[None] , identifier[max_width] = keyword[None] ): literal[string] identifier[ret] =[] keyword[if] keyword[not] identifier[LINUX] keyword[or] keyword[not] identifier[self] . identifier[stats] keyword[or] keyword[not] identifier[self] . identifier[args] . identifier[enable_irq] : keyword[return] identifier[ret] identifier[name_max_width] = identifier[max_width] - literal[int] identifier[msg] = literal[string] . identifier[format] ( literal[string] , identifier[width] = identifier[name_max_width] ) identifier[ret] . identifier[append] ( identifier[self] . identifier[curse_add_line] ( identifier[msg] , literal[string] )) identifier[msg] = literal[string] . identifier[format] ( literal[string] ) identifier[ret] . identifier[append] ( identifier[self] . identifier[curse_add_line] ( identifier[msg] )) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[stats] : identifier[ret] . identifier[append] ( identifier[self] . identifier[curse_new_line] ()) identifier[msg] = literal[string] . identifier[format] ( identifier[i] [ literal[string] ][: identifier[name_max_width] ], identifier[width] = identifier[name_max_width] ) identifier[ret] . identifier[append] ( identifier[self] . identifier[curse_add_line] ( identifier[msg] )) identifier[msg] = literal[string] . identifier[format] ( identifier[str] ( identifier[i] [ literal[string] ])) identifier[ret] . identifier[append] ( identifier[self] . identifier[curse_add_line] ( identifier[msg] )) keyword[return] identifier[ret]
def msg_curse(self, args=None, max_width=None): """Return the dict to display in the curse interface.""" # Init the return message ret = [] # Only available on GNU/Linux # Only process if stats exist and display plugin enable... if not LINUX or not self.stats or (not self.args.enable_irq): return ret # depends on [control=['if'], data=[]] # Max size for the interface name name_max_width = max_width - 7 # Build the string message # Header msg = '{:{width}}'.format('IRQ', width=name_max_width) ret.append(self.curse_add_line(msg, 'TITLE')) msg = '{:>9}'.format('Rate/s') ret.append(self.curse_add_line(msg)) for i in self.stats: ret.append(self.curse_new_line()) msg = '{:{width}}'.format(i['irq_line'][:name_max_width], width=name_max_width) ret.append(self.curse_add_line(msg)) msg = '{:>9}'.format(str(i['irq_rate'])) ret.append(self.curse_add_line(msg)) # depends on [control=['for'], data=['i']] return ret
def native(self, value, context=None): """Convert a value from a foriegn type (i.e. web-safe) to Python-native.""" if self.strip and hasattr(value, 'strip'): value = value.strip() if self.none and value == '': return None if self.encoding and isinstance(value, bytes): return value.decode(self.encoding) return value
def function[native, parameter[self, value, context]]: constant[Convert a value from a foriegn type (i.e. web-safe) to Python-native.] if <ast.BoolOp object at 0x7da1b191cc10> begin[:] variable[value] assign[=] call[name[value].strip, parameter[]] if <ast.BoolOp object at 0x7da1b191ef20> begin[:] return[constant[None]] if <ast.BoolOp object at 0x7da1b191d780> begin[:] return[call[name[value].decode, parameter[name[self].encoding]]] return[name[value]]
keyword[def] identifier[native] ( identifier[self] , identifier[value] , identifier[context] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[strip] keyword[and] identifier[hasattr] ( identifier[value] , literal[string] ): identifier[value] = identifier[value] . identifier[strip] () keyword[if] identifier[self] . identifier[none] keyword[and] identifier[value] == literal[string] : keyword[return] keyword[None] keyword[if] identifier[self] . identifier[encoding] keyword[and] identifier[isinstance] ( identifier[value] , identifier[bytes] ): keyword[return] identifier[value] . identifier[decode] ( identifier[self] . identifier[encoding] ) keyword[return] identifier[value]
def native(self, value, context=None): """Convert a value from a foriegn type (i.e. web-safe) to Python-native.""" if self.strip and hasattr(value, 'strip'): value = value.strip() # depends on [control=['if'], data=[]] if self.none and value == '': return None # depends on [control=['if'], data=[]] if self.encoding and isinstance(value, bytes): return value.decode(self.encoding) # depends on [control=['if'], data=[]] return value
def sprint(string, *args, **kwargs): """Safe Print (handle UnicodeEncodeErrors on some terminals)""" try: print(string, *args, **kwargs) except UnicodeEncodeError: string = string.encode('utf-8', errors='ignore')\ .decode('ascii', errors='ignore') print(string, *args, **kwargs)
def function[sprint, parameter[string]]: constant[Safe Print (handle UnicodeEncodeErrors on some terminals)] <ast.Try object at 0x7da18ede6b90>
keyword[def] identifier[sprint] ( identifier[string] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[try] : identifier[print] ( identifier[string] ,* identifier[args] ,** identifier[kwargs] ) keyword[except] identifier[UnicodeEncodeError] : identifier[string] = identifier[string] . identifier[encode] ( literal[string] , identifier[errors] = literal[string] ). identifier[decode] ( literal[string] , identifier[errors] = literal[string] ) identifier[print] ( identifier[string] ,* identifier[args] ,** identifier[kwargs] )
def sprint(string, *args, **kwargs): """Safe Print (handle UnicodeEncodeErrors on some terminals)""" try: print(string, *args, **kwargs) # depends on [control=['try'], data=[]] except UnicodeEncodeError: string = string.encode('utf-8', errors='ignore').decode('ascii', errors='ignore') print(string, *args, **kwargs) # depends on [control=['except'], data=[]]
def bitonic_sort(arr, reverse=False): """ bitonic sort is sorting algorithm to use multiple process, but this code not containing parallel process It can sort only array that sizes power of 2 It can sort array in both increasing order and decreasing order by giving argument true(increasing) and false(decreasing) Worst-case in parallel: O(log(n)^2) Worst-case in non-parallel: O(nlog(n)^2) reference: https://en.wikipedia.org/wiki/Bitonic_sorter """ def compare(arr, reverse): n = len(arr)//2 for i in range(n): if reverse != (arr[i] > arr[i+n]): arr[i], arr[i+n] = arr[i+n], arr[i] return arr def bitonic_merge(arr, reverse): n = len(arr) if n <= 1: return arr arr = compare(arr, reverse) left = bitonic_merge(arr[:n // 2], reverse) right = bitonic_merge(arr[n // 2:], reverse) return left + right #end of function(compare and bitionic_merge) definition n = len(arr) if n <= 1: return arr # checks if n is power of two if not (n and (not(n & (n - 1))) ): raise ValueError("the size of input should be power of two") left = bitonic_sort(arr[:n // 2], True) right = bitonic_sort(arr[n // 2:], False) arr = bitonic_merge(left + right, reverse) return arr
def function[bitonic_sort, parameter[arr, reverse]]: constant[ bitonic sort is sorting algorithm to use multiple process, but this code not containing parallel process It can sort only array that sizes power of 2 It can sort array in both increasing order and decreasing order by giving argument true(increasing) and false(decreasing) Worst-case in parallel: O(log(n)^2) Worst-case in non-parallel: O(nlog(n)^2) reference: https://en.wikipedia.org/wiki/Bitonic_sorter ] def function[compare, parameter[arr, reverse]]: variable[n] assign[=] binary_operation[call[name[len], parameter[name[arr]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] for taget[name[i]] in starred[call[name[range], parameter[name[n]]]] begin[:] if compare[name[reverse] not_equal[!=] compare[call[name[arr]][name[i]] greater[>] call[name[arr]][binary_operation[name[i] + name[n]]]]] begin[:] <ast.Tuple object at 0x7da1b2078370> assign[=] tuple[[<ast.Subscript object at 0x7da1b2078520>, <ast.Subscript object at 0x7da1b2078bb0>]] return[name[arr]] def function[bitonic_merge, parameter[arr, reverse]]: variable[n] assign[=] call[name[len], parameter[name[arr]]] if compare[name[n] less_or_equal[<=] constant[1]] begin[:] return[name[arr]] variable[arr] assign[=] call[name[compare], parameter[name[arr], name[reverse]]] variable[left] assign[=] call[name[bitonic_merge], parameter[call[name[arr]][<ast.Slice object at 0x7da1b1ee9d80>], name[reverse]]] variable[right] assign[=] call[name[bitonic_merge], parameter[call[name[arr]][<ast.Slice object at 0x7da1b1ee84c0>], name[reverse]]] return[binary_operation[name[left] + name[right]]] variable[n] assign[=] call[name[len], parameter[name[arr]]] if compare[name[n] less_or_equal[<=] constant[1]] begin[:] return[name[arr]] if <ast.UnaryOp object at 0x7da1b1eeb0a0> begin[:] <ast.Raise object at 0x7da1b1ee98d0> variable[left] assign[=] call[name[bitonic_sort], parameter[call[name[arr]][<ast.Slice object at 0x7da1b1eea3e0>], constant[True]]] variable[right] assign[=] call[name[bitonic_sort], parameter[call[name[arr]][<ast.Slice object at 0x7da1b1eeb190>], constant[False]]] variable[arr] assign[=] call[name[bitonic_merge], parameter[binary_operation[name[left] + name[right]], name[reverse]]] return[name[arr]]
keyword[def] identifier[bitonic_sort] ( identifier[arr] , identifier[reverse] = keyword[False] ): literal[string] keyword[def] identifier[compare] ( identifier[arr] , identifier[reverse] ): identifier[n] = identifier[len] ( identifier[arr] )// literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n] ): keyword[if] identifier[reverse] !=( identifier[arr] [ identifier[i] ]> identifier[arr] [ identifier[i] + identifier[n] ]): identifier[arr] [ identifier[i] ], identifier[arr] [ identifier[i] + identifier[n] ]= identifier[arr] [ identifier[i] + identifier[n] ], identifier[arr] [ identifier[i] ] keyword[return] identifier[arr] keyword[def] identifier[bitonic_merge] ( identifier[arr] , identifier[reverse] ): identifier[n] = identifier[len] ( identifier[arr] ) keyword[if] identifier[n] <= literal[int] : keyword[return] identifier[arr] identifier[arr] = identifier[compare] ( identifier[arr] , identifier[reverse] ) identifier[left] = identifier[bitonic_merge] ( identifier[arr] [: identifier[n] // literal[int] ], identifier[reverse] ) identifier[right] = identifier[bitonic_merge] ( identifier[arr] [ identifier[n] // literal[int] :], identifier[reverse] ) keyword[return] identifier[left] + identifier[right] identifier[n] = identifier[len] ( identifier[arr] ) keyword[if] identifier[n] <= literal[int] : keyword[return] identifier[arr] keyword[if] keyword[not] ( identifier[n] keyword[and] ( keyword[not] ( identifier[n] &( identifier[n] - literal[int] )))): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[left] = identifier[bitonic_sort] ( identifier[arr] [: identifier[n] // literal[int] ], keyword[True] ) identifier[right] = identifier[bitonic_sort] ( identifier[arr] [ identifier[n] // literal[int] :], keyword[False] ) identifier[arr] = identifier[bitonic_merge] ( identifier[left] + identifier[right] , identifier[reverse] ) keyword[return] identifier[arr]
def bitonic_sort(arr, reverse=False): """ bitonic sort is sorting algorithm to use multiple process, but this code not containing parallel process It can sort only array that sizes power of 2 It can sort array in both increasing order and decreasing order by giving argument true(increasing) and false(decreasing) Worst-case in parallel: O(log(n)^2) Worst-case in non-parallel: O(nlog(n)^2) reference: https://en.wikipedia.org/wiki/Bitonic_sorter """ def compare(arr, reverse): n = len(arr) // 2 for i in range(n): if reverse != (arr[i] > arr[i + n]): (arr[i], arr[i + n]) = (arr[i + n], arr[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return arr def bitonic_merge(arr, reverse): n = len(arr) if n <= 1: return arr # depends on [control=['if'], data=[]] arr = compare(arr, reverse) left = bitonic_merge(arr[:n // 2], reverse) right = bitonic_merge(arr[n // 2:], reverse) return left + right #end of function(compare and bitionic_merge) definition n = len(arr) if n <= 1: return arr # depends on [control=['if'], data=[]] # checks if n is power of two if not (n and (not n & n - 1)): raise ValueError('the size of input should be power of two') # depends on [control=['if'], data=[]] left = bitonic_sort(arr[:n // 2], True) right = bitonic_sort(arr[n // 2:], False) arr = bitonic_merge(left + right, reverse) return arr
def tags(self): """ Returns a dict containing tags and their localized labels as values """ return dict([(t, self._catalog.tags.get(t, t)) for t in self._asset.get("tags", [])])
def function[tags, parameter[self]]: constant[ Returns a dict containing tags and their localized labels as values ] return[call[name[dict], parameter[<ast.ListComp object at 0x7da1b0f19090>]]]
keyword[def] identifier[tags] ( identifier[self] ): literal[string] keyword[return] identifier[dict] ([( identifier[t] , identifier[self] . identifier[_catalog] . identifier[tags] . identifier[get] ( identifier[t] , identifier[t] )) keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_asset] . identifier[get] ( literal[string] ,[])])
def tags(self): """ Returns a dict containing tags and their localized labels as values """ return dict([(t, self._catalog.tags.get(t, t)) for t in self._asset.get('tags', [])])
def createBracketOrder(self, contract, quantity, entry=0., target=0., stop=0., targetType=None, trailingStop=None, group=None, tif="DAY", fillorkill=False, iceberg=False, rth=False, stop_limit=False, transmit=True, account=None, **kwargs): """ creates One Cancels All Bracket Order trailingStop = None (regular stop) / percent / amount """ if group == None: group = "bracket_" + str(int(time.time())) # main order enteyOrder = self.createOrder(quantity, price=entry, transmit=False, tif=tif, fillorkill=fillorkill, iceberg=iceberg, rth=rth, account=account) entryOrderId = self.placeOrder(contract, enteyOrder) # target targetOrderId = 0 if target > 0: targetOrder = self.createTargetOrder(-quantity, parentId = entryOrderId, target = target, transmit = False if stop > 0 else True, orderType = targetType, group = group, rth = rth, tif = tif, account = account ) time.sleep(0.0001) self.requestOrderIds() targetOrderId = self.placeOrder(contract, targetOrder, self.orderId + 1) # stop stopOrderId = 0 if stop > 0: stopOrder = self.createStopOrder(-quantity, parentId = entryOrderId, stop = stop, trail = trailingStop, transmit = transmit, group = group, rth = rth, tif = tif, stop_limit = stop_limit, account = account ) time.sleep(0.0001) self.requestOrderIds() stopOrderId = self.placeOrder(contract, stopOrder, self.orderId + 2) # triggered trailing stop? # if ("triggerPrice" in kwargs) & ("trailPercent" in kwargs): # self.pendingTriggeredTrailingStopOrders.append() # self.signal_ttl = kwargs["signal_ttl"] if "signal_ttl" in kwargs else 0 return { "group": group, "entryOrderId": entryOrderId, "targetOrderId": targetOrderId, "stopOrderId": stopOrderId }
def function[createBracketOrder, parameter[self, contract, quantity, entry, target, stop, targetType, trailingStop, group, tif, fillorkill, iceberg, rth, stop_limit, transmit, account]]: constant[ creates One Cancels All Bracket Order trailingStop = None (regular stop) / percent / amount ] if compare[name[group] equal[==] constant[None]] begin[:] variable[group] assign[=] binary_operation[constant[bracket_] + call[name[str], parameter[call[name[int], parameter[call[name[time].time, parameter[]]]]]]] variable[enteyOrder] assign[=] call[name[self].createOrder, parameter[name[quantity]]] variable[entryOrderId] assign[=] call[name[self].placeOrder, parameter[name[contract], name[enteyOrder]]] variable[targetOrderId] assign[=] constant[0] if compare[name[target] greater[>] constant[0]] begin[:] variable[targetOrder] assign[=] call[name[self].createTargetOrder, parameter[<ast.UnaryOp object at 0x7da1b18baef0>]] call[name[time].sleep, parameter[constant[0.0001]]] call[name[self].requestOrderIds, parameter[]] variable[targetOrderId] assign[=] call[name[self].placeOrder, parameter[name[contract], name[targetOrder], binary_operation[name[self].orderId + constant[1]]]] variable[stopOrderId] assign[=] constant[0] if compare[name[stop] greater[>] constant[0]] begin[:] variable[stopOrder] assign[=] call[name[self].createStopOrder, parameter[<ast.UnaryOp object at 0x7da1b18b9c90>]] call[name[time].sleep, parameter[constant[0.0001]]] call[name[self].requestOrderIds, parameter[]] variable[stopOrderId] assign[=] call[name[self].placeOrder, parameter[name[contract], name[stopOrder], binary_operation[name[self].orderId + constant[2]]]] return[dictionary[[<ast.Constant object at 0x7da1b18ba110>, <ast.Constant object at 0x7da1b18ba140>, <ast.Constant object at 0x7da1b18ba170>, <ast.Constant object at 0x7da1b18ba1a0>], [<ast.Name object at 0x7da1b18ba1d0>, <ast.Name object at 0x7da1b18ba200>, <ast.Name object at 0x7da1b18ba230>, <ast.Name object at 0x7da1b18ba260>]]]
keyword[def] identifier[createBracketOrder] ( identifier[self] , identifier[contract] , identifier[quantity] , identifier[entry] = literal[int] , identifier[target] = literal[int] , identifier[stop] = literal[int] , identifier[targetType] = keyword[None] , identifier[trailingStop] = keyword[None] , identifier[group] = keyword[None] , identifier[tif] = literal[string] , identifier[fillorkill] = keyword[False] , identifier[iceberg] = keyword[False] , identifier[rth] = keyword[False] , identifier[stop_limit] = keyword[False] , identifier[transmit] = keyword[True] , identifier[account] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[group] == keyword[None] : identifier[group] = literal[string] + identifier[str] ( identifier[int] ( identifier[time] . identifier[time] ())) identifier[enteyOrder] = identifier[self] . identifier[createOrder] ( identifier[quantity] , identifier[price] = identifier[entry] , identifier[transmit] = keyword[False] , identifier[tif] = identifier[tif] , identifier[fillorkill] = identifier[fillorkill] , identifier[iceberg] = identifier[iceberg] , identifier[rth] = identifier[rth] , identifier[account] = identifier[account] ) identifier[entryOrderId] = identifier[self] . identifier[placeOrder] ( identifier[contract] , identifier[enteyOrder] ) identifier[targetOrderId] = literal[int] keyword[if] identifier[target] > literal[int] : identifier[targetOrder] = identifier[self] . identifier[createTargetOrder] (- identifier[quantity] , identifier[parentId] = identifier[entryOrderId] , identifier[target] = identifier[target] , identifier[transmit] = keyword[False] keyword[if] identifier[stop] > literal[int] keyword[else] keyword[True] , identifier[orderType] = identifier[targetType] , identifier[group] = identifier[group] , identifier[rth] = identifier[rth] , identifier[tif] = identifier[tif] , identifier[account] = identifier[account] ) identifier[time] . identifier[sleep] ( literal[int] ) identifier[self] . identifier[requestOrderIds] () identifier[targetOrderId] = identifier[self] . identifier[placeOrder] ( identifier[contract] , identifier[targetOrder] , identifier[self] . identifier[orderId] + literal[int] ) identifier[stopOrderId] = literal[int] keyword[if] identifier[stop] > literal[int] : identifier[stopOrder] = identifier[self] . identifier[createStopOrder] (- identifier[quantity] , identifier[parentId] = identifier[entryOrderId] , identifier[stop] = identifier[stop] , identifier[trail] = identifier[trailingStop] , identifier[transmit] = identifier[transmit] , identifier[group] = identifier[group] , identifier[rth] = identifier[rth] , identifier[tif] = identifier[tif] , identifier[stop_limit] = identifier[stop_limit] , identifier[account] = identifier[account] ) identifier[time] . identifier[sleep] ( literal[int] ) identifier[self] . identifier[requestOrderIds] () identifier[stopOrderId] = identifier[self] . identifier[placeOrder] ( identifier[contract] , identifier[stopOrder] , identifier[self] . identifier[orderId] + literal[int] ) keyword[return] { literal[string] : identifier[group] , literal[string] : identifier[entryOrderId] , literal[string] : identifier[targetOrderId] , literal[string] : identifier[stopOrderId] }
def createBracketOrder(self, contract, quantity, entry=0.0, target=0.0, stop=0.0, targetType=None, trailingStop=None, group=None, tif='DAY', fillorkill=False, iceberg=False, rth=False, stop_limit=False, transmit=True, account=None, **kwargs): """ creates One Cancels All Bracket Order trailingStop = None (regular stop) / percent / amount """ if group == None: group = 'bracket_' + str(int(time.time())) # depends on [control=['if'], data=['group']] # main order enteyOrder = self.createOrder(quantity, price=entry, transmit=False, tif=tif, fillorkill=fillorkill, iceberg=iceberg, rth=rth, account=account) entryOrderId = self.placeOrder(contract, enteyOrder) # target targetOrderId = 0 if target > 0: targetOrder = self.createTargetOrder(-quantity, parentId=entryOrderId, target=target, transmit=False if stop > 0 else True, orderType=targetType, group=group, rth=rth, tif=tif, account=account) time.sleep(0.0001) self.requestOrderIds() targetOrderId = self.placeOrder(contract, targetOrder, self.orderId + 1) # depends on [control=['if'], data=['target']] # stop stopOrderId = 0 if stop > 0: stopOrder = self.createStopOrder(-quantity, parentId=entryOrderId, stop=stop, trail=trailingStop, transmit=transmit, group=group, rth=rth, tif=tif, stop_limit=stop_limit, account=account) time.sleep(0.0001) self.requestOrderIds() stopOrderId = self.placeOrder(contract, stopOrder, self.orderId + 2) # depends on [control=['if'], data=['stop']] # triggered trailing stop? # if ("triggerPrice" in kwargs) & ("trailPercent" in kwargs): # self.pendingTriggeredTrailingStopOrders.append() # self.signal_ttl = kwargs["signal_ttl"] if "signal_ttl" in kwargs else 0 return {'group': group, 'entryOrderId': entryOrderId, 'targetOrderId': targetOrderId, 'stopOrderId': stopOrderId}
def relpath(path): """Path helper, gives you a path relative to this file""" return os.path.normpath( os.path.join(os.path.abspath(os.path.dirname(__file__)), path) )
def function[relpath, parameter[path]]: constant[Path helper, gives you a path relative to this file] return[call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[call[name[os].path.abspath, parameter[call[name[os].path.dirname, parameter[name[__file__]]]]], name[path]]]]]]
keyword[def] identifier[relpath] ( identifier[path] ): literal[string] keyword[return] identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] )), identifier[path] ) )
def relpath(path): """Path helper, gives you a path relative to this file""" return os.path.normpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), path))
def get_orders(self, **params): """https://developers.coinbase.com/api/v2#list-orders""" response = self._get('v2', 'orders', params=params) return self._make_api_object(response, Order)
def function[get_orders, parameter[self]]: constant[https://developers.coinbase.com/api/v2#list-orders] variable[response] assign[=] call[name[self]._get, parameter[constant[v2], constant[orders]]] return[call[name[self]._make_api_object, parameter[name[response], name[Order]]]]
keyword[def] identifier[get_orders] ( identifier[self] ,** identifier[params] ): literal[string] identifier[response] = identifier[self] . identifier[_get] ( literal[string] , literal[string] , identifier[params] = identifier[params] ) keyword[return] identifier[self] . identifier[_make_api_object] ( identifier[response] , identifier[Order] )
def get_orders(self, **params): """https://developers.coinbase.com/api/v2#list-orders""" response = self._get('v2', 'orders', params=params) return self._make_api_object(response, Order)
def _iter_service_names(): ''' Detect all of the service names available to upstart via init configuration files and via classic sysv init scripts ''' found = set() for line in glob.glob('/etc/init.d/*'): name = os.path.basename(line) found.add(name) yield name # This walk method supports nested services as per the init man page # definition 'For example a configuration file /etc/init/rc-sysinit.conf # is named rc-sysinit, while a configuration file /etc/init/net/apache.conf # is named net/apache' init_root = '/etc/init/' for root, dirnames, filenames in salt.utils.path.os_walk(init_root): relpath = os.path.relpath(root, init_root) for filename in fnmatch.filter(filenames, '*.conf'): if relpath == '.': # service is defined in the root, no need to append prefix. name = filename[:-5] else: # service is nested, append its relative path prefix. name = os.path.join(relpath, filename[:-5]) if name in found: continue yield name
def function[_iter_service_names, parameter[]]: constant[ Detect all of the service names available to upstart via init configuration files and via classic sysv init scripts ] variable[found] assign[=] call[name[set], parameter[]] for taget[name[line]] in starred[call[name[glob].glob, parameter[constant[/etc/init.d/*]]]] begin[:] variable[name] assign[=] call[name[os].path.basename, parameter[name[line]]] call[name[found].add, parameter[name[name]]] <ast.Yield object at 0x7da1b1f74a00> variable[init_root] assign[=] constant[/etc/init/] for taget[tuple[[<ast.Name object at 0x7da1b1f74850>, <ast.Name object at 0x7da1b1f74a30>, <ast.Name object at 0x7da1b1f77460>]]] in starred[call[name[salt].utils.path.os_walk, parameter[name[init_root]]]] begin[:] variable[relpath] assign[=] call[name[os].path.relpath, parameter[name[root], name[init_root]]] for taget[name[filename]] in starred[call[name[fnmatch].filter, parameter[name[filenames], constant[*.conf]]]] begin[:] if compare[name[relpath] equal[==] constant[.]] begin[:] variable[name] assign[=] call[name[filename]][<ast.Slice object at 0x7da1b1f75360>] if compare[name[name] in name[found]] begin[:] continue <ast.Yield object at 0x7da1b1c0d2d0>
keyword[def] identifier[_iter_service_names] (): literal[string] identifier[found] = identifier[set] () keyword[for] identifier[line] keyword[in] identifier[glob] . identifier[glob] ( literal[string] ): identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[line] ) identifier[found] . identifier[add] ( identifier[name] ) keyword[yield] identifier[name] identifier[init_root] = literal[string] keyword[for] identifier[root] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[salt] . identifier[utils] . identifier[path] . identifier[os_walk] ( identifier[init_root] ): identifier[relpath] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[root] , identifier[init_root] ) keyword[for] identifier[filename] keyword[in] identifier[fnmatch] . identifier[filter] ( identifier[filenames] , literal[string] ): keyword[if] identifier[relpath] == literal[string] : identifier[name] = identifier[filename] [:- literal[int] ] keyword[else] : identifier[name] = identifier[os] . identifier[path] . identifier[join] ( identifier[relpath] , identifier[filename] [:- literal[int] ]) keyword[if] identifier[name] keyword[in] identifier[found] : keyword[continue] keyword[yield] identifier[name]
def _iter_service_names(): """ Detect all of the service names available to upstart via init configuration files and via classic sysv init scripts """ found = set() for line in glob.glob('/etc/init.d/*'): name = os.path.basename(line) found.add(name) yield name # depends on [control=['for'], data=['line']] # This walk method supports nested services as per the init man page # definition 'For example a configuration file /etc/init/rc-sysinit.conf # is named rc-sysinit, while a configuration file /etc/init/net/apache.conf # is named net/apache' init_root = '/etc/init/' for (root, dirnames, filenames) in salt.utils.path.os_walk(init_root): relpath = os.path.relpath(root, init_root) for filename in fnmatch.filter(filenames, '*.conf'): if relpath == '.': # service is defined in the root, no need to append prefix. name = filename[:-5] # depends on [control=['if'], data=[]] else: # service is nested, append its relative path prefix. name = os.path.join(relpath, filename[:-5]) if name in found: continue # depends on [control=['if'], data=[]] yield name # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]]
def get_user(self, id, **data): """ GET /users/:id/ Returns a :format:`user` for the specified user as ``user``. If you want to get details about the currently authenticated user, use ``/users/me/``. """ return self.get("/users/{0}/".format(id), data=data)
def function[get_user, parameter[self, id]]: constant[ GET /users/:id/ Returns a :format:`user` for the specified user as ``user``. If you want to get details about the currently authenticated user, use ``/users/me/``. ] return[call[name[self].get, parameter[call[constant[/users/{0}/].format, parameter[name[id]]]]]]
keyword[def] identifier[get_user] ( identifier[self] , identifier[id] ,** identifier[data] ): literal[string] keyword[return] identifier[self] . identifier[get] ( literal[string] . identifier[format] ( identifier[id] ), identifier[data] = identifier[data] )
def get_user(self, id, **data): """ GET /users/:id/ Returns a :format:`user` for the specified user as ``user``. If you want to get details about the currently authenticated user, use ``/users/me/``. """ return self.get('/users/{0}/'.format(id), data=data)
def propagate_all_tables_info(self, write=True): """ Find any items (specimens, samples, sites, or locations) from tables other than measurements and make sure they each have a row in their own table. For example, if a site name is in the samples table but not in the sites table, create a row for it in the sites table. """ for table_name in ["specimens", "samples", "sites", "locations"]: if not table_name in self.tables: continue df = self.tables[table_name].df parent_name, child_name = self.get_parent_and_child(table_name) if parent_name: if parent_name[:-1] in df.columns: parents = sorted(set(df[parent_name[:-1]].dropna().values.astype(str))) if parent_name in self.tables: # if there is a parent table, update it parent_df = self.tables[parent_name].df missing_parents = set(parents) - set(parent_df.index) if missing_parents: # add any missing values print("-I- Updating {} table with values from {} table".format(parent_name, table_name)) for item in missing_parents: self.add_item(parent_name, {parent_name[:-1]: item}, label=item) # save any changes to file if write: self.write_table_to_file(parent_name) else: # if there is no parent table, create it if necessary if parents: # create a parent_df with the names you got from the child print("-I- Creating new {} table with data from {} table".format(parent_name, table_name)) # add in the grandparent if available grandparent_name = self.get_parent_and_child(parent_name)[0] if grandparent_name: grandparent = "" if grandparent_name in df.columns: grandparent = df[df[parent_name] == item][grandparent_name].values[0] columns = [parent_name[:-1]]#, grandparent_name[:-1]] else: columns = [parent_name[:-1]] parent_df = pd.DataFrame(columns=columns, index=parents) parent_df[parent_name[:-1]] = parent_df.index if grandparent_name: if grandparent_name[:-1] in df.columns: parent_df = pd.merge(df[[parent_name[:-1], grandparent_name[:-1]]], parent_df, on=parent_name[:-1]) self.tables[parent_name] = MagicDataFrame(dtype=parent_name, df=parent_df) if write: # save new table to file self.write_table_to_file(parent_name) if child_name: if child_name in df.columns: raw_children = df[child_name].dropna().str.split(':') # create dict of all children with parent info parent_of_child = {} for parent, children in raw_children.items(): for child in children: # remove whitespace child = child.strip() old_parent = parent_of_child.get(child) if old_parent and parent and (old_parent != parent): print('-I- for {} {}, replacing: {} with: {}'.format(child_name[:-1], child, old_parent, parent)) parent_of_child[child] = parent # old way: # flatten list, ignore duplicates #children = sorted(set([item.strip() for sublist in raw_children for item in sublist])) if child_name in self.tables: # if there is already a child table, update it child_df = self.tables[child_name].df missing_children = set(parent_of_child.keys()) - set(child_df.index) if missing_children: # add any missing values print("-I- Updating {} table with values from {} table".format(child_name, table_name)) for item in missing_children: data = {child_name[:-1]: item, table_name[:-1]: parent_of_child[item]} self.add_item(child_name, data, label=item) if write: # save any changes to file self.write_table_to_file(child_name) else: # if there is no child table, create it if necessary if children: # create a child_df with the names you got from the parent print("-I- Creating new {} table with data from {} table".format(child_name, table_name)) # old way to make new table: #child_df = pd.DataFrame(columns=[table_name[:-1]], index=children) # new way to make new table children_list = sorted(parent_of_child.keys()) children_data = [[child_name, parent_of_child[c_name]] for c_name in children_list] child_df = pd.DataFrame(index=children_list, columns=[child_name[:-1], table_name[:-1]], data=children_data) self.tables[child_name] = MagicDataFrame(dtype=child_name, df=child_df) if write: # save new table to file self.write_table_to_file(child_name)
def function[propagate_all_tables_info, parameter[self, write]]: constant[ Find any items (specimens, samples, sites, or locations) from tables other than measurements and make sure they each have a row in their own table. For example, if a site name is in the samples table but not in the sites table, create a row for it in the sites table. ] for taget[name[table_name]] in starred[list[[<ast.Constant object at 0x7da1b04acc10>, <ast.Constant object at 0x7da1b04acc40>, <ast.Constant object at 0x7da1b04acc70>, <ast.Constant object at 0x7da1b04acca0>]]] begin[:] if <ast.UnaryOp object at 0x7da1b04acd30> begin[:] continue variable[df] assign[=] call[name[self].tables][name[table_name]].df <ast.Tuple object at 0x7da1b04ad000> assign[=] call[name[self].get_parent_and_child, parameter[name[table_name]]] if name[parent_name] begin[:] if compare[call[name[parent_name]][<ast.Slice object at 0x7da1b04ad270>] in name[df].columns] begin[:] variable[parents] assign[=] call[name[sorted], parameter[call[name[set], parameter[call[call[call[name[df]][call[name[parent_name]][<ast.Slice object at 0x7da1b04ad630>]].dropna, parameter[]].values.astype, parameter[name[str]]]]]]] if compare[name[parent_name] in name[self].tables] begin[:] variable[parent_df] assign[=] call[name[self].tables][name[parent_name]].df variable[missing_parents] assign[=] binary_operation[call[name[set], parameter[name[parents]]] - call[name[set], parameter[name[parent_df].index]]] if name[missing_parents] begin[:] call[name[print], parameter[call[constant[-I- Updating {} table with values from {} table].format, parameter[name[parent_name], name[table_name]]]]] for taget[name[item]] in starred[name[missing_parents]] begin[:] call[name[self].add_item, parameter[name[parent_name], dictionary[[<ast.Subscript object at 0x7da1b04aded0>], [<ast.Name object at 0x7da1b04adfc0>]]]] if name[write] begin[:] call[name[self].write_table_to_file, parameter[name[parent_name]]] if name[child_name] begin[:] if compare[name[child_name] in name[df].columns] begin[:] variable[raw_children] assign[=] call[call[call[name[df]][name[child_name]].dropna, parameter[]].str.split, parameter[constant[:]]] variable[parent_of_child] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b04aff40>, <ast.Name object at 0x7da1b04aff70>]]] in starred[call[name[raw_children].items, parameter[]]] begin[:] for taget[name[child]] in starred[name[children]] begin[:] variable[child] assign[=] call[name[child].strip, parameter[]] variable[old_parent] assign[=] call[name[parent_of_child].get, parameter[name[child]]] if <ast.BoolOp object at 0x7da1b0467f10> begin[:] call[name[print], parameter[call[constant[-I- for {} {}, replacing: {} with: {}].format, parameter[call[name[child_name]][<ast.Slice object at 0x7da1b0465840>], name[child], name[old_parent], name[parent]]]]] call[name[parent_of_child]][name[child]] assign[=] name[parent] if compare[name[child_name] in name[self].tables] begin[:] variable[child_df] assign[=] call[name[self].tables][name[child_name]].df variable[missing_children] assign[=] binary_operation[call[name[set], parameter[call[name[parent_of_child].keys, parameter[]]]] - call[name[set], parameter[name[child_df].index]]] if name[missing_children] begin[:] call[name[print], parameter[call[constant[-I- Updating {} table with values from {} table].format, parameter[name[child_name], name[table_name]]]]] for taget[name[item]] in starred[name[missing_children]] begin[:] variable[data] assign[=] dictionary[[<ast.Subscript object at 0x7da1b0467d60>, <ast.Subscript object at 0x7da1b0465a80>], [<ast.Name object at 0x7da1b0465300>, <ast.Subscript object at 0x7da1b04672e0>]] call[name[self].add_item, parameter[name[child_name], name[data]]] if name[write] begin[:] call[name[self].write_table_to_file, parameter[name[child_name]]]
keyword[def] identifier[propagate_all_tables_info] ( identifier[self] , identifier[write] = keyword[True] ): literal[string] keyword[for] identifier[table_name] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[if] keyword[not] identifier[table_name] keyword[in] identifier[self] . identifier[tables] : keyword[continue] identifier[df] = identifier[self] . identifier[tables] [ identifier[table_name] ]. identifier[df] identifier[parent_name] , identifier[child_name] = identifier[self] . identifier[get_parent_and_child] ( identifier[table_name] ) keyword[if] identifier[parent_name] : keyword[if] identifier[parent_name] [:- literal[int] ] keyword[in] identifier[df] . identifier[columns] : identifier[parents] = identifier[sorted] ( identifier[set] ( identifier[df] [ identifier[parent_name] [:- literal[int] ]]. identifier[dropna] (). identifier[values] . identifier[astype] ( identifier[str] ))) keyword[if] identifier[parent_name] keyword[in] identifier[self] . identifier[tables] : identifier[parent_df] = identifier[self] . identifier[tables] [ identifier[parent_name] ]. identifier[df] identifier[missing_parents] = identifier[set] ( identifier[parents] )- identifier[set] ( identifier[parent_df] . identifier[index] ) keyword[if] identifier[missing_parents] : identifier[print] ( literal[string] . identifier[format] ( identifier[parent_name] , identifier[table_name] )) keyword[for] identifier[item] keyword[in] identifier[missing_parents] : identifier[self] . identifier[add_item] ( identifier[parent_name] ,{ identifier[parent_name] [:- literal[int] ]: identifier[item] }, identifier[label] = identifier[item] ) keyword[if] identifier[write] : identifier[self] . identifier[write_table_to_file] ( identifier[parent_name] ) keyword[else] : keyword[if] identifier[parents] : identifier[print] ( literal[string] . identifier[format] ( identifier[parent_name] , identifier[table_name] )) identifier[grandparent_name] = identifier[self] . identifier[get_parent_and_child] ( identifier[parent_name] )[ literal[int] ] keyword[if] identifier[grandparent_name] : identifier[grandparent] = literal[string] keyword[if] identifier[grandparent_name] keyword[in] identifier[df] . identifier[columns] : identifier[grandparent] = identifier[df] [ identifier[df] [ identifier[parent_name] ]== identifier[item] ][ identifier[grandparent_name] ]. identifier[values] [ literal[int] ] identifier[columns] =[ identifier[parent_name] [:- literal[int] ]] keyword[else] : identifier[columns] =[ identifier[parent_name] [:- literal[int] ]] identifier[parent_df] = identifier[pd] . identifier[DataFrame] ( identifier[columns] = identifier[columns] , identifier[index] = identifier[parents] ) identifier[parent_df] [ identifier[parent_name] [:- literal[int] ]]= identifier[parent_df] . identifier[index] keyword[if] identifier[grandparent_name] : keyword[if] identifier[grandparent_name] [:- literal[int] ] keyword[in] identifier[df] . identifier[columns] : identifier[parent_df] = identifier[pd] . identifier[merge] ( identifier[df] [[ identifier[parent_name] [:- literal[int] ], identifier[grandparent_name] [:- literal[int] ]]], identifier[parent_df] , identifier[on] = identifier[parent_name] [:- literal[int] ]) identifier[self] . identifier[tables] [ identifier[parent_name] ]= identifier[MagicDataFrame] ( identifier[dtype] = identifier[parent_name] , identifier[df] = identifier[parent_df] ) keyword[if] identifier[write] : identifier[self] . identifier[write_table_to_file] ( identifier[parent_name] ) keyword[if] identifier[child_name] : keyword[if] identifier[child_name] keyword[in] identifier[df] . identifier[columns] : identifier[raw_children] = identifier[df] [ identifier[child_name] ]. identifier[dropna] (). identifier[str] . identifier[split] ( literal[string] ) identifier[parent_of_child] ={} keyword[for] identifier[parent] , identifier[children] keyword[in] identifier[raw_children] . identifier[items] (): keyword[for] identifier[child] keyword[in] identifier[children] : identifier[child] = identifier[child] . identifier[strip] () identifier[old_parent] = identifier[parent_of_child] . identifier[get] ( identifier[child] ) keyword[if] identifier[old_parent] keyword[and] identifier[parent] keyword[and] ( identifier[old_parent] != identifier[parent] ): identifier[print] ( literal[string] . identifier[format] ( identifier[child_name] [:- literal[int] ], identifier[child] , identifier[old_parent] , identifier[parent] )) identifier[parent_of_child] [ identifier[child] ]= identifier[parent] keyword[if] identifier[child_name] keyword[in] identifier[self] . identifier[tables] : identifier[child_df] = identifier[self] . identifier[tables] [ identifier[child_name] ]. identifier[df] identifier[missing_children] = identifier[set] ( identifier[parent_of_child] . identifier[keys] ())- identifier[set] ( identifier[child_df] . identifier[index] ) keyword[if] identifier[missing_children] : identifier[print] ( literal[string] . identifier[format] ( identifier[child_name] , identifier[table_name] )) keyword[for] identifier[item] keyword[in] identifier[missing_children] : identifier[data] ={ identifier[child_name] [:- literal[int] ]: identifier[item] , identifier[table_name] [:- literal[int] ]: identifier[parent_of_child] [ identifier[item] ]} identifier[self] . identifier[add_item] ( identifier[child_name] , identifier[data] , identifier[label] = identifier[item] ) keyword[if] identifier[write] : identifier[self] . identifier[write_table_to_file] ( identifier[child_name] ) keyword[else] : keyword[if] identifier[children] : identifier[print] ( literal[string] . identifier[format] ( identifier[child_name] , identifier[table_name] )) identifier[children_list] = identifier[sorted] ( identifier[parent_of_child] . identifier[keys] ()) identifier[children_data] =[[ identifier[child_name] , identifier[parent_of_child] [ identifier[c_name] ]] keyword[for] identifier[c_name] keyword[in] identifier[children_list] ] identifier[child_df] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[children_list] , identifier[columns] =[ identifier[child_name] [:- literal[int] ], identifier[table_name] [:- literal[int] ]], identifier[data] = identifier[children_data] ) identifier[self] . identifier[tables] [ identifier[child_name] ]= identifier[MagicDataFrame] ( identifier[dtype] = identifier[child_name] , identifier[df] = identifier[child_df] ) keyword[if] identifier[write] : identifier[self] . identifier[write_table_to_file] ( identifier[child_name] )
def propagate_all_tables_info(self, write=True): """ Find any items (specimens, samples, sites, or locations) from tables other than measurements and make sure they each have a row in their own table. For example, if a site name is in the samples table but not in the sites table, create a row for it in the sites table. """ for table_name in ['specimens', 'samples', 'sites', 'locations']: if not table_name in self.tables: continue # depends on [control=['if'], data=[]] df = self.tables[table_name].df (parent_name, child_name) = self.get_parent_and_child(table_name) if parent_name: if parent_name[:-1] in df.columns: parents = sorted(set(df[parent_name[:-1]].dropna().values.astype(str))) if parent_name in self.tables: # if there is a parent table, update it parent_df = self.tables[parent_name].df missing_parents = set(parents) - set(parent_df.index) if missing_parents: # add any missing values print('-I- Updating {} table with values from {} table'.format(parent_name, table_name)) for item in missing_parents: self.add_item(parent_name, {parent_name[:-1]: item}, label=item) # depends on [control=['for'], data=['item']] # save any changes to file if write: self.write_table_to_file(parent_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['parent_name']] # if there is no parent table, create it if necessary elif parents: # create a parent_df with the names you got from the child print('-I- Creating new {} table with data from {} table'.format(parent_name, table_name)) # add in the grandparent if available grandparent_name = self.get_parent_and_child(parent_name)[0] if grandparent_name: grandparent = '' if grandparent_name in df.columns: grandparent = df[df[parent_name] == item][grandparent_name].values[0] # depends on [control=['if'], data=['grandparent_name']] columns = [parent_name[:-1]] #, grandparent_name[:-1]] # depends on [control=['if'], data=[]] else: columns = [parent_name[:-1]] parent_df = pd.DataFrame(columns=columns, index=parents) parent_df[parent_name[:-1]] = parent_df.index if grandparent_name: if grandparent_name[:-1] in df.columns: parent_df = pd.merge(df[[parent_name[:-1], grandparent_name[:-1]]], parent_df, on=parent_name[:-1]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] self.tables[parent_name] = MagicDataFrame(dtype=parent_name, df=parent_df) if write: # save new table to file self.write_table_to_file(parent_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if child_name: if child_name in df.columns: raw_children = df[child_name].dropna().str.split(':') # create dict of all children with parent info parent_of_child = {} for (parent, children) in raw_children.items(): for child in children: # remove whitespace child = child.strip() old_parent = parent_of_child.get(child) if old_parent and parent and (old_parent != parent): print('-I- for {} {}, replacing: {} with: {}'.format(child_name[:-1], child, old_parent, parent)) # depends on [control=['if'], data=[]] parent_of_child[child] = parent # depends on [control=['for'], data=['child']] # depends on [control=['for'], data=[]] # old way: # flatten list, ignore duplicates #children = sorted(set([item.strip() for sublist in raw_children for item in sublist])) if child_name in self.tables: # if there is already a child table, update it child_df = self.tables[child_name].df missing_children = set(parent_of_child.keys()) - set(child_df.index) if missing_children: # add any missing values print('-I- Updating {} table with values from {} table'.format(child_name, table_name)) for item in missing_children: data = {child_name[:-1]: item, table_name[:-1]: parent_of_child[item]} self.add_item(child_name, data, label=item) # depends on [control=['for'], data=['item']] if write: # save any changes to file self.write_table_to_file(child_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['child_name']] # if there is no child table, create it if necessary elif children: # create a child_df with the names you got from the parent print('-I- Creating new {} table with data from {} table'.format(child_name, table_name)) # old way to make new table: #child_df = pd.DataFrame(columns=[table_name[:-1]], index=children) # new way to make new table children_list = sorted(parent_of_child.keys()) children_data = [[child_name, parent_of_child[c_name]] for c_name in children_list] child_df = pd.DataFrame(index=children_list, columns=[child_name[:-1], table_name[:-1]], data=children_data) self.tables[child_name] = MagicDataFrame(dtype=child_name, df=child_df) if write: # save new table to file self.write_table_to_file(child_name) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['child_name']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['table_name']]
def depthToRGB8(float_img_buff, encoding): ''' Translates from Distance Image format to RGB. Inf values are represented by NaN, when converting to RGB, NaN passed to 0 @param float_img_buff: ROS Image to translate @type img: ros image @return a Opencv RGB image ''' gray_image = None if (encoding[-3:-2]== "U"): gray_image = float_img_buff else: float_img = np.zeros((float_img_buff.shape[0], float_img_buff.shape[1], 1), dtype = "float32") float_img.data = float_img_buff.data gray_image=cv2.convertScaleAbs(float_img, alpha=255/MAXRANGE) cv_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB) return cv_image
def function[depthToRGB8, parameter[float_img_buff, encoding]]: constant[ Translates from Distance Image format to RGB. Inf values are represented by NaN, when converting to RGB, NaN passed to 0 @param float_img_buff: ROS Image to translate @type img: ros image @return a Opencv RGB image ] variable[gray_image] assign[=] constant[None] if compare[call[name[encoding]][<ast.Slice object at 0x7da20c993c10>] equal[==] constant[U]] begin[:] variable[gray_image] assign[=] name[float_img_buff] variable[cv_image] assign[=] call[name[cv2].cvtColor, parameter[name[gray_image], name[cv2].COLOR_GRAY2RGB]] return[name[cv_image]]
keyword[def] identifier[depthToRGB8] ( identifier[float_img_buff] , identifier[encoding] ): literal[string] identifier[gray_image] = keyword[None] keyword[if] ( identifier[encoding] [- literal[int] :- literal[int] ]== literal[string] ): identifier[gray_image] = identifier[float_img_buff] keyword[else] : identifier[float_img] = identifier[np] . identifier[zeros] (( identifier[float_img_buff] . identifier[shape] [ literal[int] ], identifier[float_img_buff] . identifier[shape] [ literal[int] ], literal[int] ), identifier[dtype] = literal[string] ) identifier[float_img] . identifier[data] = identifier[float_img_buff] . identifier[data] identifier[gray_image] = identifier[cv2] . identifier[convertScaleAbs] ( identifier[float_img] , identifier[alpha] = literal[int] / identifier[MAXRANGE] ) identifier[cv_image] = identifier[cv2] . identifier[cvtColor] ( identifier[gray_image] , identifier[cv2] . identifier[COLOR_GRAY2RGB] ) keyword[return] identifier[cv_image]
def depthToRGB8(float_img_buff, encoding): """ Translates from Distance Image format to RGB. Inf values are represented by NaN, when converting to RGB, NaN passed to 0 @param float_img_buff: ROS Image to translate @type img: ros image @return a Opencv RGB image """ gray_image = None if encoding[-3:-2] == 'U': gray_image = float_img_buff # depends on [control=['if'], data=[]] else: float_img = np.zeros((float_img_buff.shape[0], float_img_buff.shape[1], 1), dtype='float32') float_img.data = float_img_buff.data gray_image = cv2.convertScaleAbs(float_img, alpha=255 / MAXRANGE) cv_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2RGB) return cv_image
def fit(self, dataset): """ Computes the inverse document frequency. :param dataset: an RDD of term frequency vectors """ if not isinstance(dataset, RDD): raise TypeError("dataset should be an RDD of term frequency vectors") jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector)) return IDFModel(jmodel)
def function[fit, parameter[self, dataset]]: constant[ Computes the inverse document frequency. :param dataset: an RDD of term frequency vectors ] if <ast.UnaryOp object at 0x7da20c9900a0> begin[:] <ast.Raise object at 0x7da20c992890> variable[jmodel] assign[=] call[name[callMLlibFunc], parameter[constant[fitIDF], name[self].minDocFreq, call[name[dataset].map, parameter[name[_convert_to_vector]]]]] return[call[name[IDFModel], parameter[name[jmodel]]]]
keyword[def] identifier[fit] ( identifier[self] , identifier[dataset] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[dataset] , identifier[RDD] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[jmodel] = identifier[callMLlibFunc] ( literal[string] , identifier[self] . identifier[minDocFreq] , identifier[dataset] . identifier[map] ( identifier[_convert_to_vector] )) keyword[return] identifier[IDFModel] ( identifier[jmodel] )
def fit(self, dataset): """ Computes the inverse document frequency. :param dataset: an RDD of term frequency vectors """ if not isinstance(dataset, RDD): raise TypeError('dataset should be an RDD of term frequency vectors') # depends on [control=['if'], data=[]] jmodel = callMLlibFunc('fitIDF', self.minDocFreq, dataset.map(_convert_to_vector)) return IDFModel(jmodel)
async def stop(self): """Stop this task and wait until it and all its subtasks end. This function will finalize this task either by using the finalizer function passed during creation or by calling task.cancel() if no finalizer was passed. It will then call join() on this task and any registered subtasks with the given maximum timeout, raising asyncio.TimeoutError if the tasks did not exit within the given timeout. This method should only be called once. After this method returns, the task is finished and no more subtasks can be added. If this task is being tracked inside of the BackgroundEventLoop that it is part of, it will automatically be removed from the event loop's list of tasks. """ if self.stopped: return self._logger.debug("Stopping task %s", self.name) if self._finalizer is not None: try: result = self._finalizer(self) if inspect.isawaitable(result): await result except: #pylint:disable=bare-except;We need to make sure we always wait for the task self._logger.exception("Error running finalizer for task %s", self.name) elif self.task is not None: self.task.cancel() tasks = [] if self.task is not None: tasks.append(self.task) tasks.extend(x.task for x in self.subtasks) finished = asyncio.gather(*tasks, return_exceptions=True) outcomes = [] try: outcomes = await asyncio.wait_for(finished, timeout=self._stop_timeout) except asyncio.TimeoutError as err: # See discussion here: https://github.com/python/asyncio/issues/253#issuecomment-120138132 # This prevents a nuisance log error message, finished is guaranteed # to be cancelled but not awaited when wait_for() has a timeout. try: outcomes = await finished except asyncio.CancelledError: pass # See https://mail.python.org/pipermail/python-3000/2008-May/013740.html # for why we need to explictly name the error here raise err finally: self.stopped = True for outcome in outcomes: if isinstance(outcome, Exception) and not isinstance(outcome, asyncio.CancelledError): self._logger.error(outcome) if self in self._loop.tasks: self._loop.tasks.remove(self)
<ast.AsyncFunctionDef object at 0x7da20e956ce0>
keyword[async] keyword[def] identifier[stop] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[stopped] : keyword[return] identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] ) keyword[if] identifier[self] . identifier[_finalizer] keyword[is] keyword[not] keyword[None] : keyword[try] : identifier[result] = identifier[self] . identifier[_finalizer] ( identifier[self] ) keyword[if] identifier[inspect] . identifier[isawaitable] ( identifier[result] ): keyword[await] identifier[result] keyword[except] : identifier[self] . identifier[_logger] . identifier[exception] ( literal[string] , identifier[self] . identifier[name] ) keyword[elif] identifier[self] . identifier[task] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[task] . identifier[cancel] () identifier[tasks] =[] keyword[if] identifier[self] . identifier[task] keyword[is] keyword[not] keyword[None] : identifier[tasks] . identifier[append] ( identifier[self] . identifier[task] ) identifier[tasks] . identifier[extend] ( identifier[x] . identifier[task] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[subtasks] ) identifier[finished] = identifier[asyncio] . identifier[gather] (* identifier[tasks] , identifier[return_exceptions] = keyword[True] ) identifier[outcomes] =[] keyword[try] : identifier[outcomes] = keyword[await] identifier[asyncio] . identifier[wait_for] ( identifier[finished] , identifier[timeout] = identifier[self] . identifier[_stop_timeout] ) keyword[except] identifier[asyncio] . identifier[TimeoutError] keyword[as] identifier[err] : keyword[try] : identifier[outcomes] = keyword[await] identifier[finished] keyword[except] identifier[asyncio] . identifier[CancelledError] : keyword[pass] keyword[raise] identifier[err] keyword[finally] : identifier[self] . identifier[stopped] = keyword[True] keyword[for] identifier[outcome] keyword[in] identifier[outcomes] : keyword[if] identifier[isinstance] ( identifier[outcome] , identifier[Exception] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[outcome] , identifier[asyncio] . identifier[CancelledError] ): identifier[self] . identifier[_logger] . identifier[error] ( identifier[outcome] ) keyword[if] identifier[self] keyword[in] identifier[self] . identifier[_loop] . identifier[tasks] : identifier[self] . identifier[_loop] . identifier[tasks] . identifier[remove] ( identifier[self] )
async def stop(self): """Stop this task and wait until it and all its subtasks end. This function will finalize this task either by using the finalizer function passed during creation or by calling task.cancel() if no finalizer was passed. It will then call join() on this task and any registered subtasks with the given maximum timeout, raising asyncio.TimeoutError if the tasks did not exit within the given timeout. This method should only be called once. After this method returns, the task is finished and no more subtasks can be added. If this task is being tracked inside of the BackgroundEventLoop that it is part of, it will automatically be removed from the event loop's list of tasks. """ if self.stopped: return # depends on [control=['if'], data=[]] self._logger.debug('Stopping task %s', self.name) if self._finalizer is not None: try: result = self._finalizer(self) if inspect.isawaitable(result): await result # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except: #pylint:disable=bare-except;We need to make sure we always wait for the task self._logger.exception('Error running finalizer for task %s', self.name) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif self.task is not None: self.task.cancel() # depends on [control=['if'], data=[]] tasks = [] if self.task is not None: tasks.append(self.task) # depends on [control=['if'], data=[]] tasks.extend((x.task for x in self.subtasks)) finished = asyncio.gather(*tasks, return_exceptions=True) outcomes = [] try: outcomes = await asyncio.wait_for(finished, timeout=self._stop_timeout) # depends on [control=['try'], data=[]] except asyncio.TimeoutError as err: # See discussion here: https://github.com/python/asyncio/issues/253#issuecomment-120138132 # This prevents a nuisance log error message, finished is guaranteed # to be cancelled but not awaited when wait_for() has a timeout. try: outcomes = await finished # depends on [control=['try'], data=[]] except asyncio.CancelledError: pass # depends on [control=['except'], data=[]] # See https://mail.python.org/pipermail/python-3000/2008-May/013740.html # for why we need to explictly name the error here raise err # depends on [control=['except'], data=['err']] finally: self.stopped = True for outcome in outcomes: if isinstance(outcome, Exception) and (not isinstance(outcome, asyncio.CancelledError)): self._logger.error(outcome) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['outcome']] if self in self._loop.tasks: self._loop.tasks.remove(self) # depends on [control=['if'], data=['self']]
def _get_vm_info(self): """ Returns this VM info. :returns: dict of info """ vm_info = {} results = yield from self.manager.execute("showvminfo", [self._vmname, "--machinereadable"]) for info in results: try: name, value = info.split('=', 1) except ValueError: continue vm_info[name.strip('"')] = value.strip('"') return vm_info
def function[_get_vm_info, parameter[self]]: constant[ Returns this VM info. :returns: dict of info ] variable[vm_info] assign[=] dictionary[[], []] variable[results] assign[=] <ast.YieldFrom object at 0x7da20c6c7220> for taget[name[info]] in starred[name[results]] begin[:] <ast.Try object at 0x7da20c6c7c70> call[name[vm_info]][call[name[name].strip, parameter[constant["]]]] assign[=] call[name[value].strip, parameter[constant["]]] return[name[vm_info]]
keyword[def] identifier[_get_vm_info] ( identifier[self] ): literal[string] identifier[vm_info] ={} identifier[results] = keyword[yield] keyword[from] identifier[self] . identifier[manager] . identifier[execute] ( literal[string] ,[ identifier[self] . identifier[_vmname] , literal[string] ]) keyword[for] identifier[info] keyword[in] identifier[results] : keyword[try] : identifier[name] , identifier[value] = identifier[info] . identifier[split] ( literal[string] , literal[int] ) keyword[except] identifier[ValueError] : keyword[continue] identifier[vm_info] [ identifier[name] . identifier[strip] ( literal[string] )]= identifier[value] . identifier[strip] ( literal[string] ) keyword[return] identifier[vm_info]
def _get_vm_info(self): """ Returns this VM info. :returns: dict of info """ vm_info = {} results = (yield from self.manager.execute('showvminfo', [self._vmname, '--machinereadable'])) for info in results: try: (name, value) = info.split('=', 1) # depends on [control=['try'], data=[]] except ValueError: continue # depends on [control=['except'], data=[]] vm_info[name.strip('"')] = value.strip('"') # depends on [control=['for'], data=['info']] return vm_info
def file_query_size(self, path, follow_symlinks): """Queries the size of a regular file in the guest. in path of type str Path to the file which size is requested. Guest path style. in follow_symlinks of type bool It @c true, symbolic links in the final path component will be followed to their target, and the size of the target is returned. If @c false, symbolic links in the final path component will make the method call fail (symblink is not a regular file). return size of type int Queried file size. raises :class:`VBoxErrorObjectNotFound` File to was not found. raises :class:`VBoxErrorIprtError` Error querying file size. """ if not isinstance(path, basestring): raise TypeError("path can only be an instance of type basestring") if not isinstance(follow_symlinks, bool): raise TypeError("follow_symlinks can only be an instance of type bool") size = self._call("fileQuerySize", in_p=[path, follow_symlinks]) return size
def function[file_query_size, parameter[self, path, follow_symlinks]]: constant[Queries the size of a regular file in the guest. in path of type str Path to the file which size is requested. Guest path style. in follow_symlinks of type bool It @c true, symbolic links in the final path component will be followed to their target, and the size of the target is returned. If @c false, symbolic links in the final path component will make the method call fail (symblink is not a regular file). return size of type int Queried file size. raises :class:`VBoxErrorObjectNotFound` File to was not found. raises :class:`VBoxErrorIprtError` Error querying file size. ] if <ast.UnaryOp object at 0x7da20c6c4610> begin[:] <ast.Raise object at 0x7da20c6c5150> if <ast.UnaryOp object at 0x7da20e9b1fc0> begin[:] <ast.Raise object at 0x7da20e9b3e50> variable[size] assign[=] call[name[self]._call, parameter[constant[fileQuerySize]]] return[name[size]]
keyword[def] identifier[file_query_size] ( identifier[self] , identifier[path] , identifier[follow_symlinks] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[path] , identifier[basestring] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[follow_symlinks] , identifier[bool] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[size] = identifier[self] . identifier[_call] ( literal[string] , identifier[in_p] =[ identifier[path] , identifier[follow_symlinks] ]) keyword[return] identifier[size]
def file_query_size(self, path, follow_symlinks): """Queries the size of a regular file in the guest. in path of type str Path to the file which size is requested. Guest path style. in follow_symlinks of type bool It @c true, symbolic links in the final path component will be followed to their target, and the size of the target is returned. If @c false, symbolic links in the final path component will make the method call fail (symblink is not a regular file). return size of type int Queried file size. raises :class:`VBoxErrorObjectNotFound` File to was not found. raises :class:`VBoxErrorIprtError` Error querying file size. """ if not isinstance(path, basestring): raise TypeError('path can only be an instance of type basestring') # depends on [control=['if'], data=[]] if not isinstance(follow_symlinks, bool): raise TypeError('follow_symlinks can only be an instance of type bool') # depends on [control=['if'], data=[]] size = self._call('fileQuerySize', in_p=[path, follow_symlinks]) return size
def _draw_chars(self, data, to_draw): """ Draw the specified charachters using the specified format. """ i = 0 while not self._cursor.atBlockEnd() and i < len(to_draw) and len(to_draw) > 1: self._cursor.deleteChar() i += 1 self._cursor.insertText(to_draw, data.fmt)
def function[_draw_chars, parameter[self, data, to_draw]]: constant[ Draw the specified charachters using the specified format. ] variable[i] assign[=] constant[0] while <ast.BoolOp object at 0x7da20c76f8b0> begin[:] call[name[self]._cursor.deleteChar, parameter[]] <ast.AugAssign object at 0x7da20c76d270> call[name[self]._cursor.insertText, parameter[name[to_draw], name[data].fmt]]
keyword[def] identifier[_draw_chars] ( identifier[self] , identifier[data] , identifier[to_draw] ): literal[string] identifier[i] = literal[int] keyword[while] keyword[not] identifier[self] . identifier[_cursor] . identifier[atBlockEnd] () keyword[and] identifier[i] < identifier[len] ( identifier[to_draw] ) keyword[and] identifier[len] ( identifier[to_draw] )> literal[int] : identifier[self] . identifier[_cursor] . identifier[deleteChar] () identifier[i] += literal[int] identifier[self] . identifier[_cursor] . identifier[insertText] ( identifier[to_draw] , identifier[data] . identifier[fmt] )
def _draw_chars(self, data, to_draw): """ Draw the specified charachters using the specified format. """ i = 0 while not self._cursor.atBlockEnd() and i < len(to_draw) and (len(to_draw) > 1): self._cursor.deleteChar() i += 1 # depends on [control=['while'], data=[]] self._cursor.insertText(to_draw, data.fmt)
def expr_to_str(n, l=None): """ construct SQL string from expression node """ op = n[0] if op.startswith('_') and op.endswith('_'): op = op.strip('_') if op == 'var': return n[1] elif op == 'literal': if isinstance(n[1], basestring): return "'%s'" % n[1] return str(n[1]) elif op == 'cast': return "(%s)::%s" % (expr_to_str(n[1]), n[2]) elif op in '+-*/': return "(%s) %s (%s)" % (expr_to_str(n[1]), op, expr_to_str(n[2])) elif op == "extract": return "extract( %s from %s )" % (n[1], expr_to_str(n[2])) else: arg = ','.join(map(expr_to_str, n[1:])) return "%s(%s)" % (op, arg)
def function[expr_to_str, parameter[n, l]]: constant[ construct SQL string from expression node ] variable[op] assign[=] call[name[n]][constant[0]] if <ast.BoolOp object at 0x7da18f00f2b0> begin[:] variable[op] assign[=] call[name[op].strip, parameter[constant[_]]] if compare[name[op] equal[==] constant[var]] begin[:] return[call[name[n]][constant[1]]]
keyword[def] identifier[expr_to_str] ( identifier[n] , identifier[l] = keyword[None] ): literal[string] identifier[op] = identifier[n] [ literal[int] ] keyword[if] identifier[op] . identifier[startswith] ( literal[string] ) keyword[and] identifier[op] . identifier[endswith] ( literal[string] ): identifier[op] = identifier[op] . identifier[strip] ( literal[string] ) keyword[if] identifier[op] == literal[string] : keyword[return] identifier[n] [ literal[int] ] keyword[elif] identifier[op] == literal[string] : keyword[if] identifier[isinstance] ( identifier[n] [ literal[int] ], identifier[basestring] ): keyword[return] literal[string] % identifier[n] [ literal[int] ] keyword[return] identifier[str] ( identifier[n] [ literal[int] ]) keyword[elif] identifier[op] == literal[string] : keyword[return] literal[string] %( identifier[expr_to_str] ( identifier[n] [ literal[int] ]), identifier[n] [ literal[int] ]) keyword[elif] identifier[op] keyword[in] literal[string] : keyword[return] literal[string] %( identifier[expr_to_str] ( identifier[n] [ literal[int] ]), identifier[op] , identifier[expr_to_str] ( identifier[n] [ literal[int] ])) keyword[elif] identifier[op] == literal[string] : keyword[return] literal[string] %( identifier[n] [ literal[int] ], identifier[expr_to_str] ( identifier[n] [ literal[int] ])) keyword[else] : identifier[arg] = literal[string] . identifier[join] ( identifier[map] ( identifier[expr_to_str] , identifier[n] [ literal[int] :])) keyword[return] literal[string] %( identifier[op] , identifier[arg] )
def expr_to_str(n, l=None): """ construct SQL string from expression node """ op = n[0] if op.startswith('_') and op.endswith('_'): op = op.strip('_') if op == 'var': return n[1] # depends on [control=['if'], data=[]] elif op == 'literal': if isinstance(n[1], basestring): return "'%s'" % n[1] # depends on [control=['if'], data=[]] return str(n[1]) # depends on [control=['if'], data=[]] elif op == 'cast': return '(%s)::%s' % (expr_to_str(n[1]), n[2]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif op in '+-*/': return '(%s) %s (%s)' % (expr_to_str(n[1]), op, expr_to_str(n[2])) # depends on [control=['if'], data=['op']] elif op == 'extract': return 'extract( %s from %s )' % (n[1], expr_to_str(n[2])) # depends on [control=['if'], data=[]] else: arg = ','.join(map(expr_to_str, n[1:])) return '%s(%s)' % (op, arg)
def isbn10(self): ''' Encode ISBN number in ISBN10 format Raises exception if Bookland number different from 978 @rtype: string @return: ISBN formated as ISBN10 ''' if self._id[0:3] != '978': raise ISBNError("Invalid Bookland code: {}".format(self._id[0:3])) digit10 = _digit10(self._id[3:12]) if digit10 == 10: return self._id[3:12] + 'X' else: return self._id[3:12] + str(digit10)
def function[isbn10, parameter[self]]: constant[ Encode ISBN number in ISBN10 format Raises exception if Bookland number different from 978 @rtype: string @return: ISBN formated as ISBN10 ] if compare[call[name[self]._id][<ast.Slice object at 0x7da1b0805240>] not_equal[!=] constant[978]] begin[:] <ast.Raise object at 0x7da1b0804b50> variable[digit10] assign[=] call[name[_digit10], parameter[call[name[self]._id][<ast.Slice object at 0x7da1b08065f0>]]] if compare[name[digit10] equal[==] constant[10]] begin[:] return[binary_operation[call[name[self]._id][<ast.Slice object at 0x7da1b0804fa0>] + constant[X]]]
keyword[def] identifier[isbn10] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_id] [ literal[int] : literal[int] ]!= literal[string] : keyword[raise] identifier[ISBNError] ( literal[string] . identifier[format] ( identifier[self] . identifier[_id] [ literal[int] : literal[int] ])) identifier[digit10] = identifier[_digit10] ( identifier[self] . identifier[_id] [ literal[int] : literal[int] ]) keyword[if] identifier[digit10] == literal[int] : keyword[return] identifier[self] . identifier[_id] [ literal[int] : literal[int] ]+ literal[string] keyword[else] : keyword[return] identifier[self] . identifier[_id] [ literal[int] : literal[int] ]+ identifier[str] ( identifier[digit10] )
def isbn10(self): """ Encode ISBN number in ISBN10 format Raises exception if Bookland number different from 978 @rtype: string @return: ISBN formated as ISBN10 """ if self._id[0:3] != '978': raise ISBNError('Invalid Bookland code: {}'.format(self._id[0:3])) # depends on [control=['if'], data=[]] digit10 = _digit10(self._id[3:12]) if digit10 == 10: return self._id[3:12] + 'X' # depends on [control=['if'], data=[]] else: return self._id[3:12] + str(digit10)
def delete_file(self, commit, path): """ Deletes a file from a Commit. DeleteFile leaves a tombstone in the Commit, assuming the file isn't written to later attempting to get the file from the finished commit will result in not found error. The file will of course remain intact in the Commit's parent. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. """ req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path)) self.stub.DeleteFile(req, metadata=self.metadata)
def function[delete_file, parameter[self, commit, path]]: constant[ Deletes a file from a Commit. DeleteFile leaves a tombstone in the Commit, assuming the file isn't written to later attempting to get the file from the finished commit will result in not found error. The file will of course remain intact in the Commit's parent. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. ] variable[req] assign[=] call[name[proto].DeleteFileRequest, parameter[]] call[name[self].stub.DeleteFile, parameter[name[req]]]
keyword[def] identifier[delete_file] ( identifier[self] , identifier[commit] , identifier[path] ): literal[string] identifier[req] = identifier[proto] . identifier[DeleteFileRequest] ( identifier[file] = identifier[proto] . identifier[File] ( identifier[commit] = identifier[commit_from] ( identifier[commit] ), identifier[path] = identifier[path] )) identifier[self] . identifier[stub] . identifier[DeleteFile] ( identifier[req] , identifier[metadata] = identifier[self] . identifier[metadata] )
def delete_file(self, commit, path): """ Deletes a file from a Commit. DeleteFile leaves a tombstone in the Commit, assuming the file isn't written to later attempting to get the file from the finished commit will result in not found error. The file will of course remain intact in the Commit's parent. Params: * commit: A tuple, string, or Commit object representing the commit. * path: The path to the file. """ req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path)) self.stub.DeleteFile(req, metadata=self.metadata)
def remove_child(self, rhs): """Remove a given child element, specified by name or as element.""" if type(rhs) is XMLElement: lib.lsl_remove_child(self.e, rhs.e) else: lib.lsl_remove_child_n(self.e, rhs)
def function[remove_child, parameter[self, rhs]]: constant[Remove a given child element, specified by name or as element.] if compare[call[name[type], parameter[name[rhs]]] is name[XMLElement]] begin[:] call[name[lib].lsl_remove_child, parameter[name[self].e, name[rhs].e]]
keyword[def] identifier[remove_child] ( identifier[self] , identifier[rhs] ): literal[string] keyword[if] identifier[type] ( identifier[rhs] ) keyword[is] identifier[XMLElement] : identifier[lib] . identifier[lsl_remove_child] ( identifier[self] . identifier[e] , identifier[rhs] . identifier[e] ) keyword[else] : identifier[lib] . identifier[lsl_remove_child_n] ( identifier[self] . identifier[e] , identifier[rhs] )
def remove_child(self, rhs): """Remove a given child element, specified by name or as element.""" if type(rhs) is XMLElement: lib.lsl_remove_child(self.e, rhs.e) # depends on [control=['if'], data=[]] else: lib.lsl_remove_child_n(self.e, rhs)
def find_ds_mapping(data_source, es_major_version): """ Find the mapping given a perceval data source :param data_source: name of the perceval data source :param es_major_version: string with the major version for Elasticsearch :return: a dict with the mappings (raw and enriched) """ mappings = {"raw": None, "enriched": None} # Backend connectors connectors = get_connectors() try: raw_klass = connectors[data_source][1] enrich_klass = connectors[data_source][2] except KeyError: print("Data source not found", data_source) sys.exit(1) # Mapping for raw index backend = raw_klass(None) if backend: mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items']) mappings['raw'] = [mapping, find_general_mappings(es_major_version)] # Mapping for enriched index backend = enrich_klass(None) if backend: mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items']) mappings['enriched'] = [mapping, find_general_mappings(es_major_version)] return mappings
def function[find_ds_mapping, parameter[data_source, es_major_version]]: constant[ Find the mapping given a perceval data source :param data_source: name of the perceval data source :param es_major_version: string with the major version for Elasticsearch :return: a dict with the mappings (raw and enriched) ] variable[mappings] assign[=] dictionary[[<ast.Constant object at 0x7da1b0fed480>, <ast.Constant object at 0x7da1b0fef0a0>], [<ast.Constant object at 0x7da1b0fed510>, <ast.Constant object at 0x7da1b0feea40>]] variable[connectors] assign[=] call[name[get_connectors], parameter[]] <ast.Try object at 0x7da1b0fee860> variable[backend] assign[=] call[name[raw_klass], parameter[constant[None]]] if name[backend] begin[:] variable[mapping] assign[=] call[name[json].loads, parameter[call[call[name[backend].mapping.get_elastic_mappings, parameter[name[es_major_version]]]][constant[items]]]] call[name[mappings]][constant[raw]] assign[=] list[[<ast.Name object at 0x7da1b0fec6a0>, <ast.Call object at 0x7da1b0fef700>]] variable[backend] assign[=] call[name[enrich_klass], parameter[constant[None]]] if name[backend] begin[:] variable[mapping] assign[=] call[name[json].loads, parameter[call[call[name[backend].mapping.get_elastic_mappings, parameter[name[es_major_version]]]][constant[items]]]] call[name[mappings]][constant[enriched]] assign[=] list[[<ast.Name object at 0x7da1b0fedd20>, <ast.Call object at 0x7da1b0fedde0>]] return[name[mappings]]
keyword[def] identifier[find_ds_mapping] ( identifier[data_source] , identifier[es_major_version] ): literal[string] identifier[mappings] ={ literal[string] : keyword[None] , literal[string] : keyword[None] } identifier[connectors] = identifier[get_connectors] () keyword[try] : identifier[raw_klass] = identifier[connectors] [ identifier[data_source] ][ literal[int] ] identifier[enrich_klass] = identifier[connectors] [ identifier[data_source] ][ literal[int] ] keyword[except] identifier[KeyError] : identifier[print] ( literal[string] , identifier[data_source] ) identifier[sys] . identifier[exit] ( literal[int] ) identifier[backend] = identifier[raw_klass] ( keyword[None] ) keyword[if] identifier[backend] : identifier[mapping] = identifier[json] . identifier[loads] ( identifier[backend] . identifier[mapping] . identifier[get_elastic_mappings] ( identifier[es_major_version] )[ literal[string] ]) identifier[mappings] [ literal[string] ]=[ identifier[mapping] , identifier[find_general_mappings] ( identifier[es_major_version] )] identifier[backend] = identifier[enrich_klass] ( keyword[None] ) keyword[if] identifier[backend] : identifier[mapping] = identifier[json] . identifier[loads] ( identifier[backend] . identifier[mapping] . identifier[get_elastic_mappings] ( identifier[es_major_version] )[ literal[string] ]) identifier[mappings] [ literal[string] ]=[ identifier[mapping] , identifier[find_general_mappings] ( identifier[es_major_version] )] keyword[return] identifier[mappings]
def find_ds_mapping(data_source, es_major_version): """ Find the mapping given a perceval data source :param data_source: name of the perceval data source :param es_major_version: string with the major version for Elasticsearch :return: a dict with the mappings (raw and enriched) """ mappings = {'raw': None, 'enriched': None} # Backend connectors connectors = get_connectors() try: raw_klass = connectors[data_source][1] enrich_klass = connectors[data_source][2] # depends on [control=['try'], data=[]] except KeyError: print('Data source not found', data_source) sys.exit(1) # depends on [control=['except'], data=[]] # Mapping for raw index backend = raw_klass(None) if backend: mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items']) mappings['raw'] = [mapping, find_general_mappings(es_major_version)] # depends on [control=['if'], data=[]] # Mapping for enriched index backend = enrich_klass(None) if backend: mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items']) mappings['enriched'] = [mapping, find_general_mappings(es_major_version)] # depends on [control=['if'], data=[]] return mappings
def get_per_pixel_mean(self, names=('train', 'test')): """ Args: names (tuple[str]): the names ('train' or 'test') of the datasets Returns: a mean image of all images in the given datasets, with size 32x32x3 """ for name in names: assert name in ['train', 'test'], name train_files, test_files, _ = get_filenames(self.dir, self.cifar_classnum) all_files = [] if 'train' in names: all_files.extend(train_files) if 'test' in names: all_files.extend(test_files) all_imgs = [x[0] for x in read_cifar(all_files, self.cifar_classnum)] arr = np.array(all_imgs, dtype='float32') mean = np.mean(arr, axis=0) return mean
def function[get_per_pixel_mean, parameter[self, names]]: constant[ Args: names (tuple[str]): the names ('train' or 'test') of the datasets Returns: a mean image of all images in the given datasets, with size 32x32x3 ] for taget[name[name]] in starred[name[names]] begin[:] assert[compare[name[name] in list[[<ast.Constant object at 0x7da18f720d00>, <ast.Constant object at 0x7da18f723910>]]]] <ast.Tuple object at 0x7da18f7205e0> assign[=] call[name[get_filenames], parameter[name[self].dir, name[self].cifar_classnum]] variable[all_files] assign[=] list[[]] if compare[constant[train] in name[names]] begin[:] call[name[all_files].extend, parameter[name[train_files]]] if compare[constant[test] in name[names]] begin[:] call[name[all_files].extend, parameter[name[test_files]]] variable[all_imgs] assign[=] <ast.ListComp object at 0x7da18f722fe0> variable[arr] assign[=] call[name[np].array, parameter[name[all_imgs]]] variable[mean] assign[=] call[name[np].mean, parameter[name[arr]]] return[name[mean]]
keyword[def] identifier[get_per_pixel_mean] ( identifier[self] , identifier[names] =( literal[string] , literal[string] )): literal[string] keyword[for] identifier[name] keyword[in] identifier[names] : keyword[assert] identifier[name] keyword[in] [ literal[string] , literal[string] ], identifier[name] identifier[train_files] , identifier[test_files] , identifier[_] = identifier[get_filenames] ( identifier[self] . identifier[dir] , identifier[self] . identifier[cifar_classnum] ) identifier[all_files] =[] keyword[if] literal[string] keyword[in] identifier[names] : identifier[all_files] . identifier[extend] ( identifier[train_files] ) keyword[if] literal[string] keyword[in] identifier[names] : identifier[all_files] . identifier[extend] ( identifier[test_files] ) identifier[all_imgs] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[read_cifar] ( identifier[all_files] , identifier[self] . identifier[cifar_classnum] )] identifier[arr] = identifier[np] . identifier[array] ( identifier[all_imgs] , identifier[dtype] = literal[string] ) identifier[mean] = identifier[np] . identifier[mean] ( identifier[arr] , identifier[axis] = literal[int] ) keyword[return] identifier[mean]
def get_per_pixel_mean(self, names=('train', 'test')): """ Args: names (tuple[str]): the names ('train' or 'test') of the datasets Returns: a mean image of all images in the given datasets, with size 32x32x3 """ for name in names: assert name in ['train', 'test'], name # depends on [control=['for'], data=['name']] (train_files, test_files, _) = get_filenames(self.dir, self.cifar_classnum) all_files = [] if 'train' in names: all_files.extend(train_files) # depends on [control=['if'], data=[]] if 'test' in names: all_files.extend(test_files) # depends on [control=['if'], data=[]] all_imgs = [x[0] for x in read_cifar(all_files, self.cifar_classnum)] arr = np.array(all_imgs, dtype='float32') mean = np.mean(arr, axis=0) return mean
def get_max_runs(x) -> np.array: """ Given a list of numbers, return a NumPy array of pairs (start index, end index + 1) of the runs of max value. Example:: >>> get_max_runs([7, 1, 2, 7, 7, 1, 2]) array([[0, 1], [3, 5]]) Assume x is not empty. Recipe comes from `Stack Overflow <http://stackoverflow.com/questions/1066758/find-length-of-sequences-of-identical-values-in-a-numpy-array>`_. """ # Get 0-1 array where 1 marks the max values of x x = np.array(x) m = np.max(x) y = (x == m) * 1 # Bound y by zeros to detect runs properly bounded = np.hstack(([0], y, [0])) # Get 1 at run starts and -1 at run ends diffs = np.diff(bounded) run_starts = np.where(diffs > 0)[0] run_ends = np.where(diffs < 0)[0] return np.array([run_starts, run_ends]).T
def function[get_max_runs, parameter[x]]: constant[ Given a list of numbers, return a NumPy array of pairs (start index, end index + 1) of the runs of max value. Example:: >>> get_max_runs([7, 1, 2, 7, 7, 1, 2]) array([[0, 1], [3, 5]]) Assume x is not empty. Recipe comes from `Stack Overflow <http://stackoverflow.com/questions/1066758/find-length-of-sequences-of-identical-values-in-a-numpy-array>`_. ] variable[x] assign[=] call[name[np].array, parameter[name[x]]] variable[m] assign[=] call[name[np].max, parameter[name[x]]] variable[y] assign[=] binary_operation[compare[name[x] equal[==] name[m]] * constant[1]] variable[bounded] assign[=] call[name[np].hstack, parameter[tuple[[<ast.List object at 0x7da20c992c20>, <ast.Name object at 0x7da20c993430>, <ast.List object at 0x7da20c991780>]]]] variable[diffs] assign[=] call[name[np].diff, parameter[name[bounded]]] variable[run_starts] assign[=] call[call[name[np].where, parameter[compare[name[diffs] greater[>] constant[0]]]]][constant[0]] variable[run_ends] assign[=] call[call[name[np].where, parameter[compare[name[diffs] less[<] constant[0]]]]][constant[0]] return[call[name[np].array, parameter[list[[<ast.Name object at 0x7da20c990cd0>, <ast.Name object at 0x7da20c9917b0>]]]].T]
keyword[def] identifier[get_max_runs] ( identifier[x] )-> identifier[np] . identifier[array] : literal[string] identifier[x] = identifier[np] . identifier[array] ( identifier[x] ) identifier[m] = identifier[np] . identifier[max] ( identifier[x] ) identifier[y] =( identifier[x] == identifier[m] )* literal[int] identifier[bounded] = identifier[np] . identifier[hstack] (([ literal[int] ], identifier[y] ,[ literal[int] ])) identifier[diffs] = identifier[np] . identifier[diff] ( identifier[bounded] ) identifier[run_starts] = identifier[np] . identifier[where] ( identifier[diffs] > literal[int] )[ literal[int] ] identifier[run_ends] = identifier[np] . identifier[where] ( identifier[diffs] < literal[int] )[ literal[int] ] keyword[return] identifier[np] . identifier[array] ([ identifier[run_starts] , identifier[run_ends] ]). identifier[T]
def get_max_runs(x) -> np.array: """ Given a list of numbers, return a NumPy array of pairs (start index, end index + 1) of the runs of max value. Example:: >>> get_max_runs([7, 1, 2, 7, 7, 1, 2]) array([[0, 1], [3, 5]]) Assume x is not empty. Recipe comes from `Stack Overflow <http://stackoverflow.com/questions/1066758/find-length-of-sequences-of-identical-values-in-a-numpy-array>`_. """ # Get 0-1 array where 1 marks the max values of x x = np.array(x) m = np.max(x) y = (x == m) * 1 # Bound y by zeros to detect runs properly bounded = np.hstack(([0], y, [0])) # Get 1 at run starts and -1 at run ends diffs = np.diff(bounded) run_starts = np.where(diffs > 0)[0] run_ends = np.where(diffs < 0)[0] return np.array([run_starts, run_ends]).T
def flatten(self): """ Get a flattened list of the items in the collection. :rtype: Collection """ def _flatten(d): if isinstance(d, dict): for v in d.values(): for nested_v in _flatten(v): yield nested_v elif isinstance(d, list): for list_v in d: for nested_v in _flatten(list_v): yield nested_v else: yield d return self.__class__(list(_flatten(self.items)))
def function[flatten, parameter[self]]: constant[ Get a flattened list of the items in the collection. :rtype: Collection ] def function[_flatten, parameter[d]]: if call[name[isinstance], parameter[name[d], name[dict]]] begin[:] for taget[name[v]] in starred[call[name[d].values, parameter[]]] begin[:] for taget[name[nested_v]] in starred[call[name[_flatten], parameter[name[v]]]] begin[:] <ast.Yield object at 0x7da1b0579fc0> return[call[name[self].__class__, parameter[call[name[list], parameter[call[name[_flatten], parameter[name[self].items]]]]]]]
keyword[def] identifier[flatten] ( identifier[self] ): literal[string] keyword[def] identifier[_flatten] ( identifier[d] ): keyword[if] identifier[isinstance] ( identifier[d] , identifier[dict] ): keyword[for] identifier[v] keyword[in] identifier[d] . identifier[values] (): keyword[for] identifier[nested_v] keyword[in] identifier[_flatten] ( identifier[v] ): keyword[yield] identifier[nested_v] keyword[elif] identifier[isinstance] ( identifier[d] , identifier[list] ): keyword[for] identifier[list_v] keyword[in] identifier[d] : keyword[for] identifier[nested_v] keyword[in] identifier[_flatten] ( identifier[list_v] ): keyword[yield] identifier[nested_v] keyword[else] : keyword[yield] identifier[d] keyword[return] identifier[self] . identifier[__class__] ( identifier[list] ( identifier[_flatten] ( identifier[self] . identifier[items] )))
def flatten(self): """ Get a flattened list of the items in the collection. :rtype: Collection """ def _flatten(d): if isinstance(d, dict): for v in d.values(): for nested_v in _flatten(v): yield nested_v # depends on [control=['for'], data=['nested_v']] # depends on [control=['for'], data=['v']] # depends on [control=['if'], data=[]] elif isinstance(d, list): for list_v in d: for nested_v in _flatten(list_v): yield nested_v # depends on [control=['for'], data=['nested_v']] # depends on [control=['for'], data=['list_v']] # depends on [control=['if'], data=[]] else: yield d return self.__class__(list(_flatten(self.items)))
def pipeline( ctx, input_fn, db_save, db_delete, output_fn, rules, species, namespace_targets, version, api, config_fn, ): """BEL Pipeline - BEL Nanopubs into BEL Edges This will process BEL Nanopubs into BEL Edges by validating, orthologizing (if requested), canonicalizing, and then computing the BEL Edges based on the given rule_set. \b input_fn: If input fn has *.gz, will read as a gzip file If input fn has *.jsonl*, will parsed as a JSONLines file IF input fn has *.json*, will be parsed as a JSON file If input fn has *.yaml* or *.yml*, will be parsed as a YAML file \b output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file If output fn has *.jgf, will be written as JSON Graph Formatted file """ if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) if rules: rules = rules.replace(" ", "").split(",") namespace_targets = utils.first_true( [namespace_targets, config["bel"]["lang"].get("canonical")], None ) rules = utils.first_true( [rules, config["bel"]["nanopub"].get("pipeline_edge_rules", False)], False ) api = utils.first_true( [api, config["bel_api"]["servers"].get("api_url", None)], None ) version = utils.first_true( [version, config["bel"]["lang"].get("default_bel_version", None)], None ) n = bnn.Nanopub() try: json_flag, jsonl_flag, yaml_flag, jgf_flag = False, False, False, False all_bel_edges = [] fout = None if db_save or db_delete: if db_delete: arango_client = bel.db.arangodb.get_client() bel.db.arangodb.delete_database(arango_client, "edgestore") else: arango_client = bel.db.arangodb.get_client() edgestore_handle = bel.db.arangodb.get_edgestore_handle(arango_client) elif re.search("ya?ml", output_fn): yaml_flag = True elif "jsonl" in output_fn: jsonl_flag = True elif "json" in output_fn: json_flag = True elif "jgf" in output_fn: jgf_flag = True if db_save: pass elif "gz" in output_fn: fout = gzip.open(output_fn, "wt") else: fout = open(output_fn, "wt") nanopub_cnt = 0 with timy.Timer() as timer: for np in bnf.read_nanopubs(input_fn): # print('Nanopub:\n', json.dumps(np, indent=4)) nanopub_cnt += 1 if nanopub_cnt % 100 == 0: timer.track(f"{nanopub_cnt} Nanopubs processed into Edges") bel_edges = n.bel_edges( np, namespace_targets=namespace_targets, orthologize_target=species, rules=rules, ) if db_save: bel.edge.edges.load_edges_into_db(edgestore_handle, edges=bel_edges) elif jsonl_flag: fout.write("{}\n".format(json.dumps(bel_edges))) else: all_bel_edges.extend(bel_edges) if db_save: pass elif yaml_flag: fout.write("{}\n".format(yaml.dumps(all_bel_edges))) elif json_flag: fout.write("{}\n".format(json.dumps(all_bel_edges))) elif jgf_flag: bnf.edges_to_jgf(output_fn, all_bel_edges) finally: if fout: fout.close()
def function[pipeline, parameter[ctx, input_fn, db_save, db_delete, output_fn, rules, species, namespace_targets, version, api, config_fn]]: constant[BEL Pipeline - BEL Nanopubs into BEL Edges This will process BEL Nanopubs into BEL Edges by validating, orthologizing (if requested), canonicalizing, and then computing the BEL Edges based on the given rule_set.  input_fn: If input fn has *.gz, will read as a gzip file If input fn has *.jsonl*, will parsed as a JSONLines file IF input fn has *.json*, will be parsed as a JSON file If input fn has *.yaml* or *.yml*, will be parsed as a YAML file  output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file If output fn has *.jgf, will be written as JSON Graph Formatted file ] if name[config_fn] begin[:] variable[config] assign[=] call[name[bel].db.Config.merge_config, parameter[name[ctx].config]] if name[namespace_targets] begin[:] variable[namespace_targets] assign[=] call[name[json].loads, parameter[name[namespace_targets]]] if name[rules] begin[:] variable[rules] assign[=] call[call[name[rules].replace, parameter[constant[ ], constant[]]].split, parameter[constant[,]]] variable[namespace_targets] assign[=] call[name[utils].first_true, parameter[list[[<ast.Name object at 0x7da20e957e80>, <ast.Call object at 0x7da20e954dc0>]], constant[None]]] variable[rules] assign[=] call[name[utils].first_true, parameter[list[[<ast.Name object at 0x7da20e954610>, <ast.Call object at 0x7da20e954e50>]], constant[False]]] variable[api] assign[=] call[name[utils].first_true, parameter[list[[<ast.Name object at 0x7da20e957880>, <ast.Call object at 0x7da20e956c20>]], constant[None]]] variable[version] assign[=] call[name[utils].first_true, parameter[list[[<ast.Name object at 0x7da20e957250>, <ast.Call object at 0x7da20e956f50>]], constant[None]]] variable[n] assign[=] call[name[bnn].Nanopub, parameter[]] <ast.Try object at 0x7da20e955240>
keyword[def] identifier[pipeline] ( identifier[ctx] , identifier[input_fn] , identifier[db_save] , identifier[db_delete] , identifier[output_fn] , identifier[rules] , identifier[species] , identifier[namespace_targets] , identifier[version] , identifier[api] , identifier[config_fn] , ): literal[string] keyword[if] identifier[config_fn] : identifier[config] = identifier[bel] . identifier[db] . identifier[Config] . identifier[merge_config] ( identifier[ctx] . identifier[config] , identifier[override_config_fn] = identifier[config_fn] ) keyword[else] : identifier[config] = identifier[ctx] . identifier[config] keyword[if] identifier[namespace_targets] : identifier[namespace_targets] = identifier[json] . identifier[loads] ( identifier[namespace_targets] ) keyword[if] identifier[rules] : identifier[rules] = identifier[rules] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] ) identifier[namespace_targets] = identifier[utils] . identifier[first_true] ( [ identifier[namespace_targets] , identifier[config] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] )], keyword[None] ) identifier[rules] = identifier[utils] . identifier[first_true] ( [ identifier[rules] , identifier[config] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] , keyword[False] )], keyword[False] ) identifier[api] = identifier[utils] . identifier[first_true] ( [ identifier[api] , identifier[config] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] , keyword[None] )], keyword[None] ) identifier[version] = identifier[utils] . identifier[first_true] ( [ identifier[version] , identifier[config] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] , keyword[None] )], keyword[None] ) identifier[n] = identifier[bnn] . identifier[Nanopub] () keyword[try] : identifier[json_flag] , identifier[jsonl_flag] , identifier[yaml_flag] , identifier[jgf_flag] = keyword[False] , keyword[False] , keyword[False] , keyword[False] identifier[all_bel_edges] =[] identifier[fout] = keyword[None] keyword[if] identifier[db_save] keyword[or] identifier[db_delete] : keyword[if] identifier[db_delete] : identifier[arango_client] = identifier[bel] . identifier[db] . identifier[arangodb] . identifier[get_client] () identifier[bel] . identifier[db] . identifier[arangodb] . identifier[delete_database] ( identifier[arango_client] , literal[string] ) keyword[else] : identifier[arango_client] = identifier[bel] . identifier[db] . identifier[arangodb] . identifier[get_client] () identifier[edgestore_handle] = identifier[bel] . identifier[db] . identifier[arangodb] . identifier[get_edgestore_handle] ( identifier[arango_client] ) keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[output_fn] ): identifier[yaml_flag] = keyword[True] keyword[elif] literal[string] keyword[in] identifier[output_fn] : identifier[jsonl_flag] = keyword[True] keyword[elif] literal[string] keyword[in] identifier[output_fn] : identifier[json_flag] = keyword[True] keyword[elif] literal[string] keyword[in] identifier[output_fn] : identifier[jgf_flag] = keyword[True] keyword[if] identifier[db_save] : keyword[pass] keyword[elif] literal[string] keyword[in] identifier[output_fn] : identifier[fout] = identifier[gzip] . identifier[open] ( identifier[output_fn] , literal[string] ) keyword[else] : identifier[fout] = identifier[open] ( identifier[output_fn] , literal[string] ) identifier[nanopub_cnt] = literal[int] keyword[with] identifier[timy] . identifier[Timer] () keyword[as] identifier[timer] : keyword[for] identifier[np] keyword[in] identifier[bnf] . identifier[read_nanopubs] ( identifier[input_fn] ): identifier[nanopub_cnt] += literal[int] keyword[if] identifier[nanopub_cnt] % literal[int] == literal[int] : identifier[timer] . identifier[track] ( literal[string] ) identifier[bel_edges] = identifier[n] . identifier[bel_edges] ( identifier[np] , identifier[namespace_targets] = identifier[namespace_targets] , identifier[orthologize_target] = identifier[species] , identifier[rules] = identifier[rules] , ) keyword[if] identifier[db_save] : identifier[bel] . identifier[edge] . identifier[edges] . identifier[load_edges_into_db] ( identifier[edgestore_handle] , identifier[edges] = identifier[bel_edges] ) keyword[elif] identifier[jsonl_flag] : identifier[fout] . identifier[write] ( literal[string] . identifier[format] ( identifier[json] . identifier[dumps] ( identifier[bel_edges] ))) keyword[else] : identifier[all_bel_edges] . identifier[extend] ( identifier[bel_edges] ) keyword[if] identifier[db_save] : keyword[pass] keyword[elif] identifier[yaml_flag] : identifier[fout] . identifier[write] ( literal[string] . identifier[format] ( identifier[yaml] . identifier[dumps] ( identifier[all_bel_edges] ))) keyword[elif] identifier[json_flag] : identifier[fout] . identifier[write] ( literal[string] . identifier[format] ( identifier[json] . identifier[dumps] ( identifier[all_bel_edges] ))) keyword[elif] identifier[jgf_flag] : identifier[bnf] . identifier[edges_to_jgf] ( identifier[output_fn] , identifier[all_bel_edges] ) keyword[finally] : keyword[if] identifier[fout] : identifier[fout] . identifier[close] ()
def pipeline(ctx, input_fn, db_save, db_delete, output_fn, rules, species, namespace_targets, version, api, config_fn): """BEL Pipeline - BEL Nanopubs into BEL Edges This will process BEL Nanopubs into BEL Edges by validating, orthologizing (if requested), canonicalizing, and then computing the BEL Edges based on the given rule_set. \x08 input_fn: If input fn has *.gz, will read as a gzip file If input fn has *.jsonl*, will parsed as a JSONLines file IF input fn has *.json*, will be parsed as a JSON file If input fn has *.yaml* or *.yml*, will be parsed as a YAML file \x08 output_fn: If output fn has *.gz, will written as a gzip file If output fn has *.jsonl*, will written as a JSONLines file IF output fn has *.json*, will be written as a JSON file If output fn has *.yaml* or *.yml*, will be written as a YAML file If output fn has *.jgf, will be written as JSON Graph Formatted file """ if config_fn: config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn) # depends on [control=['if'], data=[]] else: config = ctx.config # Configuration - will return the first truthy result in list else the default option if namespace_targets: namespace_targets = json.loads(namespace_targets) # depends on [control=['if'], data=[]] if rules: rules = rules.replace(' ', '').split(',') # depends on [control=['if'], data=[]] namespace_targets = utils.first_true([namespace_targets, config['bel']['lang'].get('canonical')], None) rules = utils.first_true([rules, config['bel']['nanopub'].get('pipeline_edge_rules', False)], False) api = utils.first_true([api, config['bel_api']['servers'].get('api_url', None)], None) version = utils.first_true([version, config['bel']['lang'].get('default_bel_version', None)], None) n = bnn.Nanopub() try: (json_flag, jsonl_flag, yaml_flag, jgf_flag) = (False, False, False, False) all_bel_edges = [] fout = None if db_save or db_delete: if db_delete: arango_client = bel.db.arangodb.get_client() bel.db.arangodb.delete_database(arango_client, 'edgestore') # depends on [control=['if'], data=[]] else: arango_client = bel.db.arangodb.get_client() edgestore_handle = bel.db.arangodb.get_edgestore_handle(arango_client) # depends on [control=['if'], data=[]] elif re.search('ya?ml', output_fn): yaml_flag = True # depends on [control=['if'], data=[]] elif 'jsonl' in output_fn: jsonl_flag = True # depends on [control=['if'], data=[]] elif 'json' in output_fn: json_flag = True # depends on [control=['if'], data=[]] elif 'jgf' in output_fn: jgf_flag = True # depends on [control=['if'], data=[]] if db_save: pass # depends on [control=['if'], data=[]] elif 'gz' in output_fn: fout = gzip.open(output_fn, 'wt') # depends on [control=['if'], data=['output_fn']] else: fout = open(output_fn, 'wt') nanopub_cnt = 0 with timy.Timer() as timer: for np in bnf.read_nanopubs(input_fn): # print('Nanopub:\n', json.dumps(np, indent=4)) nanopub_cnt += 1 if nanopub_cnt % 100 == 0: timer.track(f'{nanopub_cnt} Nanopubs processed into Edges') # depends on [control=['if'], data=[]] bel_edges = n.bel_edges(np, namespace_targets=namespace_targets, orthologize_target=species, rules=rules) if db_save: bel.edge.edges.load_edges_into_db(edgestore_handle, edges=bel_edges) # depends on [control=['if'], data=[]] elif jsonl_flag: fout.write('{}\n'.format(json.dumps(bel_edges))) # depends on [control=['if'], data=[]] else: all_bel_edges.extend(bel_edges) # depends on [control=['for'], data=['np']] # depends on [control=['with'], data=['timer']] if db_save: pass # depends on [control=['if'], data=[]] elif yaml_flag: fout.write('{}\n'.format(yaml.dumps(all_bel_edges))) # depends on [control=['if'], data=[]] elif json_flag: fout.write('{}\n'.format(json.dumps(all_bel_edges))) # depends on [control=['if'], data=[]] elif jgf_flag: bnf.edges_to_jgf(output_fn, all_bel_edges) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] finally: if fout: fout.close() # depends on [control=['if'], data=[]]
def has_changed(self, field_name: str = None) -> bool: """ Check if a field has changed since the model was instantiated. """ changed = self._diff_with_initial.keys() if self._meta.get_field(field_name).get_internal_type() == 'ForeignKey': if not field_name.endswith('_id'): field_name = field_name+'_id' if field_name in changed: return True return False
def function[has_changed, parameter[self, field_name]]: constant[ Check if a field has changed since the model was instantiated. ] variable[changed] assign[=] call[name[self]._diff_with_initial.keys, parameter[]] if compare[call[call[name[self]._meta.get_field, parameter[name[field_name]]].get_internal_type, parameter[]] equal[==] constant[ForeignKey]] begin[:] if <ast.UnaryOp object at 0x7da1b1117c10> begin[:] variable[field_name] assign[=] binary_operation[name[field_name] + constant[_id]] if compare[name[field_name] in name[changed]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[has_changed] ( identifier[self] , identifier[field_name] : identifier[str] = keyword[None] )-> identifier[bool] : literal[string] identifier[changed] = identifier[self] . identifier[_diff_with_initial] . identifier[keys] () keyword[if] identifier[self] . identifier[_meta] . identifier[get_field] ( identifier[field_name] ). identifier[get_internal_type] ()== literal[string] : keyword[if] keyword[not] identifier[field_name] . identifier[endswith] ( literal[string] ): identifier[field_name] = identifier[field_name] + literal[string] keyword[if] identifier[field_name] keyword[in] identifier[changed] : keyword[return] keyword[True] keyword[return] keyword[False]
def has_changed(self, field_name: str=None) -> bool: """ Check if a field has changed since the model was instantiated. """ changed = self._diff_with_initial.keys() if self._meta.get_field(field_name).get_internal_type() == 'ForeignKey': if not field_name.endswith('_id'): field_name = field_name + '_id' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if field_name in changed: return True # depends on [control=['if'], data=[]] return False
def connection_lost(self, exc): """ Called when the connection to the remote worker is broken. Closes the worker. """ logger.debug("worker connection lost") self._worker.close() self._workers.remove(self._worker)
def function[connection_lost, parameter[self, exc]]: constant[ Called when the connection to the remote worker is broken. Closes the worker. ] call[name[logger].debug, parameter[constant[worker connection lost]]] call[name[self]._worker.close, parameter[]] call[name[self]._workers.remove, parameter[name[self]._worker]]
keyword[def] identifier[connection_lost] ( identifier[self] , identifier[exc] ): literal[string] identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[_worker] . identifier[close] () identifier[self] . identifier[_workers] . identifier[remove] ( identifier[self] . identifier[_worker] )
def connection_lost(self, exc): """ Called when the connection to the remote worker is broken. Closes the worker. """ logger.debug('worker connection lost') self._worker.close() self._workers.remove(self._worker)
def get_alert(self, alert_id): """ :param alert_id: Alert identifier :return: TheHive Alert :rtype: json """ req = self.url + "/api/alert/{}".format(alert_id) try: return requests.get(req, proxies=self.proxies, auth=self.auth, verify=self.cert) except requests.exceptions.RequestException as e: raise AlertException("Alert fetch error: {}".format(e))
def function[get_alert, parameter[self, alert_id]]: constant[ :param alert_id: Alert identifier :return: TheHive Alert :rtype: json ] variable[req] assign[=] binary_operation[name[self].url + call[constant[/api/alert/{}].format, parameter[name[alert_id]]]] <ast.Try object at 0x7da20c76c1c0>
keyword[def] identifier[get_alert] ( identifier[self] , identifier[alert_id] ): literal[string] identifier[req] = identifier[self] . identifier[url] + literal[string] . identifier[format] ( identifier[alert_id] ) keyword[try] : keyword[return] identifier[requests] . identifier[get] ( identifier[req] , identifier[proxies] = identifier[self] . identifier[proxies] , identifier[auth] = identifier[self] . identifier[auth] , identifier[verify] = identifier[self] . identifier[cert] ) keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[e] : keyword[raise] identifier[AlertException] ( literal[string] . identifier[format] ( identifier[e] ))
def get_alert(self, alert_id): """ :param alert_id: Alert identifier :return: TheHive Alert :rtype: json """ req = self.url + '/api/alert/{}'.format(alert_id) try: return requests.get(req, proxies=self.proxies, auth=self.auth, verify=self.cert) # depends on [control=['try'], data=[]] except requests.exceptions.RequestException as e: raise AlertException('Alert fetch error: {}'.format(e)) # depends on [control=['except'], data=['e']]
def _mysqld_process_checkpoint(): '''this helper method checks if mysql server is available in the sys if not fires up one ''' try: subprocess.check_output("pgrep mysqld", shell=True) except Exception: logger.warning( 'Your mysql server is offline, fake2db will try to launch it now!', extra=extra_information) # close_fds = True argument is the flag that is responsible # for Popen to launch the process completely independent subprocess.Popen("mysqld", close_fds=True, shell=True) time.sleep(3)
def function[_mysqld_process_checkpoint, parameter[]]: constant[this helper method checks if mysql server is available in the sys if not fires up one ] <ast.Try object at 0x7da1b08bc3a0>
keyword[def] identifier[_mysqld_process_checkpoint] (): literal[string] keyword[try] : identifier[subprocess] . identifier[check_output] ( literal[string] , identifier[shell] = keyword[True] ) keyword[except] identifier[Exception] : identifier[logger] . identifier[warning] ( literal[string] , identifier[extra] = identifier[extra_information] ) identifier[subprocess] . identifier[Popen] ( literal[string] , identifier[close_fds] = keyword[True] , identifier[shell] = keyword[True] ) identifier[time] . identifier[sleep] ( literal[int] )
def _mysqld_process_checkpoint(): """this helper method checks if mysql server is available in the sys if not fires up one """ try: subprocess.check_output('pgrep mysqld', shell=True) # depends on [control=['try'], data=[]] except Exception: logger.warning('Your mysql server is offline, fake2db will try to launch it now!', extra=extra_information) # close_fds = True argument is the flag that is responsible # for Popen to launch the process completely independent subprocess.Popen('mysqld', close_fds=True, shell=True) time.sleep(3) # depends on [control=['except'], data=[]]
def _get_rule_source(self, rule): """Gets the variable part of the source code for a rule.""" p = len(self.input_source) + rule.position source = self.input_source[p:p + rule.consumed].rstrip() return self._indent(source, depth=self.indent + " ", skip_first_line=True)
def function[_get_rule_source, parameter[self, rule]]: constant[Gets the variable part of the source code for a rule.] variable[p] assign[=] binary_operation[call[name[len], parameter[name[self].input_source]] + name[rule].position] variable[source] assign[=] call[call[name[self].input_source][<ast.Slice object at 0x7da1b013dcc0>].rstrip, parameter[]] return[call[name[self]._indent, parameter[name[source]]]]
keyword[def] identifier[_get_rule_source] ( identifier[self] , identifier[rule] ): literal[string] identifier[p] = identifier[len] ( identifier[self] . identifier[input_source] )+ identifier[rule] . identifier[position] identifier[source] = identifier[self] . identifier[input_source] [ identifier[p] : identifier[p] + identifier[rule] . identifier[consumed] ]. identifier[rstrip] () keyword[return] identifier[self] . identifier[_indent] ( identifier[source] , identifier[depth] = identifier[self] . identifier[indent] + literal[string] , identifier[skip_first_line] = keyword[True] )
def _get_rule_source(self, rule): """Gets the variable part of the source code for a rule.""" p = len(self.input_source) + rule.position source = self.input_source[p:p + rule.consumed].rstrip() return self._indent(source, depth=self.indent + ' ', skip_first_line=True)
def partial_tempering(topfile="processed.top", outfile="scaled.top", banned_lines='', scale_lipids=1.0, scale_protein=1.0): """Set up topology for partial tempering (REST2) replica exchange. .. versionchanged:: 0.7.0 Use keyword arguments instead of an `args` Namespace object. """ banned_lines = map(int, banned_lines.split()) top = TOP(topfile) groups = [("_", float(scale_protein)), ("=", float(scale_lipids))] # # CMAPTYPES # cmaptypes = [] for ct in top.cmaptypes: cmaptypes.append(ct) for gr, scale in groups: ctA = copy.deepcopy(ct) ctA.atype1 += gr ctA.atype2 += gr ctA.atype3 += gr ctA.atype4 += gr ctA.atype8 += gr ctA.gromacs['param'] = [ v*scale for v in ct.gromacs['param'] ] cmaptypes.append(ctA) logger.debug("cmaptypes was {0}, is {1}".format(len(top.cmaptypes), len(cmaptypes))) top.cmaptypes = cmaptypes # # ATOMTYPES # atomtypes = [] for at in top.atomtypes: atomtypes.append(at) for gr, scale in groups: atA = copy.deepcopy(at) atA.atnum = atA.atype atA.atype += gr atA.gromacs['param']['lje'] *= scale atomtypes.append(atA) top.atomtypes = atomtypes # # PAIRTYPES # pairtypes = [] for pt in top.pairtypes: pairtypes.append(pt) for gr, scale in groups: ptA = copy.deepcopy(pt) ptA.atype1 += gr ptA.atype2 += gr ptA.gromacs['param']['lje14'] *= scale pairtypes.append(ptA) top.pairtypes = pairtypes # # BONDTYPES # bondtypes = [] for bt in top.bondtypes: bondtypes.append(bt) for gr, scale in groups: btA = copy.deepcopy(bt) btA.atype1 += gr btA.atype2 += gr bondtypes.append(btA) top.bondtypes = bondtypes # # ANGLETYPES # angletypes = [] for at in top.angletypes: angletypes.append(at) for gr, scale in groups: atA = copy.deepcopy(at) atA.atype1 += gr atA.atype2 += gr atA.atype3 += gr angletypes.append(atA) top.angletypes = angletypes # # Build dihedral dictionary # dihedraltypes = {} for dt in top.dihedraltypes: dt.disabled = True dt.comment = "; type={0!s}-{1!s}-{2!s}-{3!s}-9\n; LINE({4:d}) ".format( dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.line) dt.comment = dt.comment.replace("_","") #if "X-CTL2-CTL2-X-9" in dt.comment: print dt name = "{0}-{1}-{2}-{3}-{4}".format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs['func']) if not name in dihedraltypes: dihedraltypes[name] = [] dihedraltypes[name].append(dt) logger.debug("Build dihedraltypes dictionary with {0} entries".format(len(dihedraltypes))) # # Build improper dictionary # impropertypes = {} for it in top.impropertypes: it.disabled = True it.comment = "; LINE({0:d}) ".format(it.line) name = "{0}-{1}-{2}-{3}-{4}".format( it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs['func']) if not name in impropertypes: impropertypes[name] = [] impropertypes[name].append(it) logger.debug("Build impropertypes dictionary with {0} entries".format(len(impropertypes))) for molname_mol in top.dict_molname_mol: if not 'Protein' in molname_mol: continue mol = top.dict_molname_mol[molname_mol] for at in mol.atoms: at.charge *= math.sqrt(scale_protein) mol = scale_dihedrals(mol, dihedraltypes, scale_protein, banned_lines) mol = scale_impropers(mol, impropertypes, 1.0, banned_lines) top.write(outfile)
def function[partial_tempering, parameter[topfile, outfile, banned_lines, scale_lipids, scale_protein]]: constant[Set up topology for partial tempering (REST2) replica exchange. .. versionchanged:: 0.7.0 Use keyword arguments instead of an `args` Namespace object. ] variable[banned_lines] assign[=] call[name[map], parameter[name[int], call[name[banned_lines].split, parameter[]]]] variable[top] assign[=] call[name[TOP], parameter[name[topfile]]] variable[groups] assign[=] list[[<ast.Tuple object at 0x7da20c6abd30>, <ast.Tuple object at 0x7da20c6a8100>]] variable[cmaptypes] assign[=] list[[]] for taget[name[ct]] in starred[name[top].cmaptypes] begin[:] call[name[cmaptypes].append, parameter[name[ct]]] for taget[tuple[[<ast.Name object at 0x7da20c6ab820>, <ast.Name object at 0x7da20c6ab7f0>]]] in starred[name[groups]] begin[:] variable[ctA] assign[=] call[name[copy].deepcopy, parameter[name[ct]]] <ast.AugAssign object at 0x7da20c6a8a00> <ast.AugAssign object at 0x7da20c6abbb0> <ast.AugAssign object at 0x7da20c6a8e20> <ast.AugAssign object at 0x7da20c6aa4d0> <ast.AugAssign object at 0x7da20c6ab670> call[name[ctA].gromacs][constant[param]] assign[=] <ast.ListComp object at 0x7da20c6ab460> call[name[cmaptypes].append, parameter[name[ctA]]] call[name[logger].debug, parameter[call[constant[cmaptypes was {0}, is {1}].format, parameter[call[name[len], parameter[name[top].cmaptypes]], call[name[len], parameter[name[cmaptypes]]]]]]] name[top].cmaptypes assign[=] name[cmaptypes] variable[atomtypes] assign[=] list[[]] for taget[name[at]] in starred[name[top].atomtypes] begin[:] call[name[atomtypes].append, parameter[name[at]]] for taget[tuple[[<ast.Name object at 0x7da20c6a88b0>, <ast.Name object at 0x7da20c6a93c0>]]] in starred[name[groups]] begin[:] variable[atA] assign[=] call[name[copy].deepcopy, parameter[name[at]]] name[atA].atnum assign[=] name[atA].atype <ast.AugAssign object at 0x7da20c6abc70> <ast.AugAssign object at 0x7da20c6aa8c0> call[name[atomtypes].append, parameter[name[atA]]] name[top].atomtypes assign[=] name[atomtypes] variable[pairtypes] assign[=] list[[]] for taget[name[pt]] in starred[name[top].pairtypes] begin[:] call[name[pairtypes].append, parameter[name[pt]]] for taget[tuple[[<ast.Name object at 0x7da20c6aa080>, <ast.Name object at 0x7da20c6aab90>]]] in starred[name[groups]] begin[:] variable[ptA] assign[=] call[name[copy].deepcopy, parameter[name[pt]]] <ast.AugAssign object at 0x7da20c6aad70> <ast.AugAssign object at 0x7da20c6a9f00> <ast.AugAssign object at 0x7da20c6a92a0> call[name[pairtypes].append, parameter[name[ptA]]] name[top].pairtypes assign[=] name[pairtypes] variable[bondtypes] assign[=] list[[]] for taget[name[bt]] in starred[name[top].bondtypes] begin[:] call[name[bondtypes].append, parameter[name[bt]]] for taget[tuple[[<ast.Name object at 0x7da20c6c7430>, <ast.Name object at 0x7da20c6c7820>]]] in starred[name[groups]] begin[:] variable[btA] assign[=] call[name[copy].deepcopy, parameter[name[bt]]] <ast.AugAssign object at 0x7da20c6c5210> <ast.AugAssign object at 0x7da20c6c4910> call[name[bondtypes].append, parameter[name[btA]]] name[top].bondtypes assign[=] name[bondtypes] variable[angletypes] assign[=] list[[]] for taget[name[at]] in starred[name[top].angletypes] begin[:] call[name[angletypes].append, parameter[name[at]]] for taget[tuple[[<ast.Name object at 0x7da20c6c4ee0>, <ast.Name object at 0x7da20c6c6ec0>]]] in starred[name[groups]] begin[:] variable[atA] assign[=] call[name[copy].deepcopy, parameter[name[at]]] <ast.AugAssign object at 0x7da20c6c63b0> <ast.AugAssign object at 0x7da20c6c5510> <ast.AugAssign object at 0x7da20c6c7cd0> call[name[angletypes].append, parameter[name[atA]]] name[top].angletypes assign[=] name[angletypes] variable[dihedraltypes] assign[=] dictionary[[], []] for taget[name[dt]] in starred[name[top].dihedraltypes] begin[:] name[dt].disabled assign[=] constant[True] name[dt].comment assign[=] call[constant[; type={0!s}-{1!s}-{2!s}-{3!s}-9 ; LINE({4:d}) ].format, parameter[name[dt].atype1, name[dt].atype2, name[dt].atype3, name[dt].atype4, name[dt].line]] name[dt].comment assign[=] call[name[dt].comment.replace, parameter[constant[_], constant[]]] variable[name] assign[=] call[constant[{0}-{1}-{2}-{3}-{4}].format, parameter[name[dt].atype1, name[dt].atype2, name[dt].atype3, name[dt].atype4, call[name[dt].gromacs][constant[func]]]] if <ast.UnaryOp object at 0x7da20c6c5480> begin[:] call[name[dihedraltypes]][name[name]] assign[=] list[[]] call[call[name[dihedraltypes]][name[name]].append, parameter[name[dt]]] call[name[logger].debug, parameter[call[constant[Build dihedraltypes dictionary with {0} entries].format, parameter[call[name[len], parameter[name[dihedraltypes]]]]]]] variable[impropertypes] assign[=] dictionary[[], []] for taget[name[it]] in starred[name[top].impropertypes] begin[:] name[it].disabled assign[=] constant[True] name[it].comment assign[=] call[constant[; LINE({0:d}) ].format, parameter[name[it].line]] variable[name] assign[=] call[constant[{0}-{1}-{2}-{3}-{4}].format, parameter[name[it].atype1, name[it].atype2, name[it].atype3, name[it].atype4, call[name[it].gromacs][constant[func]]]] if <ast.UnaryOp object at 0x7da207f9b880> begin[:] call[name[impropertypes]][name[name]] assign[=] list[[]] call[call[name[impropertypes]][name[name]].append, parameter[name[it]]] call[name[logger].debug, parameter[call[constant[Build impropertypes dictionary with {0} entries].format, parameter[call[name[len], parameter[name[impropertypes]]]]]]] for taget[name[molname_mol]] in starred[name[top].dict_molname_mol] begin[:] if <ast.UnaryOp object at 0x7da207f9b4c0> begin[:] continue variable[mol] assign[=] call[name[top].dict_molname_mol][name[molname_mol]] for taget[name[at]] in starred[name[mol].atoms] begin[:] <ast.AugAssign object at 0x7da207f9a470> variable[mol] assign[=] call[name[scale_dihedrals], parameter[name[mol], name[dihedraltypes], name[scale_protein], name[banned_lines]]] variable[mol] assign[=] call[name[scale_impropers], parameter[name[mol], name[impropertypes], constant[1.0], name[banned_lines]]] call[name[top].write, parameter[name[outfile]]]
keyword[def] identifier[partial_tempering] ( identifier[topfile] = literal[string] , identifier[outfile] = literal[string] , identifier[banned_lines] = literal[string] , identifier[scale_lipids] = literal[int] , identifier[scale_protein] = literal[int] ): literal[string] identifier[banned_lines] = identifier[map] ( identifier[int] , identifier[banned_lines] . identifier[split] ()) identifier[top] = identifier[TOP] ( identifier[topfile] ) identifier[groups] =[( literal[string] , identifier[float] ( identifier[scale_protein] )),( literal[string] , identifier[float] ( identifier[scale_lipids] ))] identifier[cmaptypes] =[] keyword[for] identifier[ct] keyword[in] identifier[top] . identifier[cmaptypes] : identifier[cmaptypes] . identifier[append] ( identifier[ct] ) keyword[for] identifier[gr] , identifier[scale] keyword[in] identifier[groups] : identifier[ctA] = identifier[copy] . identifier[deepcopy] ( identifier[ct] ) identifier[ctA] . identifier[atype1] += identifier[gr] identifier[ctA] . identifier[atype2] += identifier[gr] identifier[ctA] . identifier[atype3] += identifier[gr] identifier[ctA] . identifier[atype4] += identifier[gr] identifier[ctA] . identifier[atype8] += identifier[gr] identifier[ctA] . identifier[gromacs] [ literal[string] ]=[ identifier[v] * identifier[scale] keyword[for] identifier[v] keyword[in] identifier[ct] . identifier[gromacs] [ literal[string] ]] identifier[cmaptypes] . identifier[append] ( identifier[ctA] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[top] . identifier[cmaptypes] ), identifier[len] ( identifier[cmaptypes] ))) identifier[top] . identifier[cmaptypes] = identifier[cmaptypes] identifier[atomtypes] =[] keyword[for] identifier[at] keyword[in] identifier[top] . identifier[atomtypes] : identifier[atomtypes] . identifier[append] ( identifier[at] ) keyword[for] identifier[gr] , identifier[scale] keyword[in] identifier[groups] : identifier[atA] = identifier[copy] . identifier[deepcopy] ( identifier[at] ) identifier[atA] . identifier[atnum] = identifier[atA] . identifier[atype] identifier[atA] . identifier[atype] += identifier[gr] identifier[atA] . identifier[gromacs] [ literal[string] ][ literal[string] ]*= identifier[scale] identifier[atomtypes] . identifier[append] ( identifier[atA] ) identifier[top] . identifier[atomtypes] = identifier[atomtypes] identifier[pairtypes] =[] keyword[for] identifier[pt] keyword[in] identifier[top] . identifier[pairtypes] : identifier[pairtypes] . identifier[append] ( identifier[pt] ) keyword[for] identifier[gr] , identifier[scale] keyword[in] identifier[groups] : identifier[ptA] = identifier[copy] . identifier[deepcopy] ( identifier[pt] ) identifier[ptA] . identifier[atype1] += identifier[gr] identifier[ptA] . identifier[atype2] += identifier[gr] identifier[ptA] . identifier[gromacs] [ literal[string] ][ literal[string] ]*= identifier[scale] identifier[pairtypes] . identifier[append] ( identifier[ptA] ) identifier[top] . identifier[pairtypes] = identifier[pairtypes] identifier[bondtypes] =[] keyword[for] identifier[bt] keyword[in] identifier[top] . identifier[bondtypes] : identifier[bondtypes] . identifier[append] ( identifier[bt] ) keyword[for] identifier[gr] , identifier[scale] keyword[in] identifier[groups] : identifier[btA] = identifier[copy] . identifier[deepcopy] ( identifier[bt] ) identifier[btA] . identifier[atype1] += identifier[gr] identifier[btA] . identifier[atype2] += identifier[gr] identifier[bondtypes] . identifier[append] ( identifier[btA] ) identifier[top] . identifier[bondtypes] = identifier[bondtypes] identifier[angletypes] =[] keyword[for] identifier[at] keyword[in] identifier[top] . identifier[angletypes] : identifier[angletypes] . identifier[append] ( identifier[at] ) keyword[for] identifier[gr] , identifier[scale] keyword[in] identifier[groups] : identifier[atA] = identifier[copy] . identifier[deepcopy] ( identifier[at] ) identifier[atA] . identifier[atype1] += identifier[gr] identifier[atA] . identifier[atype2] += identifier[gr] identifier[atA] . identifier[atype3] += identifier[gr] identifier[angletypes] . identifier[append] ( identifier[atA] ) identifier[top] . identifier[angletypes] = identifier[angletypes] identifier[dihedraltypes] ={} keyword[for] identifier[dt] keyword[in] identifier[top] . identifier[dihedraltypes] : identifier[dt] . identifier[disabled] = keyword[True] identifier[dt] . identifier[comment] = literal[string] . identifier[format] ( identifier[dt] . identifier[atype1] , identifier[dt] . identifier[atype2] , identifier[dt] . identifier[atype3] , identifier[dt] . identifier[atype4] , identifier[dt] . identifier[line] ) identifier[dt] . identifier[comment] = identifier[dt] . identifier[comment] . identifier[replace] ( literal[string] , literal[string] ) identifier[name] = literal[string] . identifier[format] ( identifier[dt] . identifier[atype1] , identifier[dt] . identifier[atype2] , identifier[dt] . identifier[atype3] , identifier[dt] . identifier[atype4] , identifier[dt] . identifier[gromacs] [ literal[string] ]) keyword[if] keyword[not] identifier[name] keyword[in] identifier[dihedraltypes] : identifier[dihedraltypes] [ identifier[name] ]=[] identifier[dihedraltypes] [ identifier[name] ]. identifier[append] ( identifier[dt] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[dihedraltypes] ))) identifier[impropertypes] ={} keyword[for] identifier[it] keyword[in] identifier[top] . identifier[impropertypes] : identifier[it] . identifier[disabled] = keyword[True] identifier[it] . identifier[comment] = literal[string] . identifier[format] ( identifier[it] . identifier[line] ) identifier[name] = literal[string] . identifier[format] ( identifier[it] . identifier[atype1] , identifier[it] . identifier[atype2] , identifier[it] . identifier[atype3] , identifier[it] . identifier[atype4] , identifier[it] . identifier[gromacs] [ literal[string] ]) keyword[if] keyword[not] identifier[name] keyword[in] identifier[impropertypes] : identifier[impropertypes] [ identifier[name] ]=[] identifier[impropertypes] [ identifier[name] ]. identifier[append] ( identifier[it] ) identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[len] ( identifier[impropertypes] ))) keyword[for] identifier[molname_mol] keyword[in] identifier[top] . identifier[dict_molname_mol] : keyword[if] keyword[not] literal[string] keyword[in] identifier[molname_mol] : keyword[continue] identifier[mol] = identifier[top] . identifier[dict_molname_mol] [ identifier[molname_mol] ] keyword[for] identifier[at] keyword[in] identifier[mol] . identifier[atoms] : identifier[at] . identifier[charge] *= identifier[math] . identifier[sqrt] ( identifier[scale_protein] ) identifier[mol] = identifier[scale_dihedrals] ( identifier[mol] , identifier[dihedraltypes] , identifier[scale_protein] , identifier[banned_lines] ) identifier[mol] = identifier[scale_impropers] ( identifier[mol] , identifier[impropertypes] , literal[int] , identifier[banned_lines] ) identifier[top] . identifier[write] ( identifier[outfile] )
def partial_tempering(topfile='processed.top', outfile='scaled.top', banned_lines='', scale_lipids=1.0, scale_protein=1.0): """Set up topology for partial tempering (REST2) replica exchange. .. versionchanged:: 0.7.0 Use keyword arguments instead of an `args` Namespace object. """ banned_lines = map(int, banned_lines.split()) top = TOP(topfile) groups = [('_', float(scale_protein)), ('=', float(scale_lipids))] # # CMAPTYPES # cmaptypes = [] for ct in top.cmaptypes: cmaptypes.append(ct) for (gr, scale) in groups: ctA = copy.deepcopy(ct) ctA.atype1 += gr ctA.atype2 += gr ctA.atype3 += gr ctA.atype4 += gr ctA.atype8 += gr ctA.gromacs['param'] = [v * scale for v in ct.gromacs['param']] cmaptypes.append(ctA) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['ct']] logger.debug('cmaptypes was {0}, is {1}'.format(len(top.cmaptypes), len(cmaptypes))) top.cmaptypes = cmaptypes # # ATOMTYPES # atomtypes = [] for at in top.atomtypes: atomtypes.append(at) for (gr, scale) in groups: atA = copy.deepcopy(at) atA.atnum = atA.atype atA.atype += gr atA.gromacs['param']['lje'] *= scale atomtypes.append(atA) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['at']] top.atomtypes = atomtypes # # PAIRTYPES # pairtypes = [] for pt in top.pairtypes: pairtypes.append(pt) for (gr, scale) in groups: ptA = copy.deepcopy(pt) ptA.atype1 += gr ptA.atype2 += gr ptA.gromacs['param']['lje14'] *= scale pairtypes.append(ptA) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['pt']] top.pairtypes = pairtypes # # BONDTYPES # bondtypes = [] for bt in top.bondtypes: bondtypes.append(bt) for (gr, scale) in groups: btA = copy.deepcopy(bt) btA.atype1 += gr btA.atype2 += gr bondtypes.append(btA) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['bt']] top.bondtypes = bondtypes # # ANGLETYPES # angletypes = [] for at in top.angletypes: angletypes.append(at) for (gr, scale) in groups: atA = copy.deepcopy(at) atA.atype1 += gr atA.atype2 += gr atA.atype3 += gr angletypes.append(atA) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['at']] top.angletypes = angletypes # # Build dihedral dictionary # dihedraltypes = {} for dt in top.dihedraltypes: dt.disabled = True dt.comment = '; type={0!s}-{1!s}-{2!s}-{3!s}-9\n; LINE({4:d}) '.format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.line) dt.comment = dt.comment.replace('_', '') #if "X-CTL2-CTL2-X-9" in dt.comment: print dt name = '{0}-{1}-{2}-{3}-{4}'.format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs['func']) if not name in dihedraltypes: dihedraltypes[name] = [] # depends on [control=['if'], data=[]] dihedraltypes[name].append(dt) # depends on [control=['for'], data=['dt']] logger.debug('Build dihedraltypes dictionary with {0} entries'.format(len(dihedraltypes))) # # Build improper dictionary # impropertypes = {} for it in top.impropertypes: it.disabled = True it.comment = '; LINE({0:d}) '.format(it.line) name = '{0}-{1}-{2}-{3}-{4}'.format(it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs['func']) if not name in impropertypes: impropertypes[name] = [] # depends on [control=['if'], data=[]] impropertypes[name].append(it) # depends on [control=['for'], data=['it']] logger.debug('Build impropertypes dictionary with {0} entries'.format(len(impropertypes))) for molname_mol in top.dict_molname_mol: if not 'Protein' in molname_mol: continue # depends on [control=['if'], data=[]] mol = top.dict_molname_mol[molname_mol] for at in mol.atoms: at.charge *= math.sqrt(scale_protein) # depends on [control=['for'], data=['at']] mol = scale_dihedrals(mol, dihedraltypes, scale_protein, banned_lines) mol = scale_impropers(mol, impropertypes, 1.0, banned_lines) # depends on [control=['for'], data=['molname_mol']] top.write(outfile)
def button_with_label(self, description, assistants=None): """ Function creates a button with lave. If assistant is specified then text is aligned """ btn = self.create_button() label = self.create_label(description) if assistants is not None: h_box = self.create_box(orientation=Gtk.Orientation.VERTICAL) h_box.pack_start(label, False, False, 0) label_ass = self.create_label( assistants, justify=Gtk.Justification.LEFT ) label_ass.set_alignment(0, 0) h_box.pack_start(label_ass, False, False, 12) btn.add(h_box) else: btn.add(label) return btn
def function[button_with_label, parameter[self, description, assistants]]: constant[ Function creates a button with lave. If assistant is specified then text is aligned ] variable[btn] assign[=] call[name[self].create_button, parameter[]] variable[label] assign[=] call[name[self].create_label, parameter[name[description]]] if compare[name[assistants] is_not constant[None]] begin[:] variable[h_box] assign[=] call[name[self].create_box, parameter[]] call[name[h_box].pack_start, parameter[name[label], constant[False], constant[False], constant[0]]] variable[label_ass] assign[=] call[name[self].create_label, parameter[name[assistants]]] call[name[label_ass].set_alignment, parameter[constant[0], constant[0]]] call[name[h_box].pack_start, parameter[name[label_ass], constant[False], constant[False], constant[12]]] call[name[btn].add, parameter[name[h_box]]] return[name[btn]]
keyword[def] identifier[button_with_label] ( identifier[self] , identifier[description] , identifier[assistants] = keyword[None] ): literal[string] identifier[btn] = identifier[self] . identifier[create_button] () identifier[label] = identifier[self] . identifier[create_label] ( identifier[description] ) keyword[if] identifier[assistants] keyword[is] keyword[not] keyword[None] : identifier[h_box] = identifier[self] . identifier[create_box] ( identifier[orientation] = identifier[Gtk] . identifier[Orientation] . identifier[VERTICAL] ) identifier[h_box] . identifier[pack_start] ( identifier[label] , keyword[False] , keyword[False] , literal[int] ) identifier[label_ass] = identifier[self] . identifier[create_label] ( identifier[assistants] , identifier[justify] = identifier[Gtk] . identifier[Justification] . identifier[LEFT] ) identifier[label_ass] . identifier[set_alignment] ( literal[int] , literal[int] ) identifier[h_box] . identifier[pack_start] ( identifier[label_ass] , keyword[False] , keyword[False] , literal[int] ) identifier[btn] . identifier[add] ( identifier[h_box] ) keyword[else] : identifier[btn] . identifier[add] ( identifier[label] ) keyword[return] identifier[btn]
def button_with_label(self, description, assistants=None): """ Function creates a button with lave. If assistant is specified then text is aligned """ btn = self.create_button() label = self.create_label(description) if assistants is not None: h_box = self.create_box(orientation=Gtk.Orientation.VERTICAL) h_box.pack_start(label, False, False, 0) label_ass = self.create_label(assistants, justify=Gtk.Justification.LEFT) label_ass.set_alignment(0, 0) h_box.pack_start(label_ass, False, False, 12) btn.add(h_box) # depends on [control=['if'], data=['assistants']] else: btn.add(label) return btn
def handle_batch_requests(request, *args, **kwargs): ''' A view function to handle the overall processing of batch requests. ''' batch_start_time = datetime.now() try: # Get the Individual WSGI requests. wsgi_requests = get_wsgi_requests(request) except BadBatchRequest as brx: return HttpResponseBadRequest(content=brx.message) # Fire these WSGI requests, and collect the response for the same. response = execute_requests(wsgi_requests) # Evrything's done, return the response. resp = HttpResponse( content=json.dumps(response), content_type="application/json") if _settings.ADD_DURATION_HEADER: resp.__setitem__(_settings.DURATION_HEADER_NAME, str((datetime.now() - batch_start_time).seconds)) return resp
def function[handle_batch_requests, parameter[request]]: constant[ A view function to handle the overall processing of batch requests. ] variable[batch_start_time] assign[=] call[name[datetime].now, parameter[]] <ast.Try object at 0x7da1b0eb8340> variable[response] assign[=] call[name[execute_requests], parameter[name[wsgi_requests]]] variable[resp] assign[=] call[name[HttpResponse], parameter[]] if name[_settings].ADD_DURATION_HEADER begin[:] call[name[resp].__setitem__, parameter[name[_settings].DURATION_HEADER_NAME, call[name[str], parameter[binary_operation[call[name[datetime].now, parameter[]] - name[batch_start_time]].seconds]]]] return[name[resp]]
keyword[def] identifier[handle_batch_requests] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[batch_start_time] = identifier[datetime] . identifier[now] () keyword[try] : identifier[wsgi_requests] = identifier[get_wsgi_requests] ( identifier[request] ) keyword[except] identifier[BadBatchRequest] keyword[as] identifier[brx] : keyword[return] identifier[HttpResponseBadRequest] ( identifier[content] = identifier[brx] . identifier[message] ) identifier[response] = identifier[execute_requests] ( identifier[wsgi_requests] ) identifier[resp] = identifier[HttpResponse] ( identifier[content] = identifier[json] . identifier[dumps] ( identifier[response] ), identifier[content_type] = literal[string] ) keyword[if] identifier[_settings] . identifier[ADD_DURATION_HEADER] : identifier[resp] . identifier[__setitem__] ( identifier[_settings] . identifier[DURATION_HEADER_NAME] , identifier[str] (( identifier[datetime] . identifier[now] ()- identifier[batch_start_time] ). identifier[seconds] )) keyword[return] identifier[resp]
def handle_batch_requests(request, *args, **kwargs): """ A view function to handle the overall processing of batch requests. """ batch_start_time = datetime.now() try: # Get the Individual WSGI requests. wsgi_requests = get_wsgi_requests(request) # depends on [control=['try'], data=[]] except BadBatchRequest as brx: return HttpResponseBadRequest(content=brx.message) # depends on [control=['except'], data=['brx']] # Fire these WSGI requests, and collect the response for the same. response = execute_requests(wsgi_requests) # Evrything's done, return the response. resp = HttpResponse(content=json.dumps(response), content_type='application/json') if _settings.ADD_DURATION_HEADER: resp.__setitem__(_settings.DURATION_HEADER_NAME, str((datetime.now() - batch_start_time).seconds)) # depends on [control=['if'], data=[]] return resp
def authorize_security_group( self, group_name=None, group_id=None, source_group_name="", source_group_owner_id="", ip_protocol="", from_port="", to_port="", cidr_ip=""): """ There are two ways to use C{authorize_security_group}: 1) associate an existing group (source group) with the one that you are targeting (group_name) with an authorization update; or 2) associate a set of IP permissions with the group you are targeting with an authorization update. @param group_name: The group you will be modifying with a new authorization. @param group_id: The id of the group you will be modifying with a new authorization. Optionally, the following parameters: @param source_group_name: Name of security group to authorize access to when operating on a user/group pair. @param source_group_owner_id: Owner of security group to authorize access to when operating on a user/group pair. If those parameters are not specified, then the following must be: @param ip_protocol: IP protocol to authorize access to when operating on a CIDR IP. @param from_port: Bottom of port range to authorize access to when operating on a CIDR IP. This contains the ICMP type if ICMP is being authorized. @param to_port: Top of port range to authorize access to when operating on a CIDR IP. This contains the ICMP code if ICMP is being authorized. @param cidr_ip: CIDR IP range to authorize access to when operating on a CIDR IP. @return: A C{Deferred} that will fire with a truth value for the success of the operation. """ if source_group_name and source_group_owner_id: parameters = { "SourceSecurityGroupName": source_group_name, "SourceSecurityGroupOwnerId": source_group_owner_id, } elif ip_protocol and from_port and to_port and cidr_ip: parameters = { "IpProtocol": ip_protocol, "FromPort": from_port, "ToPort": to_port, "CidrIp": cidr_ip, } else: msg = ("You must specify either both group parameters or " "all the ip parameters.") raise ValueError(msg) if group_id: parameters["GroupId"] = group_id elif group_name: parameters["GroupName"] = group_name else: raise ValueError("You must specify either the group name of the group id.") query = self.query_factory( action="AuthorizeSecurityGroupIngress", creds=self.creds, endpoint=self.endpoint, other_params=parameters) d = query.submit() return d.addCallback(self.parser.truth_return)
def function[authorize_security_group, parameter[self, group_name, group_id, source_group_name, source_group_owner_id, ip_protocol, from_port, to_port, cidr_ip]]: constant[ There are two ways to use C{authorize_security_group}: 1) associate an existing group (source group) with the one that you are targeting (group_name) with an authorization update; or 2) associate a set of IP permissions with the group you are targeting with an authorization update. @param group_name: The group you will be modifying with a new authorization. @param group_id: The id of the group you will be modifying with a new authorization. Optionally, the following parameters: @param source_group_name: Name of security group to authorize access to when operating on a user/group pair. @param source_group_owner_id: Owner of security group to authorize access to when operating on a user/group pair. If those parameters are not specified, then the following must be: @param ip_protocol: IP protocol to authorize access to when operating on a CIDR IP. @param from_port: Bottom of port range to authorize access to when operating on a CIDR IP. This contains the ICMP type if ICMP is being authorized. @param to_port: Top of port range to authorize access to when operating on a CIDR IP. This contains the ICMP code if ICMP is being authorized. @param cidr_ip: CIDR IP range to authorize access to when operating on a CIDR IP. @return: A C{Deferred} that will fire with a truth value for the success of the operation. ] if <ast.BoolOp object at 0x7da18ede5300> begin[:] variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da18ede7280>, <ast.Constant object at 0x7da18ede5f30>], [<ast.Name object at 0x7da18ede6b00>, <ast.Name object at 0x7da18ede62c0>]] if name[group_id] begin[:] call[name[parameters]][constant[GroupId]] assign[=] name[group_id] variable[query] assign[=] call[name[self].query_factory, parameter[]] variable[d] assign[=] call[name[query].submit, parameter[]] return[call[name[d].addCallback, parameter[name[self].parser.truth_return]]]
keyword[def] identifier[authorize_security_group] ( identifier[self] , identifier[group_name] = keyword[None] , identifier[group_id] = keyword[None] , identifier[source_group_name] = literal[string] , identifier[source_group_owner_id] = literal[string] , identifier[ip_protocol] = literal[string] , identifier[from_port] = literal[string] , identifier[to_port] = literal[string] , identifier[cidr_ip] = literal[string] ): literal[string] keyword[if] identifier[source_group_name] keyword[and] identifier[source_group_owner_id] : identifier[parameters] ={ literal[string] : identifier[source_group_name] , literal[string] : identifier[source_group_owner_id] , } keyword[elif] identifier[ip_protocol] keyword[and] identifier[from_port] keyword[and] identifier[to_port] keyword[and] identifier[cidr_ip] : identifier[parameters] ={ literal[string] : identifier[ip_protocol] , literal[string] : identifier[from_port] , literal[string] : identifier[to_port] , literal[string] : identifier[cidr_ip] , } keyword[else] : identifier[msg] =( literal[string] literal[string] ) keyword[raise] identifier[ValueError] ( identifier[msg] ) keyword[if] identifier[group_id] : identifier[parameters] [ literal[string] ]= identifier[group_id] keyword[elif] identifier[group_name] : identifier[parameters] [ literal[string] ]= identifier[group_name] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[query] = identifier[self] . identifier[query_factory] ( identifier[action] = literal[string] , identifier[creds] = identifier[self] . identifier[creds] , identifier[endpoint] = identifier[self] . identifier[endpoint] , identifier[other_params] = identifier[parameters] ) identifier[d] = identifier[query] . identifier[submit] () keyword[return] identifier[d] . identifier[addCallback] ( identifier[self] . identifier[parser] . identifier[truth_return] )
def authorize_security_group(self, group_name=None, group_id=None, source_group_name='', source_group_owner_id='', ip_protocol='', from_port='', to_port='', cidr_ip=''): """ There are two ways to use C{authorize_security_group}: 1) associate an existing group (source group) with the one that you are targeting (group_name) with an authorization update; or 2) associate a set of IP permissions with the group you are targeting with an authorization update. @param group_name: The group you will be modifying with a new authorization. @param group_id: The id of the group you will be modifying with a new authorization. Optionally, the following parameters: @param source_group_name: Name of security group to authorize access to when operating on a user/group pair. @param source_group_owner_id: Owner of security group to authorize access to when operating on a user/group pair. If those parameters are not specified, then the following must be: @param ip_protocol: IP protocol to authorize access to when operating on a CIDR IP. @param from_port: Bottom of port range to authorize access to when operating on a CIDR IP. This contains the ICMP type if ICMP is being authorized. @param to_port: Top of port range to authorize access to when operating on a CIDR IP. This contains the ICMP code if ICMP is being authorized. @param cidr_ip: CIDR IP range to authorize access to when operating on a CIDR IP. @return: A C{Deferred} that will fire with a truth value for the success of the operation. """ if source_group_name and source_group_owner_id: parameters = {'SourceSecurityGroupName': source_group_name, 'SourceSecurityGroupOwnerId': source_group_owner_id} # depends on [control=['if'], data=[]] elif ip_protocol and from_port and to_port and cidr_ip: parameters = {'IpProtocol': ip_protocol, 'FromPort': from_port, 'ToPort': to_port, 'CidrIp': cidr_ip} # depends on [control=['if'], data=[]] else: msg = 'You must specify either both group parameters or all the ip parameters.' raise ValueError(msg) if group_id: parameters['GroupId'] = group_id # depends on [control=['if'], data=[]] elif group_name: parameters['GroupName'] = group_name # depends on [control=['if'], data=[]] else: raise ValueError('You must specify either the group name of the group id.') query = self.query_factory(action='AuthorizeSecurityGroupIngress', creds=self.creds, endpoint=self.endpoint, other_params=parameters) d = query.submit() return d.addCallback(self.parser.truth_return)
def generator_by_digest(family, digest_size): """ Return generator by hash generator family name and digest size :param family: name of hash-generator family :return: WHashGeneratorProto class """ for generator_name in WHash.available_generators(family=family): generator = WHash.generator(generator_name) if generator.generator_digest_size() == digest_size: return generator raise ValueError('Hash generator is not available')
def function[generator_by_digest, parameter[family, digest_size]]: constant[ Return generator by hash generator family name and digest size :param family: name of hash-generator family :return: WHashGeneratorProto class ] for taget[name[generator_name]] in starred[call[name[WHash].available_generators, parameter[]]] begin[:] variable[generator] assign[=] call[name[WHash].generator, parameter[name[generator_name]]] if compare[call[name[generator].generator_digest_size, parameter[]] equal[==] name[digest_size]] begin[:] return[name[generator]] <ast.Raise object at 0x7da20e954250>
keyword[def] identifier[generator_by_digest] ( identifier[family] , identifier[digest_size] ): literal[string] keyword[for] identifier[generator_name] keyword[in] identifier[WHash] . identifier[available_generators] ( identifier[family] = identifier[family] ): identifier[generator] = identifier[WHash] . identifier[generator] ( identifier[generator_name] ) keyword[if] identifier[generator] . identifier[generator_digest_size] ()== identifier[digest_size] : keyword[return] identifier[generator] keyword[raise] identifier[ValueError] ( literal[string] )
def generator_by_digest(family, digest_size): """ Return generator by hash generator family name and digest size :param family: name of hash-generator family :return: WHashGeneratorProto class """ for generator_name in WHash.available_generators(family=family): generator = WHash.generator(generator_name) if generator.generator_digest_size() == digest_size: return generator # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['generator_name']] raise ValueError('Hash generator is not available')
def get_argument_values(self, model, prefix_args): """ Extract arguments for model from the environment and return as a tuple that is ready to be passed to the model. :param callable model: Python model of the function :param tuple prefix_args: Parameters to pass to model before actual ones :return: Arguments to be passed to the model :rtype: tuple """ spec = inspect.getfullargspec(model) if spec.varargs: logger.warning("ABI: A vararg model must be a unary function.") nargs = len(spec.args) - len(prefix_args) # If the model is a method, we need to account for `self` if inspect.ismethod(model): nargs -= 1 def resolve_argument(arg): if isinstance(arg, str): return self._cpu.read_register(arg) else: return self._cpu.read_int(arg) # Create a stream of resolved arguments from argument descriptors descriptors = self.get_arguments() argument_iter = map(resolve_argument, descriptors) from ..models import isvariadic # prevent circular imports if isvariadic(model): arguments = prefix_args + (argument_iter,) else: arguments = prefix_args + tuple(islice(argument_iter, nargs)) return arguments
def function[get_argument_values, parameter[self, model, prefix_args]]: constant[ Extract arguments for model from the environment and return as a tuple that is ready to be passed to the model. :param callable model: Python model of the function :param tuple prefix_args: Parameters to pass to model before actual ones :return: Arguments to be passed to the model :rtype: tuple ] variable[spec] assign[=] call[name[inspect].getfullargspec, parameter[name[model]]] if name[spec].varargs begin[:] call[name[logger].warning, parameter[constant[ABI: A vararg model must be a unary function.]]] variable[nargs] assign[=] binary_operation[call[name[len], parameter[name[spec].args]] - call[name[len], parameter[name[prefix_args]]]] if call[name[inspect].ismethod, parameter[name[model]]] begin[:] <ast.AugAssign object at 0x7da18dc061a0> def function[resolve_argument, parameter[arg]]: if call[name[isinstance], parameter[name[arg], name[str]]] begin[:] return[call[name[self]._cpu.read_register, parameter[name[arg]]]] variable[descriptors] assign[=] call[name[self].get_arguments, parameter[]] variable[argument_iter] assign[=] call[name[map], parameter[name[resolve_argument], name[descriptors]]] from relative_module[models] import module[isvariadic] if call[name[isvariadic], parameter[name[model]]] begin[:] variable[arguments] assign[=] binary_operation[name[prefix_args] + tuple[[<ast.Name object at 0x7da18dc05e70>]]] return[name[arguments]]
keyword[def] identifier[get_argument_values] ( identifier[self] , identifier[model] , identifier[prefix_args] ): literal[string] identifier[spec] = identifier[inspect] . identifier[getfullargspec] ( identifier[model] ) keyword[if] identifier[spec] . identifier[varargs] : identifier[logger] . identifier[warning] ( literal[string] ) identifier[nargs] = identifier[len] ( identifier[spec] . identifier[args] )- identifier[len] ( identifier[prefix_args] ) keyword[if] identifier[inspect] . identifier[ismethod] ( identifier[model] ): identifier[nargs] -= literal[int] keyword[def] identifier[resolve_argument] ( identifier[arg] ): keyword[if] identifier[isinstance] ( identifier[arg] , identifier[str] ): keyword[return] identifier[self] . identifier[_cpu] . identifier[read_register] ( identifier[arg] ) keyword[else] : keyword[return] identifier[self] . identifier[_cpu] . identifier[read_int] ( identifier[arg] ) identifier[descriptors] = identifier[self] . identifier[get_arguments] () identifier[argument_iter] = identifier[map] ( identifier[resolve_argument] , identifier[descriptors] ) keyword[from] .. identifier[models] keyword[import] identifier[isvariadic] keyword[if] identifier[isvariadic] ( identifier[model] ): identifier[arguments] = identifier[prefix_args] +( identifier[argument_iter] ,) keyword[else] : identifier[arguments] = identifier[prefix_args] + identifier[tuple] ( identifier[islice] ( identifier[argument_iter] , identifier[nargs] )) keyword[return] identifier[arguments]
def get_argument_values(self, model, prefix_args): """ Extract arguments for model from the environment and return as a tuple that is ready to be passed to the model. :param callable model: Python model of the function :param tuple prefix_args: Parameters to pass to model before actual ones :return: Arguments to be passed to the model :rtype: tuple """ spec = inspect.getfullargspec(model) if spec.varargs: logger.warning('ABI: A vararg model must be a unary function.') # depends on [control=['if'], data=[]] nargs = len(spec.args) - len(prefix_args) # If the model is a method, we need to account for `self` if inspect.ismethod(model): nargs -= 1 # depends on [control=['if'], data=[]] def resolve_argument(arg): if isinstance(arg, str): return self._cpu.read_register(arg) # depends on [control=['if'], data=[]] else: return self._cpu.read_int(arg) # Create a stream of resolved arguments from argument descriptors descriptors = self.get_arguments() argument_iter = map(resolve_argument, descriptors) from ..models import isvariadic # prevent circular imports if isvariadic(model): arguments = prefix_args + (argument_iter,) # depends on [control=['if'], data=[]] else: arguments = prefix_args + tuple(islice(argument_iter, nargs)) return arguments
def logical_chassis_fwdl_sanity_output_fwdl_cmd_status(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity") config = logical_chassis_fwdl_sanity output = ET.SubElement(logical_chassis_fwdl_sanity, "output") fwdl_cmd_status = ET.SubElement(output, "fwdl-cmd-status") fwdl_cmd_status.text = kwargs.pop('fwdl_cmd_status') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[logical_chassis_fwdl_sanity_output_fwdl_cmd_status, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[logical_chassis_fwdl_sanity] assign[=] call[name[ET].Element, parameter[constant[logical_chassis_fwdl_sanity]]] variable[config] assign[=] name[logical_chassis_fwdl_sanity] variable[output] assign[=] call[name[ET].SubElement, parameter[name[logical_chassis_fwdl_sanity], constant[output]]] variable[fwdl_cmd_status] assign[=] call[name[ET].SubElement, parameter[name[output], constant[fwdl-cmd-status]]] name[fwdl_cmd_status].text assign[=] call[name[kwargs].pop, parameter[constant[fwdl_cmd_status]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[logical_chassis_fwdl_sanity_output_fwdl_cmd_status] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[logical_chassis_fwdl_sanity] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[logical_chassis_fwdl_sanity] identifier[output] = identifier[ET] . identifier[SubElement] ( identifier[logical_chassis_fwdl_sanity] , literal[string] ) identifier[fwdl_cmd_status] = identifier[ET] . identifier[SubElement] ( identifier[output] , literal[string] ) identifier[fwdl_cmd_status] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def logical_chassis_fwdl_sanity_output_fwdl_cmd_status(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') logical_chassis_fwdl_sanity = ET.Element('logical_chassis_fwdl_sanity') config = logical_chassis_fwdl_sanity output = ET.SubElement(logical_chassis_fwdl_sanity, 'output') fwdl_cmd_status = ET.SubElement(output, 'fwdl-cmd-status') fwdl_cmd_status.text = kwargs.pop('fwdl_cmd_status') callback = kwargs.pop('callback', self._callback) return callback(config)
def markdown_to_safe_html(markdown_string): """Convert Markdown to HTML that's safe to splice into the DOM. Arguments: markdown_string: A Unicode string or UTF-8--encoded bytestring containing Markdown source. Markdown tables are supported. Returns: A string containing safe HTML. """ warning = '' # Convert to utf-8 whenever we have a binary input. if isinstance(markdown_string, six.binary_type): markdown_string_decoded = markdown_string.decode('utf-8') # Remove null bytes and warn if there were any, since it probably means # we were given a bad encoding. markdown_string = markdown_string_decoded.replace(u'\x00', u'') num_null_bytes = len(markdown_string_decoded) - len(markdown_string) if num_null_bytes: warning = ('<!-- WARNING: discarded %d null bytes in markdown string ' 'after UTF-8 decoding -->\n') % num_null_bytes string_html = markdown.markdown( markdown_string, extensions=['markdown.extensions.tables']) string_sanitized = bleach.clean( string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES) return warning + string_sanitized
def function[markdown_to_safe_html, parameter[markdown_string]]: constant[Convert Markdown to HTML that's safe to splice into the DOM. Arguments: markdown_string: A Unicode string or UTF-8--encoded bytestring containing Markdown source. Markdown tables are supported. Returns: A string containing safe HTML. ] variable[warning] assign[=] constant[] if call[name[isinstance], parameter[name[markdown_string], name[six].binary_type]] begin[:] variable[markdown_string_decoded] assign[=] call[name[markdown_string].decode, parameter[constant[utf-8]]] variable[markdown_string] assign[=] call[name[markdown_string_decoded].replace, parameter[constant[], constant[]]] variable[num_null_bytes] assign[=] binary_operation[call[name[len], parameter[name[markdown_string_decoded]]] - call[name[len], parameter[name[markdown_string]]]] if name[num_null_bytes] begin[:] variable[warning] assign[=] binary_operation[constant[<!-- WARNING: discarded %d null bytes in markdown string after UTF-8 decoding --> ] <ast.Mod object at 0x7da2590d6920> name[num_null_bytes]] variable[string_html] assign[=] call[name[markdown].markdown, parameter[name[markdown_string]]] variable[string_sanitized] assign[=] call[name[bleach].clean, parameter[name[string_html]]] return[binary_operation[name[warning] + name[string_sanitized]]]
keyword[def] identifier[markdown_to_safe_html] ( identifier[markdown_string] ): literal[string] identifier[warning] = literal[string] keyword[if] identifier[isinstance] ( identifier[markdown_string] , identifier[six] . identifier[binary_type] ): identifier[markdown_string_decoded] = identifier[markdown_string] . identifier[decode] ( literal[string] ) identifier[markdown_string] = identifier[markdown_string_decoded] . identifier[replace] ( literal[string] , literal[string] ) identifier[num_null_bytes] = identifier[len] ( identifier[markdown_string_decoded] )- identifier[len] ( identifier[markdown_string] ) keyword[if] identifier[num_null_bytes] : identifier[warning] =( literal[string] literal[string] )% identifier[num_null_bytes] identifier[string_html] = identifier[markdown] . identifier[markdown] ( identifier[markdown_string] , identifier[extensions] =[ literal[string] ]) identifier[string_sanitized] = identifier[bleach] . identifier[clean] ( identifier[string_html] , identifier[tags] = identifier[_ALLOWED_TAGS] , identifier[attributes] = identifier[_ALLOWED_ATTRIBUTES] ) keyword[return] identifier[warning] + identifier[string_sanitized]
def markdown_to_safe_html(markdown_string): """Convert Markdown to HTML that's safe to splice into the DOM. Arguments: markdown_string: A Unicode string or UTF-8--encoded bytestring containing Markdown source. Markdown tables are supported. Returns: A string containing safe HTML. """ warning = '' # Convert to utf-8 whenever we have a binary input. if isinstance(markdown_string, six.binary_type): markdown_string_decoded = markdown_string.decode('utf-8') # Remove null bytes and warn if there were any, since it probably means # we were given a bad encoding. markdown_string = markdown_string_decoded.replace(u'\x00', u'') num_null_bytes = len(markdown_string_decoded) - len(markdown_string) if num_null_bytes: warning = '<!-- WARNING: discarded %d null bytes in markdown string after UTF-8 decoding -->\n' % num_null_bytes # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] string_html = markdown.markdown(markdown_string, extensions=['markdown.extensions.tables']) string_sanitized = bleach.clean(string_html, tags=_ALLOWED_TAGS, attributes=_ALLOWED_ATTRIBUTES) return warning + string_sanitized