code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def process_allele(allele_data, dbsnp_data, header, reference):
    """Combine data from multiple lines referring to a single allele.

    :param allele_data: rows (lists) that together describe one allele,
        indexed through the ``header`` column mapping
    :param dbsnp_data: list of dbSNP IDs; extended in place with any IDs
        found in the rows' xRef column
    :param header: mapping of column name to column index
    :param reference: unused here; kept for interface compatibility

    Returns four items in this order:
        (string) concatenated variant sequence (ie allele the genome has)
        (string) concatenated reference sequence
        (string) start position (1-based)
        (list) filters applying to this allele (e.g. VQLOW, NOCALL)
    """
    # One-based start to match VCF coordinates
    start = str(int(allele_data[0][header['begin']]))
    var_allele = ''
    ref_allele = ''
    filters = []
    for data in allele_data:
        # Some input formats expose a varQuality column; otherwise quality
        # issues are encoded in varFilter.
        if 'varQuality' in header:
            if 'VQLOW' in data[header['varQuality']]:
                filters.append('VQLOW')
        else:
            var_filter = data[header['varFilter']]
            if var_filter and var_filter != "PASS":
                filters.extend(var_filter.split(';'))
        if data[header['varType']] == 'no-call':
            # A no-call supersedes any other filters for this allele.
            filters = ['NOCALL']
            ref_allele += data[header['reference']]
            continue
        var_allele += data[header['alleleSeq']]
        ref_allele += data[header['reference']]
        if data[header['xRef']]:
            # xRef entries look like "dbsnp:rsNNN"; collect the ID part.
            for dbsnp_item in data[header['xRef']].split(';'):
                dbsnp_data.append(dbsnp_item.split(':')[1])
    # It's theoretically possible to break up a partial no-call allele into
    # separated gVCF lines, but it's hard. Treat the whole allele as no-call.
    if 'NOCALL' in filters:
        filters = ['NOCALL']
        var_allele = '?'
    return var_allele, ref_allele, start, filters
def function[process_allele, parameter[allele_data, dbsnp_data, header, reference]]: constant[Combine data from multiple lines refering to a single allele. Returns three items in this order: (string) concatenated variant sequence (ie allele the genome has) (string) concatenated reference sequence (string) start position (1-based) ] variable[start] assign[=] call[name[str], parameter[call[name[int], parameter[call[call[name[allele_data]][constant[0]]][call[name[header]][constant[begin]]]]]]] variable[var_allele] assign[=] constant[] variable[ref_allele] assign[=] constant[] variable[filters] assign[=] list[[]] for taget[name[data]] in starred[name[allele_data]] begin[:] if compare[constant[varQuality] in name[header]] begin[:] if compare[constant[VQLOW] in call[name[data]][call[name[header]][constant[varQuality]]]] begin[:] call[name[filters].append, parameter[constant[VQLOW]]] if compare[call[name[data]][call[name[header]][constant[varType]]] equal[==] constant[no-call]] begin[:] variable[filters] assign[=] list[[<ast.Constant object at 0x7da1b0a49210>]] variable[ref_allele] assign[=] binary_operation[name[ref_allele] + call[name[data]][call[name[header]][constant[reference]]]] continue variable[var_allele] assign[=] binary_operation[name[var_allele] + call[name[data]][call[name[header]][constant[alleleSeq]]]] variable[ref_allele] assign[=] binary_operation[name[ref_allele] + call[name[data]][call[name[header]][constant[reference]]]] if call[name[data]][call[name[header]][constant[xRef]]] begin[:] for taget[name[dbsnp_item]] in starred[call[call[name[data]][call[name[header]][constant[xRef]]].split, parameter[constant[;]]]] begin[:] call[name[dbsnp_data].append, parameter[call[call[name[dbsnp_item].split, parameter[constant[:]]]][constant[1]]]] if compare[constant[NOCALL] in name[filters]] begin[:] variable[filters] assign[=] list[[<ast.Constant object at 0x7da1b0a48850>]] variable[var_allele] assign[=] constant[?] 
return[tuple[[<ast.Name object at 0x7da1b0a49300>, <ast.Name object at 0x7da1b0a48a90>, <ast.Name object at 0x7da1b0a49e10>, <ast.Name object at 0x7da1b0a498a0>]]]
keyword[def] identifier[process_allele] ( identifier[allele_data] , identifier[dbsnp_data] , identifier[header] , identifier[reference] ): literal[string] identifier[start] = identifier[str] ( identifier[int] ( identifier[allele_data] [ literal[int] ][ identifier[header] [ literal[string] ]])) identifier[var_allele] = literal[string] identifier[ref_allele] = literal[string] identifier[filters] =[] keyword[for] identifier[data] keyword[in] identifier[allele_data] : keyword[if] literal[string] keyword[in] identifier[header] : keyword[if] literal[string] keyword[in] identifier[data] [ identifier[header] [ literal[string] ]]: identifier[filters] . identifier[append] ( literal[string] ) keyword[else] : identifier[var_filter] = identifier[data] [ identifier[header] [ literal[string] ]] keyword[if] identifier[var_filter] keyword[and] keyword[not] identifier[var_filter] == literal[string] : identifier[filters] = identifier[filters] + identifier[var_filter] . identifier[split] ( literal[string] ) keyword[if] identifier[data] [ identifier[header] [ literal[string] ]]== literal[string] : identifier[filters] =[ literal[string] ] identifier[ref_allele] = identifier[ref_allele] + identifier[data] [ identifier[header] [ literal[string] ]] keyword[continue] identifier[var_allele] = identifier[var_allele] + identifier[data] [ identifier[header] [ literal[string] ]] identifier[ref_allele] = identifier[ref_allele] + identifier[data] [ identifier[header] [ literal[string] ]] keyword[if] identifier[data] [ identifier[header] [ literal[string] ]]: keyword[for] identifier[dbsnp_item] keyword[in] identifier[data] [ identifier[header] [ literal[string] ]]. identifier[split] ( literal[string] ): identifier[dbsnp_data] . identifier[append] ( identifier[dbsnp_item] . 
identifier[split] ( literal[string] )[ literal[int] ]) keyword[if] literal[string] keyword[in] identifier[filters] : identifier[filters] =[ literal[string] ] identifier[var_allele] = literal[string] keyword[return] identifier[var_allele] , identifier[ref_allele] , identifier[start] , identifier[filters]
def process_allele(allele_data, dbsnp_data, header, reference): """Combine data from multiple lines refering to a single allele. Returns three items in this order: (string) concatenated variant sequence (ie allele the genome has) (string) concatenated reference sequence (string) start position (1-based) """ # One-based start to match VCF coordinates start = str(int(allele_data[0][header['begin']])) var_allele = '' ref_allele = '' filters = [] for data in allele_data: if 'varQuality' in header: if 'VQLOW' in data[header['varQuality']]: filters.append('VQLOW') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['header']] else: var_filter = data[header['varFilter']] if var_filter and (not var_filter == 'PASS'): filters = filters + var_filter.split(';') # depends on [control=['if'], data=[]] if data[header['varType']] == 'no-call': filters = ['NOCALL'] ref_allele = ref_allele + data[header['reference']] continue # depends on [control=['if'], data=[]] var_allele = var_allele + data[header['alleleSeq']] ref_allele = ref_allele + data[header['reference']] if data[header['xRef']]: for dbsnp_item in data[header['xRef']].split(';'): dbsnp_data.append(dbsnp_item.split(':')[1]) # depends on [control=['for'], data=['dbsnp_item']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data']] # It's theoretically possible to break up a partial no-call allele into # separated gVCF lines, but it's hard. Treat the whole allele as no-call. if 'NOCALL' in filters: filters = ['NOCALL'] var_allele = '?' # depends on [control=['if'], data=['filters']] return (var_allele, ref_allele, start, filters)
def _show_final_paste_message(self, tl_key, no_pasted_cells):
    """Show actually pasted number of cells"""
    suffix = _("s") if no_pasted_cells != 1 else ""
    template = _("{ncells} cell{plural} pasted at cell {topleft}")
    statustext = template.format(ncells=no_pasted_cells, plural=suffix,
                                 topleft=tl_key)
    post_command_event(self.main_window, self.StatusBarMsg,
                       text=statustext)
def function[_show_final_paste_message, parameter[self, tl_key, no_pasted_cells]]: constant[Show actually pasted number of cells] variable[plural] assign[=] <ast.IfExp object at 0x7da1b17224a0> variable[statustext] assign[=] call[call[name[_], parameter[constant[{ncells} cell{plural} pasted at cell {topleft}]]].format, parameter[]] call[name[post_command_event], parameter[name[self].main_window, name[self].StatusBarMsg]]
keyword[def] identifier[_show_final_paste_message] ( identifier[self] , identifier[tl_key] , identifier[no_pasted_cells] ): literal[string] identifier[plural] = literal[string] keyword[if] identifier[no_pasted_cells] == literal[int] keyword[else] identifier[_] ( literal[string] ) identifier[statustext] = identifier[_] ( literal[string] ). identifier[format] ( identifier[ncells] = identifier[no_pasted_cells] , identifier[plural] = identifier[plural] , identifier[topleft] = identifier[tl_key] ) identifier[post_command_event] ( identifier[self] . identifier[main_window] , identifier[self] . identifier[StatusBarMsg] , identifier[text] = identifier[statustext] )
def _show_final_paste_message(self, tl_key, no_pasted_cells): """Show actually pasted number of cells""" plural = '' if no_pasted_cells == 1 else _('s') statustext = _('{ncells} cell{plural} pasted at cell {topleft}').format(ncells=no_pasted_cells, plural=plural, topleft=tl_key) post_command_event(self.main_window, self.StatusBarMsg, text=statustext)
def count(self, signature = None):
    """
    Counts how many crash dumps have been stored in this database.
    Optionally filters the count by heuristic signature.

    @type  signature: object
    @param signature: (Optional) Count only the crashes that match
        this signature. See L{Crash.signature} for more details.

    @rtype:  int
    @return: Count of crash dumps stored in this database.
    """
    base = self._session.query(CrashDTO.id)
    if not signature:
        return base.count()
    # Signatures are stored pickled (protocol 0), so pickle the filter
    # value the same way before comparing.
    pickled = pickle.dumps(signature, protocol = 0)
    return base.filter_by(signature = pickled).count()
def function[count, parameter[self, signature]]: constant[ Counts how many crash dumps have been stored in this database. Optionally filters the count by heuristic signature. @type signature: object @param signature: (Optional) Count only the crashes that match this signature. See L{Crash.signature} for more details. @rtype: int @return: Count of crash dumps stored in this database. ] variable[query] assign[=] call[name[self]._session.query, parameter[name[CrashDTO].id]] if name[signature] begin[:] variable[sig_pickled] assign[=] call[name[pickle].dumps, parameter[name[signature]]] variable[query] assign[=] call[name[query].filter_by, parameter[]] return[call[name[query].count, parameter[]]]
keyword[def] identifier[count] ( identifier[self] , identifier[signature] = keyword[None] ): literal[string] identifier[query] = identifier[self] . identifier[_session] . identifier[query] ( identifier[CrashDTO] . identifier[id] ) keyword[if] identifier[signature] : identifier[sig_pickled] = identifier[pickle] . identifier[dumps] ( identifier[signature] , identifier[protocol] = literal[int] ) identifier[query] = identifier[query] . identifier[filter_by] ( identifier[signature] = identifier[sig_pickled] ) keyword[return] identifier[query] . identifier[count] ()
def count(self, signature=None): """ Counts how many crash dumps have been stored in this database. Optionally filters the count by heuristic signature. @type signature: object @param signature: (Optional) Count only the crashes that match this signature. See L{Crash.signature} for more details. @rtype: int @return: Count of crash dumps stored in this database. """ query = self._session.query(CrashDTO.id) if signature: sig_pickled = pickle.dumps(signature, protocol=0) query = query.filter_by(signature=sig_pickled) # depends on [control=['if'], data=[]] return query.count()
def blockSelectionSignals( self, state ):
    """
    Blocks or unblocks the selection finished signal. Used internally
    to control selection signal propagation, so should not really be
    called unless you know why you are calling it. Unblocking emits
    the selection finished signal once.

    :param      state <bool>
    """
    previous = self._selectionSignalsBlocked
    if previous == state:
        return

    self._selectionSignalsBlocked = state

    # Unblocking flushes the selection change that happened while blocked.
    if not state:
        self.emitSelectionFinished()
def function[blockSelectionSignals, parameter[self, state]]: constant[ Sets the state for the seleciton finished signal. When it is set to True, it will emit the signal. This is used internally to control selection signal propogation, so should not really be called unless you know why you are calling it. :param state <bool> ] if compare[name[self]._selectionSignalsBlocked equal[==] name[state]] begin[:] return[None] name[self]._selectionSignalsBlocked assign[=] name[state] if <ast.UnaryOp object at 0x7da20c796710> begin[:] call[name[self].emitSelectionFinished, parameter[]]
keyword[def] identifier[blockSelectionSignals] ( identifier[self] , identifier[state] ): literal[string] keyword[if] ( identifier[self] . identifier[_selectionSignalsBlocked] == identifier[state] ): keyword[return] identifier[self] . identifier[_selectionSignalsBlocked] = identifier[state] keyword[if] ( keyword[not] identifier[state] ): identifier[self] . identifier[emitSelectionFinished] ()
def blockSelectionSignals(self, state): """ Sets the state for the seleciton finished signal. When it is set to True, it will emit the signal. This is used internally to control selection signal propogation, so should not really be called unless you know why you are calling it. :param state <bool> """ if self._selectionSignalsBlocked == state: return # depends on [control=['if'], data=[]] self._selectionSignalsBlocked = state if not state: self.emitSelectionFinished() # depends on [control=['if'], data=[]]
def from_line(cls, line):
    """:return: New RefLogEntry instance from the given revlog line.
    :param line: line bytes without trailing newline
    :raise ValueError: If line could not be parsed"""
    line = line.decode(defenc)
    # An entry may carry an optional message after a single TAB.
    fields = line.split('\t', 1)
    if len(fields) == 1:
        info, msg = fields[0], None
    elif len(fields) == 2:
        info, msg = fields
    else:
        raise ValueError("Line must have up to two TAB-separated fields."
                         " Got %s" % repr(line))
    # END handle first split

    # The info part begins with two 40-character hex SHAs (old, new),
    # separated by a single space.
    oldhexsha = info[:40]
    newhexsha = info[41:81]
    for hexsha in (oldhexsha, newhexsha):
        if not cls._re_hexsha_only.match(hexsha):
            raise ValueError("Invalid hexsha: %r" % (hexsha,))
        # END if hexsha re doesn't match
    # END for each hexsha

    # The actor ("Name <email>") starts at offset 82 and runs through the
    # closing '>' of the email address.
    email_end = info.find('>', 82)
    if email_end == -1:
        raise ValueError("Missing token: >")
    # END handle missing end brace

    actor = Actor._from_string(info[82:email_end + 1])
    # The remainder holds the timestamp and timezone offset.
    time, tz_offset = parse_date(info[email_end + 2:])

    return RefLogEntry((oldhexsha, newhexsha, actor, (time, tz_offset), msg))
def function[from_line, parameter[cls, line]]: constant[:return: New RefLogEntry instance from the given revlog line. :param line: line bytes without trailing newline :raise ValueError: If line could not be parsed] variable[line] assign[=] call[name[line].decode, parameter[name[defenc]]] variable[fields] assign[=] call[name[line].split, parameter[constant[ ], constant[1]]] if compare[call[name[len], parameter[name[fields]]] equal[==] constant[1]] begin[:] <ast.Tuple object at 0x7da204347850> assign[=] tuple[[<ast.Subscript object at 0x7da2043446d0>, <ast.Constant object at 0x7da1b23583d0>]] variable[oldhexsha] assign[=] call[name[info]][<ast.Slice object at 0x7da204346290>] variable[newhexsha] assign[=] call[name[info]][<ast.Slice object at 0x7da204344dc0>] for taget[name[hexsha]] in starred[tuple[[<ast.Name object at 0x7da204346380>, <ast.Name object at 0x7da204347790>]]] begin[:] if <ast.UnaryOp object at 0x7da204346bc0> begin[:] <ast.Raise object at 0x7da204344220> variable[email_end] assign[=] call[name[info].find, parameter[constant[>], constant[82]]] if compare[name[email_end] equal[==] <ast.UnaryOp object at 0x7da2043477f0>] begin[:] <ast.Raise object at 0x7da204345e10> variable[actor] assign[=] call[name[Actor]._from_string, parameter[call[name[info]][<ast.Slice object at 0x7da204346fb0>]]] <ast.Tuple object at 0x7da204347eb0> assign[=] call[name[parse_date], parameter[call[name[info]][<ast.Slice object at 0x7da204345cc0>]]] return[call[name[RefLogEntry], parameter[tuple[[<ast.Name object at 0x7da2043441f0>, <ast.Name object at 0x7da204347e50>, <ast.Name object at 0x7da2043447c0>, <ast.Tuple object at 0x7da204345810>, <ast.Name object at 0x7da204345750>]]]]]
keyword[def] identifier[from_line] ( identifier[cls] , identifier[line] ): literal[string] identifier[line] = identifier[line] . identifier[decode] ( identifier[defenc] ) identifier[fields] = identifier[line] . identifier[split] ( literal[string] , literal[int] ) keyword[if] identifier[len] ( identifier[fields] )== literal[int] : identifier[info] , identifier[msg] = identifier[fields] [ literal[int] ], keyword[None] keyword[elif] identifier[len] ( identifier[fields] )== literal[int] : identifier[info] , identifier[msg] = identifier[fields] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] % identifier[repr] ( identifier[line] )) identifier[oldhexsha] = identifier[info] [: literal[int] ] identifier[newhexsha] = identifier[info] [ literal[int] : literal[int] ] keyword[for] identifier[hexsha] keyword[in] ( identifier[oldhexsha] , identifier[newhexsha] ): keyword[if] keyword[not] identifier[cls] . identifier[_re_hexsha_only] . identifier[match] ( identifier[hexsha] ): keyword[raise] identifier[ValueError] ( literal[string] %( identifier[hexsha] ,)) identifier[email_end] = identifier[info] . identifier[find] ( literal[string] , literal[int] ) keyword[if] identifier[email_end] ==- literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[actor] = identifier[Actor] . identifier[_from_string] ( identifier[info] [ literal[int] : identifier[email_end] + literal[int] ]) identifier[time] , identifier[tz_offset] = identifier[parse_date] ( identifier[info] [ identifier[email_end] + literal[int] :]) keyword[return] identifier[RefLogEntry] (( identifier[oldhexsha] , identifier[newhexsha] , identifier[actor] ,( identifier[time] , identifier[tz_offset] ), identifier[msg] ))
def from_line(cls, line): """:return: New RefLogEntry instance from the given revlog line. :param line: line bytes without trailing newline :raise ValueError: If line could not be parsed""" line = line.decode(defenc) fields = line.split('\t', 1) if len(fields) == 1: (info, msg) = (fields[0], None) # depends on [control=['if'], data=[]] elif len(fields) == 2: (info, msg) = fields # depends on [control=['if'], data=[]] else: raise ValueError('Line must have up to two TAB-separated fields. Got %s' % repr(line)) # END handle first split oldhexsha = info[:40] newhexsha = info[41:81] for hexsha in (oldhexsha, newhexsha): if not cls._re_hexsha_only.match(hexsha): raise ValueError('Invalid hexsha: %r' % (hexsha,)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['hexsha']] # END if hexsha re doesn't match # END for each hexsha email_end = info.find('>', 82) if email_end == -1: raise ValueError('Missing token: >') # depends on [control=['if'], data=[]] # END handle missing end brace actor = Actor._from_string(info[82:email_end + 1]) (time, tz_offset) = parse_date(info[email_end + 2:]) return RefLogEntry((oldhexsha, newhexsha, actor, (time, tz_offset), msg))
def all_regions(self):
    """Get (chromosome, start, end) tuples for every reference sequence
    listed in the BAM header.
    """
    return [(sq["SN"], 1, int(sq["LN"]))
            for sq in self._bam.header["SQ"]]
def function[all_regions, parameter[self]]: constant[Get a tuple of all chromosome, start and end regions. ] variable[regions] assign[=] list[[]] for taget[name[sq]] in starred[call[name[self]._bam.header][constant[SQ]]] begin[:] call[name[regions].append, parameter[tuple[[<ast.Subscript object at 0x7da1b18daa40>, <ast.Constant object at 0x7da1b18d99c0>, <ast.Call object at 0x7da1b18da290>]]]] return[name[regions]]
keyword[def] identifier[all_regions] ( identifier[self] ): literal[string] identifier[regions] =[] keyword[for] identifier[sq] keyword[in] identifier[self] . identifier[_bam] . identifier[header] [ literal[string] ]: identifier[regions] . identifier[append] (( identifier[sq] [ literal[string] ], literal[int] , identifier[int] ( identifier[sq] [ literal[string] ]))) keyword[return] identifier[regions]
def all_regions(self): """Get a tuple of all chromosome, start and end regions. """ regions = [] for sq in self._bam.header['SQ']: regions.append((sq['SN'], 1, int(sq['LN']))) # depends on [control=['for'], data=['sq']] return regions
def parse_transform(transform_str):
    """Converts a valid SVG transformation string into a 3x3 matrix.

    If the string is empty or null, this returns a 3x3 identity matrix"""
    if not transform_str:
        return np.identity(3)
    if not isinstance(transform_str, str):
        raise TypeError('Must provide a string to parse')

    # Splitting on ')' leaves an empty trailing element, so drop it.
    matrix = np.identity(3)
    for chunk in transform_str.split(')')[:-1]:
        matrix = matrix.dot(_parse_transform_substr(chunk))
    return matrix
def function[parse_transform, parameter[transform_str]]: constant[Converts a valid SVG transformation string into a 3x3 matrix. If the string is empty or null, this returns a 3x3 identity matrix] if <ast.UnaryOp object at 0x7da1b2344f40> begin[:] return[call[name[np].identity, parameter[constant[3]]]] variable[total_transform] assign[=] call[name[np].identity, parameter[constant[3]]] variable[transform_substrs] assign[=] call[call[name[transform_str].split, parameter[constant[)]]]][<ast.Slice object at 0x7da1b2344d90>] for taget[name[substr]] in starred[name[transform_substrs]] begin[:] variable[total_transform] assign[=] call[name[total_transform].dot, parameter[call[name[_parse_transform_substr], parameter[name[substr]]]]] return[name[total_transform]]
keyword[def] identifier[parse_transform] ( identifier[transform_str] ): literal[string] keyword[if] keyword[not] identifier[transform_str] : keyword[return] identifier[np] . identifier[identity] ( literal[int] ) keyword[elif] keyword[not] identifier[isinstance] ( identifier[transform_str] , identifier[str] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[total_transform] = identifier[np] . identifier[identity] ( literal[int] ) identifier[transform_substrs] = identifier[transform_str] . identifier[split] ( literal[string] )[:- literal[int] ] keyword[for] identifier[substr] keyword[in] identifier[transform_substrs] : identifier[total_transform] = identifier[total_transform] . identifier[dot] ( identifier[_parse_transform_substr] ( identifier[substr] )) keyword[return] identifier[total_transform]
def parse_transform(transform_str): """Converts a valid SVG transformation string into a 3x3 matrix. If the string is empty or null, this returns a 3x3 identity matrix""" if not transform_str: return np.identity(3) # depends on [control=['if'], data=[]] elif not isinstance(transform_str, str): raise TypeError('Must provide a string to parse') # depends on [control=['if'], data=[]] total_transform = np.identity(3) transform_substrs = transform_str.split(')')[:-1] # Skip the last element, because it should be empty for substr in transform_substrs: total_transform = total_transform.dot(_parse_transform_substr(substr)) # depends on [control=['for'], data=['substr']] return total_transform
def get_suitable_slot_for_duplicate(self, src_slot):
    """Returns the suitable position for a duplicate of src_slot.

    Takes into account the WorksheetTemplate assigned to this worksheet,
    if any: a slot the template layout reserves for a duplicate of
    src_slot is preferred when still empty. Otherwise a new slot is
    added at the end of the worksheet. Returns -1 when src_slot is not
    a valid source for duplicates.

    :param src_slot:
    :return: suitable slot position for a duplicate of src_slot
    """
    slot_from = to_int(src_slot, 0)
    if slot_from < 1:
        return -1

    # Duplicates can only be created from routine analyses, i.e. those
    # that belong to an Analysis Request.
    source = self.get_container_at(slot_from)
    if not (source and IAnalysisRequest.providedBy(source)):
        return -1

    occupied = self.get_slot_positions(type='all')
    template = self.getWorksheetTemplate()
    if not template:
        # No worksheet template assigned: append a new slot at the end of
        # the worksheet and place the duplicate there.
        return max(occupied) + 1

    # Prefer an empty slot the template layout reserves for a duplicate
    # of this source slot.
    layout = template.getLayout()
    for entry in layout:
        if entry['type'] != 'd':
            continue
        if to_int(entry['dup']) != slot_from:
            continue
        candidate = int(entry['pos'])
        if candidate not in occupied:
            return candidate

    # Append a new slot at the end, but make sure slots reserved by the
    # template layout are never overridden.
    occupied.append(len(layout))
    return max(occupied) + 1
def function[get_suitable_slot_for_duplicate, parameter[self, src_slot]]: constant[Returns the suitable position for a duplicate analysis, taking into account if there is a WorksheetTemplate assigned to this worksheet. By default, returns a new slot at the end of the worksheet unless there is a slot defined for a duplicate of the src_slot in the worksheet template layout not yet used. :param src_slot: :return: suitable slot position for a duplicate of src_slot ] variable[slot_from] assign[=] call[name[to_int], parameter[name[src_slot], constant[0]]] if compare[name[slot_from] less[<] constant[1]] begin[:] return[<ast.UnaryOp object at 0x7da1b231cfa0>] variable[container] assign[=] call[name[self].get_container_at, parameter[name[slot_from]]] if <ast.BoolOp object at 0x7da1b231e650> begin[:] return[<ast.UnaryOp object at 0x7da1b231d9c0>] variable[occupied] assign[=] call[name[self].get_slot_positions, parameter[]] variable[wst] assign[=] call[name[self].getWorksheetTemplate, parameter[]] if <ast.UnaryOp object at 0x7da1b231e830> begin[:] variable[slot_to] assign[=] binary_operation[call[name[max], parameter[name[occupied]]] + constant[1]] return[name[slot_to]] variable[layout] assign[=] call[name[wst].getLayout, parameter[]] for taget[name[pos]] in starred[name[layout]] begin[:] if <ast.BoolOp object at 0x7da1b231d030> begin[:] continue variable[slot_to] assign[=] call[name[int], parameter[call[name[pos]][constant[pos]]]] if compare[name[slot_to] in name[occupied]] begin[:] continue return[name[slot_to]] call[name[occupied].append, parameter[call[name[len], parameter[name[layout]]]]] variable[slot_to] assign[=] binary_operation[call[name[max], parameter[name[occupied]]] + constant[1]] return[name[slot_to]]
keyword[def] identifier[get_suitable_slot_for_duplicate] ( identifier[self] , identifier[src_slot] ): literal[string] identifier[slot_from] = identifier[to_int] ( identifier[src_slot] , literal[int] ) keyword[if] identifier[slot_from] < literal[int] : keyword[return] - literal[int] identifier[container] = identifier[self] . identifier[get_container_at] ( identifier[slot_from] ) keyword[if] keyword[not] identifier[container] keyword[or] keyword[not] identifier[IAnalysisRequest] . identifier[providedBy] ( identifier[container] ): keyword[return] - literal[int] identifier[occupied] = identifier[self] . identifier[get_slot_positions] ( identifier[type] = literal[string] ) identifier[wst] = identifier[self] . identifier[getWorksheetTemplate] () keyword[if] keyword[not] identifier[wst] : identifier[slot_to] = identifier[max] ( identifier[occupied] )+ literal[int] keyword[return] identifier[slot_to] identifier[layout] = identifier[wst] . identifier[getLayout] () keyword[for] identifier[pos] keyword[in] identifier[layout] : keyword[if] identifier[pos] [ literal[string] ]!= literal[string] keyword[or] identifier[to_int] ( identifier[pos] [ literal[string] ])!= identifier[slot_from] : keyword[continue] identifier[slot_to] = identifier[int] ( identifier[pos] [ literal[string] ]) keyword[if] identifier[slot_to] keyword[in] identifier[occupied] : keyword[continue] keyword[return] identifier[slot_to] identifier[occupied] . identifier[append] ( identifier[len] ( identifier[layout] )) identifier[slot_to] = identifier[max] ( identifier[occupied] )+ literal[int] keyword[return] identifier[slot_to]
def get_suitable_slot_for_duplicate(self, src_slot): """Returns the suitable position for a duplicate analysis, taking into account if there is a WorksheetTemplate assigned to this worksheet. By default, returns a new slot at the end of the worksheet unless there is a slot defined for a duplicate of the src_slot in the worksheet template layout not yet used. :param src_slot: :return: suitable slot position for a duplicate of src_slot """ slot_from = to_int(src_slot, 0) if slot_from < 1: return -1 # depends on [control=['if'], data=[]] # Are the analyses from src_slot suitable for duplicates creation? container = self.get_container_at(slot_from) if not container or not IAnalysisRequest.providedBy(container): # We cannot create duplicates from analyses other than routine ones, # those that belong to an Analysis Request. return -1 # depends on [control=['if'], data=[]] occupied = self.get_slot_positions(type='all') wst = self.getWorksheetTemplate() if not wst: # No worksheet template assigned, add a new slot at the end of # the worksheet with the duplicate there slot_to = max(occupied) + 1 return slot_to # depends on [control=['if'], data=[]] # If there is a match with the layout defined in the Worksheet # Template, use that slot instead of adding a new one at the end of # the worksheet layout = wst.getLayout() for pos in layout: if pos['type'] != 'd' or to_int(pos['dup']) != slot_from: continue # depends on [control=['if'], data=[]] slot_to = int(pos['pos']) if slot_to in occupied: # Not an empty slot continue # depends on [control=['if'], data=[]] # This slot is empty, use it instead of adding a new # slot at the end of the worksheet return slot_to # depends on [control=['for'], data=['pos']] # Add a new slot at the end of the worksheet, but take into account # that a worksheet template is assigned, so we need to take care to # not override slots defined by its layout occupied.append(len(layout)) slot_to = max(occupied) + 1 return slot_to
def release(version):
    """Tags all submodules for a new release.

    Ensures that git tags, as well as the version.py files in each
    submodule, agree and that the new version is strictly greater than
    the current version. Will fail if the new version is not an
    increment (following PEP 440). Creates a new git tag and commit.
    """
    # Order matters: validate first, then write files, commit, and tag.
    for step in (check_new_version,
                 set_new_version,
                 commit_new_version,
                 set_git_tag):
        step(version)
def function[release, parameter[version]]: constant[Tags all submodules for a new release. Ensures that git tags, as well as the version.py files in each submodule, agree and that the new version is strictly greater than the current version. Will fail if the new version is not an increment (following PEP 440). Creates a new git tag and commit. ] call[name[check_new_version], parameter[name[version]]] call[name[set_new_version], parameter[name[version]]] call[name[commit_new_version], parameter[name[version]]] call[name[set_git_tag], parameter[name[version]]]
keyword[def] identifier[release] ( identifier[version] ): literal[string] identifier[check_new_version] ( identifier[version] ) identifier[set_new_version] ( identifier[version] ) identifier[commit_new_version] ( identifier[version] ) identifier[set_git_tag] ( identifier[version] )
def release(version): """Tags all submodules for a new release. Ensures that git tags, as well as the version.py files in each submodule, agree and that the new version is strictly greater than the current version. Will fail if the new version is not an increment (following PEP 440). Creates a new git tag and commit. """ check_new_version(version) set_new_version(version) commit_new_version(version) set_git_tag(version)
def to_dict(self):
    """Return this message as a dict suitable for json encoding."""
    import copy
    options = copy.deepcopy(self._options)

    # JSON cannot represent datetimes, so flatten eta to a unix timestamp.
    task_args = options.get('task_args', {})
    eta = task_args.get('eta')
    if eta:
        task_args['eta'] = time.mktime(eta.timetuple())
    return options
def function[to_dict, parameter[self]]: constant[Return this message as a dict suitable for json encoding.] import module[copy] variable[options] assign[=] call[name[copy].deepcopy, parameter[name[self]._options]] variable[eta] assign[=] call[call[name[options].get, parameter[constant[task_args], dictionary[[], []]]].get, parameter[constant[eta]]] if name[eta] begin[:] call[call[name[options]][constant[task_args]]][constant[eta]] assign[=] call[name[time].mktime, parameter[call[name[eta].timetuple, parameter[]]]] return[name[options]]
keyword[def] identifier[to_dict] ( identifier[self] ): literal[string] keyword[import] identifier[copy] identifier[options] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[_options] ) identifier[eta] = identifier[options] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ) keyword[if] identifier[eta] : identifier[options] [ literal[string] ][ literal[string] ]= identifier[time] . identifier[mktime] ( identifier[eta] . identifier[timetuple] ()) keyword[return] identifier[options]
def to_dict(self): """Return this message as a dict suitable for json encoding.""" import copy options = copy.deepcopy(self._options) # JSON don't like datetimes. eta = options.get('task_args', {}).get('eta') if eta: options['task_args']['eta'] = time.mktime(eta.timetuple()) # depends on [control=['if'], data=[]] return options
def make_room_alias(chain_id: ChainID, *suffixes: str) -> str: """Given a chain_id and any number of suffixes (global room names, pair of addresses), compose and return the canonical room name for raiden network network name from raiden_contracts.constants.ID_TO_NETWORKNAME is used for name, if available, else numeric id Params: chain_id: numeric blockchain id for that room, as raiden rooms are per-chain specific *suffixes: one or more suffixes for the name Returns: Qualified full room name. e.g.: make_room_alias(3, 'discovery') == 'raiden_ropsten_discovery' """ network_name = ID_TO_NETWORKNAME.get(chain_id, str(chain_id)) return ROOM_NAME_SEPARATOR.join([ROOM_NAME_PREFIX, network_name, *suffixes])
def function[make_room_alias, parameter[chain_id]]: constant[Given a chain_id and any number of suffixes (global room names, pair of addresses), compose and return the canonical room name for raiden network network name from raiden_contracts.constants.ID_TO_NETWORKNAME is used for name, if available, else numeric id Params: chain_id: numeric blockchain id for that room, as raiden rooms are per-chain specific *suffixes: one or more suffixes for the name Returns: Qualified full room name. e.g.: make_room_alias(3, 'discovery') == 'raiden_ropsten_discovery' ] variable[network_name] assign[=] call[name[ID_TO_NETWORKNAME].get, parameter[name[chain_id], call[name[str], parameter[name[chain_id]]]]] return[call[name[ROOM_NAME_SEPARATOR].join, parameter[list[[<ast.Name object at 0x7da1b175d450>, <ast.Name object at 0x7da1b175d420>, <ast.Starred object at 0x7da1b175d3f0>]]]]]
keyword[def] identifier[make_room_alias] ( identifier[chain_id] : identifier[ChainID] ,* identifier[suffixes] : identifier[str] )-> identifier[str] : literal[string] identifier[network_name] = identifier[ID_TO_NETWORKNAME] . identifier[get] ( identifier[chain_id] , identifier[str] ( identifier[chain_id] )) keyword[return] identifier[ROOM_NAME_SEPARATOR] . identifier[join] ([ identifier[ROOM_NAME_PREFIX] , identifier[network_name] ,* identifier[suffixes] ])
def make_room_alias(chain_id: ChainID, *suffixes: str) -> str: """Given a chain_id and any number of suffixes (global room names, pair of addresses), compose and return the canonical room name for raiden network network name from raiden_contracts.constants.ID_TO_NETWORKNAME is used for name, if available, else numeric id Params: chain_id: numeric blockchain id for that room, as raiden rooms are per-chain specific *suffixes: one or more suffixes for the name Returns: Qualified full room name. e.g.: make_room_alias(3, 'discovery') == 'raiden_ropsten_discovery' """ network_name = ID_TO_NETWORKNAME.get(chain_id, str(chain_id)) return ROOM_NAME_SEPARATOR.join([ROOM_NAME_PREFIX, network_name, *suffixes])
def list_vm_images_sub(access_token, subscription_id): '''List VM images in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM images. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/images', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
def function[list_vm_images_sub, parameter[access_token, subscription_id]]: constant[List VM images in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM images. ] variable[endpoint] assign[=] call[constant[].join, parameter[list[[<ast.Call object at 0x7da1b05da1a0>, <ast.Constant object at 0x7da1b05dafb0>, <ast.Name object at 0x7da1b05db370>, <ast.Constant object at 0x7da1b04d8790>, <ast.Constant object at 0x7da1b04d8400>, <ast.Name object at 0x7da1b04db1c0>]]]] return[call[name[do_get_next], parameter[name[endpoint], name[access_token]]]]
keyword[def] identifier[list_vm_images_sub] ( identifier[access_token] , identifier[subscription_id] ): literal[string] identifier[endpoint] = literal[string] . identifier[join] ([ identifier[get_rm_endpoint] (), literal[string] , identifier[subscription_id] , literal[string] , literal[string] , identifier[COMP_API] ]) keyword[return] identifier[do_get_next] ( identifier[endpoint] , identifier[access_token] )
def list_vm_images_sub(access_token, subscription_id): """List VM images in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM images. """ endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/images', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
def _make_c_string(string): """Make a 'C' string.""" if isinstance(string, bytes): try: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) else: return _utf_8_encode(string)[0] + b"\x00"
def function[_make_c_string, parameter[string]]: constant[Make a 'C' string.] if call[name[isinstance], parameter[name[string], name[bytes]]] begin[:] <ast.Try object at 0x7da20c6a9ff0>
keyword[def] identifier[_make_c_string] ( identifier[string] ): literal[string] keyword[if] identifier[isinstance] ( identifier[string] , identifier[bytes] ): keyword[try] : identifier[_utf_8_decode] ( identifier[string] , keyword[None] , keyword[True] ) keyword[return] identifier[string] + literal[string] keyword[except] identifier[UnicodeError] : keyword[raise] identifier[InvalidStringData] ( literal[string] literal[string] % identifier[string] ) keyword[else] : keyword[return] identifier[_utf_8_encode] ( identifier[string] )[ literal[int] ]+ literal[string]
def _make_c_string(string): """Make a 'C' string.""" if isinstance(string, bytes): try: _utf_8_decode(string, None, True) return string + b'\x00' # depends on [control=['try'], data=[]] except UnicodeError: raise InvalidStringData('strings in documents must be valid UTF-8: %r' % string) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: return _utf_8_encode(string)[0] + b'\x00'
def flush(self): """Delete all entries from the cache""" keys = self.client.smembers(self.keys_container) for key in keys: self.delete_entry(key)
def function[flush, parameter[self]]: constant[Delete all entries from the cache] variable[keys] assign[=] call[name[self].client.smembers, parameter[name[self].keys_container]] for taget[name[key]] in starred[name[keys]] begin[:] call[name[self].delete_entry, parameter[name[key]]]
keyword[def] identifier[flush] ( identifier[self] ): literal[string] identifier[keys] = identifier[self] . identifier[client] . identifier[smembers] ( identifier[self] . identifier[keys_container] ) keyword[for] identifier[key] keyword[in] identifier[keys] : identifier[self] . identifier[delete_entry] ( identifier[key] )
def flush(self): """Delete all entries from the cache""" keys = self.client.smembers(self.keys_container) for key in keys: self.delete_entry(key) # depends on [control=['for'], data=['key']]
def _create_sagemaker_pipeline_model(self, instance_type): """Create a SageMaker Model Entity Args: instance_type (str): The EC2 instance type that this Model will be used for, this is only used to determine if the image needs GPU support or not. accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator will be attached to the endpoint. """ if not self.sagemaker_session: self.sagemaker_session = Session() containers = self.pipeline_container_def(instance_type) self.name = self.name or name_from_image(containers[0]['Image']) self.sagemaker_session.create_model(self.name, self.role, containers, vpc_config=self.vpc_config)
def function[_create_sagemaker_pipeline_model, parameter[self, instance_type]]: constant[Create a SageMaker Model Entity Args: instance_type (str): The EC2 instance type that this Model will be used for, this is only used to determine if the image needs GPU support or not. accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator will be attached to the endpoint. ] if <ast.UnaryOp object at 0x7da1b1f0b9d0> begin[:] name[self].sagemaker_session assign[=] call[name[Session], parameter[]] variable[containers] assign[=] call[name[self].pipeline_container_def, parameter[name[instance_type]]] name[self].name assign[=] <ast.BoolOp object at 0x7da1b21c72e0> call[name[self].sagemaker_session.create_model, parameter[name[self].name, name[self].role, name[containers]]]
keyword[def] identifier[_create_sagemaker_pipeline_model] ( identifier[self] , identifier[instance_type] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[sagemaker_session] : identifier[self] . identifier[sagemaker_session] = identifier[Session] () identifier[containers] = identifier[self] . identifier[pipeline_container_def] ( identifier[instance_type] ) identifier[self] . identifier[name] = identifier[self] . identifier[name] keyword[or] identifier[name_from_image] ( identifier[containers] [ literal[int] ][ literal[string] ]) identifier[self] . identifier[sagemaker_session] . identifier[create_model] ( identifier[self] . identifier[name] , identifier[self] . identifier[role] , identifier[containers] , identifier[vpc_config] = identifier[self] . identifier[vpc_config] )
def _create_sagemaker_pipeline_model(self, instance_type): """Create a SageMaker Model Entity Args: instance_type (str): The EC2 instance type that this Model will be used for, this is only used to determine if the image needs GPU support or not. accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator will be attached to the endpoint. """ if not self.sagemaker_session: self.sagemaker_session = Session() # depends on [control=['if'], data=[]] containers = self.pipeline_container_def(instance_type) self.name = self.name or name_from_image(containers[0]['Image']) self.sagemaker_session.create_model(self.name, self.role, containers, vpc_config=self.vpc_config)
def _runtime(self, project, base): ''' first runtimes all project type files, like `project/type.vim`, then the specific project file, like `project/type/name.vim`. ''' def run(path_suf): path = '{}/{}'.format(base, path_suf) err = 'error sourcing {}.vim: {{}}'.format(path) return ( self.vim.runtime(path) .cata(L(err.format)(_) >> List, lambda a: List()) ) return ( project.all_types.flat_map(run) + (project.tpe.map(_ + '/' + project.name).map(run) | List()) + run('all/*') ).map(Error)
def function[_runtime, parameter[self, project, base]]: constant[ first runtimes all project type files, like `project/type.vim`, then the specific project file, like `project/type/name.vim`. ] def function[run, parameter[path_suf]]: variable[path] assign[=] call[constant[{}/{}].format, parameter[name[base], name[path_suf]]] variable[err] assign[=] call[constant[error sourcing {}.vim: {{}}].format, parameter[name[path]]] return[call[call[name[self].vim.runtime, parameter[name[path]]].cata, parameter[binary_operation[call[call[name[L], parameter[name[err].format]], parameter[name[_]]] <ast.RShift object at 0x7da2590d6a40> name[List]], <ast.Lambda object at 0x7da1b0a1d7b0>]]] return[call[binary_operation[binary_operation[call[name[project].all_types.flat_map, parameter[name[run]]] + binary_operation[call[call[name[project].tpe.map, parameter[binary_operation[binary_operation[name[_] + constant[/]] + name[project].name]]].map, parameter[name[run]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[List], parameter[]]]] + call[name[run], parameter[constant[all/*]]]].map, parameter[name[Error]]]]
keyword[def] identifier[_runtime] ( identifier[self] , identifier[project] , identifier[base] ): literal[string] keyword[def] identifier[run] ( identifier[path_suf] ): identifier[path] = literal[string] . identifier[format] ( identifier[base] , identifier[path_suf] ) identifier[err] = literal[string] . identifier[format] ( identifier[path] ) keyword[return] ( identifier[self] . identifier[vim] . identifier[runtime] ( identifier[path] ) . identifier[cata] ( identifier[L] ( identifier[err] . identifier[format] )( identifier[_] )>> identifier[List] , keyword[lambda] identifier[a] : identifier[List] ()) ) keyword[return] ( identifier[project] . identifier[all_types] . identifier[flat_map] ( identifier[run] )+ ( identifier[project] . identifier[tpe] . identifier[map] ( identifier[_] + literal[string] + identifier[project] . identifier[name] ). identifier[map] ( identifier[run] )| identifier[List] ())+ identifier[run] ( literal[string] ) ). identifier[map] ( identifier[Error] )
def _runtime(self, project, base): """ first runtimes all project type files, like `project/type.vim`, then the specific project file, like `project/type/name.vim`. """ def run(path_suf): path = '{}/{}'.format(base, path_suf) err = 'error sourcing {}.vim: {{}}'.format(path) return self.vim.runtime(path).cata(L(err.format)(_) >> List, lambda a: List()) return (project.all_types.flat_map(run) + (project.tpe.map(_ + '/' + project.name).map(run) | List()) + run('all/*')).map(Error)
def from_df(cls, df): """Convert a Pandas DataFrame into a Table.""" t = cls() labels = df.columns for label in df.columns: t.append_column(label, df[label]) return t
def function[from_df, parameter[cls, df]]: constant[Convert a Pandas DataFrame into a Table.] variable[t] assign[=] call[name[cls], parameter[]] variable[labels] assign[=] name[df].columns for taget[name[label]] in starred[name[df].columns] begin[:] call[name[t].append_column, parameter[name[label], call[name[df]][name[label]]]] return[name[t]]
keyword[def] identifier[from_df] ( identifier[cls] , identifier[df] ): literal[string] identifier[t] = identifier[cls] () identifier[labels] = identifier[df] . identifier[columns] keyword[for] identifier[label] keyword[in] identifier[df] . identifier[columns] : identifier[t] . identifier[append_column] ( identifier[label] , identifier[df] [ identifier[label] ]) keyword[return] identifier[t]
def from_df(cls, df): """Convert a Pandas DataFrame into a Table.""" t = cls() labels = df.columns for label in df.columns: t.append_column(label, df[label]) # depends on [control=['for'], data=['label']] return t
def is_between(self, low, high): """Asserts that val is numeric and is between low and high.""" val_type = type(self.val) self._validate_between_args(val_type, low, high) if self.val < low or self.val > high: if val_type is datetime.datetime: self._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (self.val.strftime('%Y-%m-%d %H:%M:%S'), low.strftime('%Y-%m-%d %H:%M:%S'), high.strftime('%Y-%m-%d %H:%M:%S'))) else: self._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (self.val, low, high)) return self
def function[is_between, parameter[self, low, high]]: constant[Asserts that val is numeric and is between low and high.] variable[val_type] assign[=] call[name[type], parameter[name[self].val]] call[name[self]._validate_between_args, parameter[name[val_type], name[low], name[high]]] if <ast.BoolOp object at 0x7da1b016f430> begin[:] if compare[name[val_type] is name[datetime].datetime] begin[:] call[name[self]._err, parameter[binary_operation[constant[Expected <%s> to be between <%s> and <%s>, but was not.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b016e7a0>, <ast.Call object at 0x7da1b0122680>, <ast.Call object at 0x7da1b01224d0>]]]]] return[name[self]]
keyword[def] identifier[is_between] ( identifier[self] , identifier[low] , identifier[high] ): literal[string] identifier[val_type] = identifier[type] ( identifier[self] . identifier[val] ) identifier[self] . identifier[_validate_between_args] ( identifier[val_type] , identifier[low] , identifier[high] ) keyword[if] identifier[self] . identifier[val] < identifier[low] keyword[or] identifier[self] . identifier[val] > identifier[high] : keyword[if] identifier[val_type] keyword[is] identifier[datetime] . identifier[datetime] : identifier[self] . identifier[_err] ( literal[string] %( identifier[self] . identifier[val] . identifier[strftime] ( literal[string] ), identifier[low] . identifier[strftime] ( literal[string] ), identifier[high] . identifier[strftime] ( literal[string] ))) keyword[else] : identifier[self] . identifier[_err] ( literal[string] %( identifier[self] . identifier[val] , identifier[low] , identifier[high] )) keyword[return] identifier[self]
def is_between(self, low, high): """Asserts that val is numeric and is between low and high.""" val_type = type(self.val) self._validate_between_args(val_type, low, high) if self.val < low or self.val > high: if val_type is datetime.datetime: self._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (self.val.strftime('%Y-%m-%d %H:%M:%S'), low.strftime('%Y-%m-%d %H:%M:%S'), high.strftime('%Y-%m-%d %H:%M:%S'))) # depends on [control=['if'], data=[]] else: self._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (self.val, low, high)) # depends on [control=['if'], data=[]] return self
def get_chunk_size(N, n): """Given a dimension of size 'N', determine the number of rows or columns that can fit into memory. Parameters ---------- N : int The size of one of the dimension of a two-dimensional array. n : int The number of times an 'N' by 'chunks_size' array can fit in memory. Returns ------- chunks_size : int The size of a dimension orthogonal to the dimension of size 'N'. """ mem_free = memory()['free'] if mem_free > 60000000: chunks_size = int(((mem_free - 10000000) * 1000) / (4 * n * N)) return chunks_size elif mem_free > 40000000: chunks_size = int(((mem_free - 7000000) * 1000) / (4 * n * N)) return chunks_size elif mem_free > 14000000: chunks_size = int(((mem_free - 2000000) * 1000) / (4 * n * N)) return chunks_size elif mem_free > 8000000: chunks_size = int(((mem_free - 1400000) * 1000) / (4 * n * N)) return chunks_size elif mem_free > 2000000: chunks_size = int(((mem_free - 900000) * 1000) / (4 * n * N)) return chunks_size elif mem_free > 1000000: chunks_size = int(((mem_free - 400000) * 1000) / (4 * n * N)) return chunks_size else: raise MemoryError("\nERROR: DBSCAN_multiplex @ get_chunk_size:\n" "this machine does not have enough free memory " "to perform the remaining computations.\n")
def function[get_chunk_size, parameter[N, n]]: constant[Given a dimension of size 'N', determine the number of rows or columns that can fit into memory. Parameters ---------- N : int The size of one of the dimension of a two-dimensional array. n : int The number of times an 'N' by 'chunks_size' array can fit in memory. Returns ------- chunks_size : int The size of a dimension orthogonal to the dimension of size 'N'. ] variable[mem_free] assign[=] call[call[name[memory], parameter[]]][constant[free]] if compare[name[mem_free] greater[>] constant[60000000]] begin[:] variable[chunks_size] assign[=] call[name[int], parameter[binary_operation[binary_operation[binary_operation[name[mem_free] - constant[10000000]] * constant[1000]] / binary_operation[binary_operation[constant[4] * name[n]] * name[N]]]]] return[name[chunks_size]]
keyword[def] identifier[get_chunk_size] ( identifier[N] , identifier[n] ): literal[string] identifier[mem_free] = identifier[memory] ()[ literal[string] ] keyword[if] identifier[mem_free] > literal[int] : identifier[chunks_size] = identifier[int] ((( identifier[mem_free] - literal[int] )* literal[int] )/( literal[int] * identifier[n] * identifier[N] )) keyword[return] identifier[chunks_size] keyword[elif] identifier[mem_free] > literal[int] : identifier[chunks_size] = identifier[int] ((( identifier[mem_free] - literal[int] )* literal[int] )/( literal[int] * identifier[n] * identifier[N] )) keyword[return] identifier[chunks_size] keyword[elif] identifier[mem_free] > literal[int] : identifier[chunks_size] = identifier[int] ((( identifier[mem_free] - literal[int] )* literal[int] )/( literal[int] * identifier[n] * identifier[N] )) keyword[return] identifier[chunks_size] keyword[elif] identifier[mem_free] > literal[int] : identifier[chunks_size] = identifier[int] ((( identifier[mem_free] - literal[int] )* literal[int] )/( literal[int] * identifier[n] * identifier[N] )) keyword[return] identifier[chunks_size] keyword[elif] identifier[mem_free] > literal[int] : identifier[chunks_size] = identifier[int] ((( identifier[mem_free] - literal[int] )* literal[int] )/( literal[int] * identifier[n] * identifier[N] )) keyword[return] identifier[chunks_size] keyword[elif] identifier[mem_free] > literal[int] : identifier[chunks_size] = identifier[int] ((( identifier[mem_free] - literal[int] )* literal[int] )/( literal[int] * identifier[n] * identifier[N] )) keyword[return] identifier[chunks_size] keyword[else] : keyword[raise] identifier[MemoryError] ( literal[string] literal[string] literal[string] )
def get_chunk_size(N, n): """Given a dimension of size 'N', determine the number of rows or columns that can fit into memory. Parameters ---------- N : int The size of one of the dimension of a two-dimensional array. n : int The number of times an 'N' by 'chunks_size' array can fit in memory. Returns ------- chunks_size : int The size of a dimension orthogonal to the dimension of size 'N'. """ mem_free = memory()['free'] if mem_free > 60000000: chunks_size = int((mem_free - 10000000) * 1000 / (4 * n * N)) return chunks_size # depends on [control=['if'], data=['mem_free']] elif mem_free > 40000000: chunks_size = int((mem_free - 7000000) * 1000 / (4 * n * N)) return chunks_size # depends on [control=['if'], data=['mem_free']] elif mem_free > 14000000: chunks_size = int((mem_free - 2000000) * 1000 / (4 * n * N)) return chunks_size # depends on [control=['if'], data=['mem_free']] elif mem_free > 8000000: chunks_size = int((mem_free - 1400000) * 1000 / (4 * n * N)) return chunks_size # depends on [control=['if'], data=['mem_free']] elif mem_free > 2000000: chunks_size = int((mem_free - 900000) * 1000 / (4 * n * N)) return chunks_size # depends on [control=['if'], data=['mem_free']] elif mem_free > 1000000: chunks_size = int((mem_free - 400000) * 1000 / (4 * n * N)) return chunks_size # depends on [control=['if'], data=['mem_free']] else: raise MemoryError('\nERROR: DBSCAN_multiplex @ get_chunk_size:\nthis machine does not have enough free memory to perform the remaining computations.\n')
def update_config(new_config): """ Update config options with the provided dictionary of options. """ flask_app.base_config.update(new_config) # Check for changed working directory. if new_config.has_key('working_directory'): wd = os.path.abspath(new_config['working_directory']) if nbmanager.notebook_dir != wd: if not os.path.exists(wd): raise IOError('Path not found: %s' % wd) nbmanager.notebook_dir = wd
def function[update_config, parameter[new_config]]: constant[ Update config options with the provided dictionary of options. ] call[name[flask_app].base_config.update, parameter[name[new_config]]] if call[name[new_config].has_key, parameter[constant[working_directory]]] begin[:] variable[wd] assign[=] call[name[os].path.abspath, parameter[call[name[new_config]][constant[working_directory]]]] if compare[name[nbmanager].notebook_dir not_equal[!=] name[wd]] begin[:] if <ast.UnaryOp object at 0x7da18bcc9db0> begin[:] <ast.Raise object at 0x7da2047e85e0> name[nbmanager].notebook_dir assign[=] name[wd]
keyword[def] identifier[update_config] ( identifier[new_config] ): literal[string] identifier[flask_app] . identifier[base_config] . identifier[update] ( identifier[new_config] ) keyword[if] identifier[new_config] . identifier[has_key] ( literal[string] ): identifier[wd] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[new_config] [ literal[string] ]) keyword[if] identifier[nbmanager] . identifier[notebook_dir] != identifier[wd] : keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[wd] ): keyword[raise] identifier[IOError] ( literal[string] % identifier[wd] ) identifier[nbmanager] . identifier[notebook_dir] = identifier[wd]
def update_config(new_config): """ Update config options with the provided dictionary of options. """ flask_app.base_config.update(new_config) # Check for changed working directory. if new_config.has_key('working_directory'): wd = os.path.abspath(new_config['working_directory']) if nbmanager.notebook_dir != wd: if not os.path.exists(wd): raise IOError('Path not found: %s' % wd) # depends on [control=['if'], data=[]] nbmanager.notebook_dir = wd # depends on [control=['if'], data=['wd']] # depends on [control=['if'], data=[]]
def _set_dynamic_bypass(self, v, load=False): """ Setter method for dynamic_bypass, mapped from YANG variable /mpls_state/dynamic_bypass (container) If this variable is read-only (config: false) in the source YANG file, then _set_dynamic_bypass is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dynamic_bypass() directly. YANG Description: MPLS dynamic bypass """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=dynamic_bypass.dynamic_bypass, is_container='container', presence=False, yang_name="dynamic-bypass", rest_name="dynamic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-dynamic-bypass', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """dynamic_bypass must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=dynamic_bypass.dynamic_bypass, is_container='container', presence=False, yang_name="dynamic-bypass", rest_name="dynamic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-dynamic-bypass', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""", }) self.__dynamic_bypass = t if hasattr(self, '_set'): self._set()
def function[_set_dynamic_bypass, parameter[self, v, load]]: constant[ Setter method for dynamic_bypass, mapped from YANG variable /mpls_state/dynamic_bypass (container) If this variable is read-only (config: false) in the source YANG file, then _set_dynamic_bypass is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dynamic_bypass() directly. YANG Description: MPLS dynamic bypass ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18f00d4b0> name[self].__dynamic_bypass assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_dynamic_bypass] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[dynamic_bypass] . identifier[dynamic_bypass] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[False] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__dynamic_bypass] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_dynamic_bypass(self, v, load=False): """ Setter method for dynamic_bypass, mapped from YANG variable /mpls_state/dynamic_bypass (container) If this variable is read-only (config: false) in the source YANG file, then _set_dynamic_bypass is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dynamic_bypass() directly. YANG Description: MPLS dynamic bypass """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=dynamic_bypass.dynamic_bypass, is_container='container', presence=False, yang_name='dynamic-bypass', rest_name='dynamic-bypass', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-dynamic-bypass', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'dynamic_bypass must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=dynamic_bypass.dynamic_bypass, is_container=\'container\', presence=False, yang_name="dynamic-bypass", rest_name="dynamic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'mpls-dynamic-bypass\', u\'cli-suppress-show-path\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls-operational\', defining_module=\'brocade-mpls-operational\', yang_type=\'container\', is_config=False)'}) # depends on [control=['except'], data=[]] self.__dynamic_bypass = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def set_empty_for_all(self, row_column_list): """Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ for row, column in row_column_list: self.set_empty(row, column)
def function[set_empty_for_all, parameter[self, row_column_list]]: constant[Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None ] for taget[tuple[[<ast.Name object at 0x7da1b22984c0>, <ast.Name object at 0x7da1b2298640>]]] in starred[name[row_column_list]] begin[:] call[name[self].set_empty, parameter[name[row], name[column]]]
keyword[def] identifier[set_empty_for_all] ( identifier[self] , identifier[row_column_list] ): literal[string] keyword[for] identifier[row] , identifier[column] keyword[in] identifier[row_column_list] : identifier[self] . identifier[set_empty] ( identifier[row] , identifier[column] )
def set_empty_for_all(self, row_column_list): """Keep all specified subplots completely empty. :param row_column_list: a list containing (row, column) tuples to specify the subplots, or None to indicate *all* subplots. :type row_column_list: list or None """ for (row, column) in row_column_list: self.set_empty(row, column) # depends on [control=['for'], data=[]]
def movies_opening(self, **kwargs): """Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_opening') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
def function[movies_opening, parameter[self]]: constant[Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. ] variable[path] assign[=] call[name[self]._get_path, parameter[constant[movies_opening]]] variable[response] assign[=] call[name[self]._GET, parameter[name[path], name[kwargs]]] call[name[self]._set_attrs_to_values, parameter[name[response]]] return[name[response]]
keyword[def] identifier[movies_opening] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[path] = identifier[self] . identifier[_get_path] ( literal[string] ) identifier[response] = identifier[self] . identifier[_GET] ( identifier[path] , identifier[kwargs] ) identifier[self] . identifier[_set_attrs_to_values] ( identifier[response] ) keyword[return] identifier[response]
def movies_opening(self, **kwargs): """Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('movies_opening') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
def process(args): """Run the function in args.name given arguments in args.argfile. """ # Set environment to standard to use periods for decimals and avoid localization os.environ["LC_ALL"] = "C" os.environ["LC"] = "C" os.environ["LANG"] = "C" setpath.prepend_bcbiopath() try: fn = getattr(multitasks, args.name) except AttributeError: raise AttributeError("Did not find exposed function in bcbio.distributed.multitasks named '%s'" % args.name) if args.moreargs or args.raw: fnargs = [args.argfile] + args.moreargs work_dir = None argfile = None else: with open(args.argfile) as in_handle: fnargs = yaml.safe_load(in_handle) work_dir = os.path.dirname(args.argfile) fnargs = config_utils.merge_resources(fnargs) argfile = args.outfile if args.outfile else "%s-out%s" % os.path.splitext(args.argfile) if not work_dir: work_dir = os.getcwd() if len(fnargs) > 0 and fnargs[0] == "cwl": fnargs, parallel, out_keys, input_files = _world_from_cwl(args.name, fnargs[1:], work_dir) # Can remove this awkward Docker merge when we do not need custom GATK3 installs fnargs = config_utils.merge_resources(fnargs) argfile = os.path.join(work_dir, "cwl.output.json") else: parallel, out_keys, input_files = None, {}, [] with utils.chdir(work_dir): with contextlib.closing(log.setup_local_logging(parallel={"wrapper": "runfn"})): try: out = fn(*fnargs) except: logger.exception() raise finally: # Clean up any copied and unpacked workflow inputs, avoiding extra disk usage wf_input_dir = os.path.join(work_dir, "wf-inputs") if os.path.exists(wf_input_dir) and os.path.isdir(wf_input_dir): shutil.rmtree(wf_input_dir) if argfile: try: _write_out_argfile(argfile, out, fnargs, parallel, out_keys, input_files, work_dir) except: logger.exception() raise
def function[process, parameter[args]]: constant[Run the function in args.name given arguments in args.argfile. ] call[name[os].environ][constant[LC_ALL]] assign[=] constant[C] call[name[os].environ][constant[LC]] assign[=] constant[C] call[name[os].environ][constant[LANG]] assign[=] constant[C] call[name[setpath].prepend_bcbiopath, parameter[]] <ast.Try object at 0x7da18bcc8190> if <ast.BoolOp object at 0x7da18bcc9f00> begin[:] variable[fnargs] assign[=] binary_operation[list[[<ast.Attribute object at 0x7da18bcca830>]] + name[args].moreargs] variable[work_dir] assign[=] constant[None] variable[argfile] assign[=] constant[None] if <ast.UnaryOp object at 0x7da18bccae60> begin[:] variable[work_dir] assign[=] call[name[os].getcwd, parameter[]] if <ast.BoolOp object at 0x7da1b1711720> begin[:] <ast.Tuple object at 0x7da1b17127d0> assign[=] call[name[_world_from_cwl], parameter[name[args].name, call[name[fnargs]][<ast.Slice object at 0x7da1b1713940>], name[work_dir]]] variable[fnargs] assign[=] call[name[config_utils].merge_resources, parameter[name[fnargs]]] variable[argfile] assign[=] call[name[os].path.join, parameter[name[work_dir], constant[cwl.output.json]]] with call[name[utils].chdir, parameter[name[work_dir]]] begin[:] with call[name[contextlib].closing, parameter[call[name[log].setup_local_logging, parameter[]]]] begin[:] <ast.Try object at 0x7da18f09e9e0> if name[argfile] begin[:] <ast.Try object at 0x7da1b19b8be0>
keyword[def] identifier[process] ( identifier[args] ): literal[string] identifier[os] . identifier[environ] [ literal[string] ]= literal[string] identifier[os] . identifier[environ] [ literal[string] ]= literal[string] identifier[os] . identifier[environ] [ literal[string] ]= literal[string] identifier[setpath] . identifier[prepend_bcbiopath] () keyword[try] : identifier[fn] = identifier[getattr] ( identifier[multitasks] , identifier[args] . identifier[name] ) keyword[except] identifier[AttributeError] : keyword[raise] identifier[AttributeError] ( literal[string] % identifier[args] . identifier[name] ) keyword[if] identifier[args] . identifier[moreargs] keyword[or] identifier[args] . identifier[raw] : identifier[fnargs] =[ identifier[args] . identifier[argfile] ]+ identifier[args] . identifier[moreargs] identifier[work_dir] = keyword[None] identifier[argfile] = keyword[None] keyword[else] : keyword[with] identifier[open] ( identifier[args] . identifier[argfile] ) keyword[as] identifier[in_handle] : identifier[fnargs] = identifier[yaml] . identifier[safe_load] ( identifier[in_handle] ) identifier[work_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[args] . identifier[argfile] ) identifier[fnargs] = identifier[config_utils] . identifier[merge_resources] ( identifier[fnargs] ) identifier[argfile] = identifier[args] . identifier[outfile] keyword[if] identifier[args] . identifier[outfile] keyword[else] literal[string] % identifier[os] . identifier[path] . identifier[splitext] ( identifier[args] . identifier[argfile] ) keyword[if] keyword[not] identifier[work_dir] : identifier[work_dir] = identifier[os] . identifier[getcwd] () keyword[if] identifier[len] ( identifier[fnargs] )> literal[int] keyword[and] identifier[fnargs] [ literal[int] ]== literal[string] : identifier[fnargs] , identifier[parallel] , identifier[out_keys] , identifier[input_files] = identifier[_world_from_cwl] ( identifier[args] . 
identifier[name] , identifier[fnargs] [ literal[int] :], identifier[work_dir] ) identifier[fnargs] = identifier[config_utils] . identifier[merge_resources] ( identifier[fnargs] ) identifier[argfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] ) keyword[else] : identifier[parallel] , identifier[out_keys] , identifier[input_files] = keyword[None] ,{},[] keyword[with] identifier[utils] . identifier[chdir] ( identifier[work_dir] ): keyword[with] identifier[contextlib] . identifier[closing] ( identifier[log] . identifier[setup_local_logging] ( identifier[parallel] ={ literal[string] : literal[string] })): keyword[try] : identifier[out] = identifier[fn] (* identifier[fnargs] ) keyword[except] : identifier[logger] . identifier[exception] () keyword[raise] keyword[finally] : identifier[wf_input_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[wf_input_dir] ) keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[wf_input_dir] ): identifier[shutil] . identifier[rmtree] ( identifier[wf_input_dir] ) keyword[if] identifier[argfile] : keyword[try] : identifier[_write_out_argfile] ( identifier[argfile] , identifier[out] , identifier[fnargs] , identifier[parallel] , identifier[out_keys] , identifier[input_files] , identifier[work_dir] ) keyword[except] : identifier[logger] . identifier[exception] () keyword[raise]
def process(args): """Run the function in args.name given arguments in args.argfile. """ # Set environment to standard to use periods for decimals and avoid localization os.environ['LC_ALL'] = 'C' os.environ['LC'] = 'C' os.environ['LANG'] = 'C' setpath.prepend_bcbiopath() try: fn = getattr(multitasks, args.name) # depends on [control=['try'], data=[]] except AttributeError: raise AttributeError("Did not find exposed function in bcbio.distributed.multitasks named '%s'" % args.name) # depends on [control=['except'], data=[]] if args.moreargs or args.raw: fnargs = [args.argfile] + args.moreargs work_dir = None argfile = None # depends on [control=['if'], data=[]] else: with open(args.argfile) as in_handle: fnargs = yaml.safe_load(in_handle) # depends on [control=['with'], data=['in_handle']] work_dir = os.path.dirname(args.argfile) fnargs = config_utils.merge_resources(fnargs) argfile = args.outfile if args.outfile else '%s-out%s' % os.path.splitext(args.argfile) if not work_dir: work_dir = os.getcwd() # depends on [control=['if'], data=[]] if len(fnargs) > 0 and fnargs[0] == 'cwl': (fnargs, parallel, out_keys, input_files) = _world_from_cwl(args.name, fnargs[1:], work_dir) # Can remove this awkward Docker merge when we do not need custom GATK3 installs fnargs = config_utils.merge_resources(fnargs) argfile = os.path.join(work_dir, 'cwl.output.json') # depends on [control=['if'], data=[]] else: (parallel, out_keys, input_files) = (None, {}, []) with utils.chdir(work_dir): with contextlib.closing(log.setup_local_logging(parallel={'wrapper': 'runfn'})): try: out = fn(*fnargs) # depends on [control=['try'], data=[]] except: logger.exception() raise # depends on [control=['except'], data=[]] finally: # Clean up any copied and unpacked workflow inputs, avoiding extra disk usage wf_input_dir = os.path.join(work_dir, 'wf-inputs') if os.path.exists(wf_input_dir) and os.path.isdir(wf_input_dir): shutil.rmtree(wf_input_dir) # depends on [control=['if'], data=[]] # depends on 
[control=['with'], data=[]] # depends on [control=['with'], data=[]] if argfile: try: _write_out_argfile(argfile, out, fnargs, parallel, out_keys, input_files, work_dir) # depends on [control=['try'], data=[]] except: logger.exception() raise # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
async def _readline(self, timeout: NumType = None): """ Wraps reader.readuntil() with error handling. """ if self._stream_reader is None or self._stream_writer is None: raise SMTPServerDisconnected("Client not connected") read_task = asyncio.Task( self._stream_reader.readuntil(separator=b"\n"), loop=self._loop ) try: line = await asyncio.wait_for( read_task, timeout, loop=self._loop ) # type: bytes except asyncio.LimitOverrunError: raise SMTPResponseException( SMTPStatus.unrecognized_command, "Line too long." ) except asyncio.TimeoutError as exc: raise SMTPTimeoutError(str(exc)) except asyncio.IncompleteReadError as exc: if exc.partial == b"": # if we got only an EOF, raise SMTPServerDisconnected raise SMTPServerDisconnected("Unexpected EOF received") else: # otherwise, close our connection but try to parse the # response anyways self._stream_writer.close() line = exc.partial return line
<ast.AsyncFunctionDef object at 0x7da2049621d0>
keyword[async] keyword[def] identifier[_readline] ( identifier[self] , identifier[timeout] : identifier[NumType] = keyword[None] ): literal[string] keyword[if] identifier[self] . identifier[_stream_reader] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[_stream_writer] keyword[is] keyword[None] : keyword[raise] identifier[SMTPServerDisconnected] ( literal[string] ) identifier[read_task] = identifier[asyncio] . identifier[Task] ( identifier[self] . identifier[_stream_reader] . identifier[readuntil] ( identifier[separator] = literal[string] ), identifier[loop] = identifier[self] . identifier[_loop] ) keyword[try] : identifier[line] = keyword[await] identifier[asyncio] . identifier[wait_for] ( identifier[read_task] , identifier[timeout] , identifier[loop] = identifier[self] . identifier[_loop] ) keyword[except] identifier[asyncio] . identifier[LimitOverrunError] : keyword[raise] identifier[SMTPResponseException] ( identifier[SMTPStatus] . identifier[unrecognized_command] , literal[string] ) keyword[except] identifier[asyncio] . identifier[TimeoutError] keyword[as] identifier[exc] : keyword[raise] identifier[SMTPTimeoutError] ( identifier[str] ( identifier[exc] )) keyword[except] identifier[asyncio] . identifier[IncompleteReadError] keyword[as] identifier[exc] : keyword[if] identifier[exc] . identifier[partial] == literal[string] : keyword[raise] identifier[SMTPServerDisconnected] ( literal[string] ) keyword[else] : identifier[self] . identifier[_stream_writer] . identifier[close] () identifier[line] = identifier[exc] . identifier[partial] keyword[return] identifier[line]
async def _readline(self, timeout: NumType=None): """ Wraps reader.readuntil() with error handling. """ if self._stream_reader is None or self._stream_writer is None: raise SMTPServerDisconnected('Client not connected') # depends on [control=['if'], data=[]] read_task = asyncio.Task(self._stream_reader.readuntil(separator=b'\n'), loop=self._loop) try: line = await asyncio.wait_for(read_task, timeout, loop=self._loop) # type: bytes # depends on [control=['try'], data=[]] except asyncio.LimitOverrunError: raise SMTPResponseException(SMTPStatus.unrecognized_command, 'Line too long.') # depends on [control=['except'], data=[]] except asyncio.TimeoutError as exc: raise SMTPTimeoutError(str(exc)) # depends on [control=['except'], data=['exc']] except asyncio.IncompleteReadError as exc: if exc.partial == b'': # if we got only an EOF, raise SMTPServerDisconnected raise SMTPServerDisconnected('Unexpected EOF received') # depends on [control=['if'], data=[]] else: # otherwise, close our connection but try to parse the # response anyways self._stream_writer.close() line = exc.partial # depends on [control=['except'], data=['exc']] return line
def Lookup(self, x): """Looks up x and returns the corresponding value of y.""" return self._Bisect(x, self.xs, self.ys)
def function[Lookup, parameter[self, x]]: constant[Looks up x and returns the corresponding value of y.] return[call[name[self]._Bisect, parameter[name[x], name[self].xs, name[self].ys]]]
keyword[def] identifier[Lookup] ( identifier[self] , identifier[x] ): literal[string] keyword[return] identifier[self] . identifier[_Bisect] ( identifier[x] , identifier[self] . identifier[xs] , identifier[self] . identifier[ys] )
def Lookup(self, x): """Looks up x and returns the corresponding value of y.""" return self._Bisect(x, self.xs, self.ys)
def getUserContact(master, contact_types, uid): """ This is a simple getter function that returns a user attribute that matches the contact_types argument, or returns None if no uid/match is found. @param master: BuildMaster used to query the database @type master: BuildMaster instance @param contact_types: list of contact attributes to look for in in a given user, such as 'email' or 'nick' @type contact_types: list of strings @param uid: user that is searched for the contact_types match @type uid: integer @returns: string of contact information or None via deferred """ d = master.db.users.getUser(uid) d.addCallback(_extractContact, contact_types, uid) return d
def function[getUserContact, parameter[master, contact_types, uid]]: constant[ This is a simple getter function that returns a user attribute that matches the contact_types argument, or returns None if no uid/match is found. @param master: BuildMaster used to query the database @type master: BuildMaster instance @param contact_types: list of contact attributes to look for in in a given user, such as 'email' or 'nick' @type contact_types: list of strings @param uid: user that is searched for the contact_types match @type uid: integer @returns: string of contact information or None via deferred ] variable[d] assign[=] call[name[master].db.users.getUser, parameter[name[uid]]] call[name[d].addCallback, parameter[name[_extractContact], name[contact_types], name[uid]]] return[name[d]]
keyword[def] identifier[getUserContact] ( identifier[master] , identifier[contact_types] , identifier[uid] ): literal[string] identifier[d] = identifier[master] . identifier[db] . identifier[users] . identifier[getUser] ( identifier[uid] ) identifier[d] . identifier[addCallback] ( identifier[_extractContact] , identifier[contact_types] , identifier[uid] ) keyword[return] identifier[d]
def getUserContact(master, contact_types, uid): """ This is a simple getter function that returns a user attribute that matches the contact_types argument, or returns None if no uid/match is found. @param master: BuildMaster used to query the database @type master: BuildMaster instance @param contact_types: list of contact attributes to look for in in a given user, such as 'email' or 'nick' @type contact_types: list of strings @param uid: user that is searched for the contact_types match @type uid: integer @returns: string of contact information or None via deferred """ d = master.db.users.getUser(uid) d.addCallback(_extractContact, contact_types, uid) return d
def watt_m(simulated_array, observed_array, replace_nan=None, replace_inf=None, remove_neg=False, remove_zero=False): """Compute Watterson's M (M). .. image:: /pictures/M.png **Range:** -1 ≤ M < 1, does not indicate bias, larger is better. **Notes:** Parameters ---------- simulated_array: one dimensional ndarray An array of simulated data from the time series. observed_array: one dimensional ndarray An array of observed data from the time series. replace_nan: float, optional If given, indicates which value to replace NaN values with in the two arrays. If None, when a NaN value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. replace_inf: float, optional If given, indicates which value to replace Inf values with in the two arrays. If None, when an inf value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. remove_neg: boolean, optional If True, when a negative value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. remove_zero: boolean, optional If true, when a zero value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. Returns ------- float Watterson's M value. Examples -------- >>> import HydroErr as he >>> import numpy as np >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7]) >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7]) >>> he.watt_m(sim, obs) 0.8307913876595929 References ---------- - Watterson, I.G., 1996. Non‐dimensional measures of climate model performance. International Journal of Climatology 16(4) 379-391. 
""" # Treats data simulated_array, observed_array = treat_values( simulated_array, observed_array, replace_nan=replace_nan, replace_inf=replace_inf, remove_neg=remove_neg, remove_zero=remove_zero ) a = 2 / np.pi b = np.mean((simulated_array - observed_array) ** 2) # MSE c = np.std(observed_array, ddof=1) ** 2 + np.std(simulated_array, ddof=1) ** 2 e = (np.mean(simulated_array) - np.mean(observed_array)) ** 2 f = c + e return a * np.arcsin(1 - (b / f))
def function[watt_m, parameter[simulated_array, observed_array, replace_nan, replace_inf, remove_neg, remove_zero]]: constant[Compute Watterson's M (M). .. image:: /pictures/M.png **Range:** -1 ≤ M < 1, does not indicate bias, larger is better. **Notes:** Parameters ---------- simulated_array: one dimensional ndarray An array of simulated data from the time series. observed_array: one dimensional ndarray An array of observed data from the time series. replace_nan: float, optional If given, indicates which value to replace NaN values with in the two arrays. If None, when a NaN value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. replace_inf: float, optional If given, indicates which value to replace Inf values with in the two arrays. If None, when an inf value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. remove_neg: boolean, optional If True, when a negative value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. remove_zero: boolean, optional If true, when a zero value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. Returns ------- float Watterson's M value. Examples -------- >>> import HydroErr as he >>> import numpy as np >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7]) >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7]) >>> he.watt_m(sim, obs) 0.8307913876595929 References ---------- - Watterson, I.G., 1996. Non‐dimensional measures of climate model performance. International Journal of Climatology 16(4) 379-391. 
] <ast.Tuple object at 0x7da1b0668310> assign[=] call[name[treat_values], parameter[name[simulated_array], name[observed_array]]] variable[a] assign[=] binary_operation[constant[2] / name[np].pi] variable[b] assign[=] call[name[np].mean, parameter[binary_operation[binary_operation[name[simulated_array] - name[observed_array]] ** constant[2]]]] variable[c] assign[=] binary_operation[binary_operation[call[name[np].std, parameter[name[observed_array]]] ** constant[2]] + binary_operation[call[name[np].std, parameter[name[simulated_array]]] ** constant[2]]] variable[e] assign[=] binary_operation[binary_operation[call[name[np].mean, parameter[name[simulated_array]]] - call[name[np].mean, parameter[name[observed_array]]]] ** constant[2]] variable[f] assign[=] binary_operation[name[c] + name[e]] return[binary_operation[name[a] * call[name[np].arcsin, parameter[binary_operation[constant[1] - binary_operation[name[b] / name[f]]]]]]]
keyword[def] identifier[watt_m] ( identifier[simulated_array] , identifier[observed_array] , identifier[replace_nan] = keyword[None] , identifier[replace_inf] = keyword[None] , identifier[remove_neg] = keyword[False] , identifier[remove_zero] = keyword[False] ): literal[string] identifier[simulated_array] , identifier[observed_array] = identifier[treat_values] ( identifier[simulated_array] , identifier[observed_array] , identifier[replace_nan] = identifier[replace_nan] , identifier[replace_inf] = identifier[replace_inf] , identifier[remove_neg] = identifier[remove_neg] , identifier[remove_zero] = identifier[remove_zero] ) identifier[a] = literal[int] / identifier[np] . identifier[pi] identifier[b] = identifier[np] . identifier[mean] (( identifier[simulated_array] - identifier[observed_array] )** literal[int] ) identifier[c] = identifier[np] . identifier[std] ( identifier[observed_array] , identifier[ddof] = literal[int] )** literal[int] + identifier[np] . identifier[std] ( identifier[simulated_array] , identifier[ddof] = literal[int] )** literal[int] identifier[e] =( identifier[np] . identifier[mean] ( identifier[simulated_array] )- identifier[np] . identifier[mean] ( identifier[observed_array] ))** literal[int] identifier[f] = identifier[c] + identifier[e] keyword[return] identifier[a] * identifier[np] . identifier[arcsin] ( literal[int] -( identifier[b] / identifier[f] ))
def watt_m(simulated_array, observed_array, replace_nan=None, replace_inf=None, remove_neg=False, remove_zero=False): """Compute Watterson's M (M). .. image:: /pictures/M.png **Range:** -1 ≤ M < 1, does not indicate bias, larger is better. **Notes:** Parameters ---------- simulated_array: one dimensional ndarray An array of simulated data from the time series. observed_array: one dimensional ndarray An array of observed data from the time series. replace_nan: float, optional If given, indicates which value to replace NaN values with in the two arrays. If None, when a NaN value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. replace_inf: float, optional If given, indicates which value to replace Inf values with in the two arrays. If None, when an inf value is found at the i-th position in the observed OR simulated array, the i-th value of the observed and simulated array are removed before the computation. remove_neg: boolean, optional If True, when a negative value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. remove_zero: boolean, optional If true, when a zero value is found at the i-th position in the observed OR simulated array, the i-th value of the observed AND simulated array are removed before the computation. Returns ------- float Watterson's M value. Examples -------- >>> import HydroErr as he >>> import numpy as np >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7]) >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7]) >>> he.watt_m(sim, obs) 0.8307913876595929 References ---------- - Watterson, I.G., 1996. Non‐dimensional measures of climate model performance. International Journal of Climatology 16(4) 379-391. 
""" # Treats data (simulated_array, observed_array) = treat_values(simulated_array, observed_array, replace_nan=replace_nan, replace_inf=replace_inf, remove_neg=remove_neg, remove_zero=remove_zero) a = 2 / np.pi b = np.mean((simulated_array - observed_array) ** 2) # MSE c = np.std(observed_array, ddof=1) ** 2 + np.std(simulated_array, ddof=1) ** 2 e = (np.mean(simulated_array) - np.mean(observed_array)) ** 2 f = c + e return a * np.arcsin(1 - b / f)
def _access_token(self, request: Request=None, page_id: Text=''): """ Guess the access token for that specific request. """ if not page_id: msg = request.message # type: FacebookMessage page_id = msg.get_page_id() page = self.settings() if page['page_id'] == page_id: return page['page_token'] raise PlatformOperationError('Trying to get access token of the ' 'page "{}", which is not configured.' .format(page_id))
def function[_access_token, parameter[self, request, page_id]]: constant[ Guess the access token for that specific request. ] if <ast.UnaryOp object at 0x7da18dc07610> begin[:] variable[msg] assign[=] name[request].message variable[page_id] assign[=] call[name[msg].get_page_id, parameter[]] variable[page] assign[=] call[name[self].settings, parameter[]] if compare[call[name[page]][constant[page_id]] equal[==] name[page_id]] begin[:] return[call[name[page]][constant[page_token]]] <ast.Raise object at 0x7da18dc07df0>
keyword[def] identifier[_access_token] ( identifier[self] , identifier[request] : identifier[Request] = keyword[None] , identifier[page_id] : identifier[Text] = literal[string] ): literal[string] keyword[if] keyword[not] identifier[page_id] : identifier[msg] = identifier[request] . identifier[message] identifier[page_id] = identifier[msg] . identifier[get_page_id] () identifier[page] = identifier[self] . identifier[settings] () keyword[if] identifier[page] [ literal[string] ]== identifier[page_id] : keyword[return] identifier[page] [ literal[string] ] keyword[raise] identifier[PlatformOperationError] ( literal[string] literal[string] . identifier[format] ( identifier[page_id] ))
def _access_token(self, request: Request=None, page_id: Text=''): """ Guess the access token for that specific request. """ if not page_id: msg = request.message # type: FacebookMessage page_id = msg.get_page_id() # depends on [control=['if'], data=[]] page = self.settings() if page['page_id'] == page_id: return page['page_token'] # depends on [control=['if'], data=[]] raise PlatformOperationError('Trying to get access token of the page "{}", which is not configured.'.format(page_id))
def draw_rect(self, bbox, cell_val): """ Fills the bbox with the content values Float bbox values are normalized to have non-zero area """ new_x0 = int(bbox[x0]) new_y0 = int(bbox[y0]) new_x1 = max(new_x0 + 1, int(bbox[x1])) new_y1 = max(new_y0 + 1, int(bbox[y1])) self.grid[new_x0:new_x1, new_y0:new_y1] = cell_val
def function[draw_rect, parameter[self, bbox, cell_val]]: constant[ Fills the bbox with the content values Float bbox values are normalized to have non-zero area ] variable[new_x0] assign[=] call[name[int], parameter[call[name[bbox]][name[x0]]]] variable[new_y0] assign[=] call[name[int], parameter[call[name[bbox]][name[y0]]]] variable[new_x1] assign[=] call[name[max], parameter[binary_operation[name[new_x0] + constant[1]], call[name[int], parameter[call[name[bbox]][name[x1]]]]]] variable[new_y1] assign[=] call[name[max], parameter[binary_operation[name[new_y0] + constant[1]], call[name[int], parameter[call[name[bbox]][name[y1]]]]]] call[name[self].grid][tuple[[<ast.Slice object at 0x7da18f00ea40>, <ast.Slice object at 0x7da1b1233df0>]]] assign[=] name[cell_val]
keyword[def] identifier[draw_rect] ( identifier[self] , identifier[bbox] , identifier[cell_val] ): literal[string] identifier[new_x0] = identifier[int] ( identifier[bbox] [ identifier[x0] ]) identifier[new_y0] = identifier[int] ( identifier[bbox] [ identifier[y0] ]) identifier[new_x1] = identifier[max] ( identifier[new_x0] + literal[int] , identifier[int] ( identifier[bbox] [ identifier[x1] ])) identifier[new_y1] = identifier[max] ( identifier[new_y0] + literal[int] , identifier[int] ( identifier[bbox] [ identifier[y1] ])) identifier[self] . identifier[grid] [ identifier[new_x0] : identifier[new_x1] , identifier[new_y0] : identifier[new_y1] ]= identifier[cell_val]
def draw_rect(self, bbox, cell_val): """ Fills the bbox with the content values Float bbox values are normalized to have non-zero area """ new_x0 = int(bbox[x0]) new_y0 = int(bbox[y0]) new_x1 = max(new_x0 + 1, int(bbox[x1])) new_y1 = max(new_y0 + 1, int(bbox[y1])) self.grid[new_x0:new_x1, new_y0:new_y1] = cell_val
def set_sampling_interval(self, interval):
    """Set the sampling interval used by the Firmata loop method.

    :param interval: time in milliseconds

    :returns: No return value
    """
    coro = self.core.set_sampling_interval(interval)
    future = asyncio.ensure_future(coro)
    self.loop.run_until_complete(future)
def function[set_sampling_interval, parameter[self, interval]]: constant[ This method sets the sampling interval for the Firmata loop method :param interval: time in milliseconds :returns: No return value ] variable[task] assign[=] call[name[asyncio].ensure_future, parameter[call[name[self].core.set_sampling_interval, parameter[name[interval]]]]] call[name[self].loop.run_until_complete, parameter[name[task]]]
keyword[def] identifier[set_sampling_interval] ( identifier[self] , identifier[interval] ): literal[string] identifier[task] = identifier[asyncio] . identifier[ensure_future] ( identifier[self] . identifier[core] . identifier[set_sampling_interval] ( identifier[interval] )) identifier[self] . identifier[loop] . identifier[run_until_complete] ( identifier[task] )
def set_sampling_interval(self, interval): """ This method sets the sampling interval for the Firmata loop method :param interval: time in milliseconds :returns: No return value """ task = asyncio.ensure_future(self.core.set_sampling_interval(interval)) self.loop.run_until_complete(task)
def handle_logging(self):
    """
    To allow devs to log as early as possible, logging will already
    be handled here
    """
    configure_logging(self.get_scrapy_options())

    # Disable duplicates
    self.__scrapy_options["LOG_ENABLED"] = False

    # Now, after log-level is correctly set, lets log them.
    # FIX: the original compared strings with ``is``, which depends on
    # CPython string interning and is not a guaranteed equality check;
    # dispatch on the level value with a plain dict lookup instead.
    dispatch = {
        "error": self.log.error,
        "info": self.log.info,
        "debug": self.log.debug,
    }
    for msg in self.log_output:
        emit = dispatch.get(msg["level"])
        if emit is not None:
            emit(msg["msg"])
def function[handle_logging, parameter[self]]: constant[ To allow devs to log as early as possible, logging will already be handled here ] call[name[configure_logging], parameter[call[name[self].get_scrapy_options, parameter[]]]] call[name[self].__scrapy_options][constant[LOG_ENABLED]] assign[=] constant[False] for taget[name[msg]] in starred[name[self].log_output] begin[:] if compare[call[name[msg]][constant[level]] is constant[error]] begin[:] call[name[self].log.error, parameter[call[name[msg]][constant[msg]]]]
keyword[def] identifier[handle_logging] ( identifier[self] ): literal[string] identifier[configure_logging] ( identifier[self] . identifier[get_scrapy_options] ()) identifier[self] . identifier[__scrapy_options] [ literal[string] ]= keyword[False] keyword[for] identifier[msg] keyword[in] identifier[self] . identifier[log_output] : keyword[if] identifier[msg] [ literal[string] ] keyword[is] literal[string] : identifier[self] . identifier[log] . identifier[error] ( identifier[msg] [ literal[string] ]) keyword[elif] identifier[msg] [ literal[string] ] keyword[is] literal[string] : identifier[self] . identifier[log] . identifier[info] ( identifier[msg] [ literal[string] ]) keyword[elif] identifier[msg] [ literal[string] ] keyword[is] literal[string] : identifier[self] . identifier[log] . identifier[debug] ( identifier[msg] [ literal[string] ])
def handle_logging(self): """ To allow devs to log as early as possible, logging will already be handled here """ configure_logging(self.get_scrapy_options()) # Disable duplicates self.__scrapy_options['LOG_ENABLED'] = False # Now, after log-level is correctly set, lets log them. for msg in self.log_output: if msg['level'] is 'error': self.log.error(msg['msg']) # depends on [control=['if'], data=[]] elif msg['level'] is 'info': self.log.info(msg['msg']) # depends on [control=['if'], data=[]] elif msg['level'] is 'debug': self.log.debug(msg['msg']) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['msg']]
def receives(self, idA, idB):
    """Return the dignities in which A receives B.

    A receives B when (1) B aspects A and (2) B is placed in
    dignities of A.
    """
    receiver = self.chart.get(idA)
    giver = self.chart.get(idB)
    if aspects.isAspecting(giver, receiver, const.MAJOR_ASPECTS):
        return self.inDignities(idB, idA)
    return []
def function[receives, parameter[self, idA, idB]]: constant[ Returns the dignities where A receives B. A receives B when (1) B aspects A and (2) B is in dignities of A. ] variable[objA] assign[=] call[name[self].chart.get, parameter[name[idA]]] variable[objB] assign[=] call[name[self].chart.get, parameter[name[idB]]] variable[asp] assign[=] call[name[aspects].isAspecting, parameter[name[objB], name[objA], name[const].MAJOR_ASPECTS]] return[<ast.IfExp object at 0x7da1b11a1c60>]
keyword[def] identifier[receives] ( identifier[self] , identifier[idA] , identifier[idB] ): literal[string] identifier[objA] = identifier[self] . identifier[chart] . identifier[get] ( identifier[idA] ) identifier[objB] = identifier[self] . identifier[chart] . identifier[get] ( identifier[idB] ) identifier[asp] = identifier[aspects] . identifier[isAspecting] ( identifier[objB] , identifier[objA] , identifier[const] . identifier[MAJOR_ASPECTS] ) keyword[return] identifier[self] . identifier[inDignities] ( identifier[idB] , identifier[idA] ) keyword[if] identifier[asp] keyword[else] []
def receives(self, idA, idB): """ Returns the dignities where A receives B. A receives B when (1) B aspects A and (2) B is in dignities of A. """ objA = self.chart.get(idA) objB = self.chart.get(idB) asp = aspects.isAspecting(objB, objA, const.MAJOR_ASPECTS) return self.inDignities(idB, idA) if asp else []
def Expand(self):
    """Read the contents of the current node and the full subtree.

    The subtree is then made available until the next
    xmlTextReaderRead() call.
    """
    result = libxml2mod.xmlTextReaderExpand(self._o)
    if result is None:
        raise treeError('xmlTextReaderExpand() failed')
    return xmlNode(_obj=result)
def function[Expand, parameter[self]]: constant[Reads the contents of the current node and the full subtree. It then makes the subtree available until the next xmlTextReaderRead() call ] variable[ret] assign[=] call[name[libxml2mod].xmlTextReaderExpand, parameter[name[self]._o]] if compare[name[ret] is constant[None]] begin[:] <ast.Raise object at 0x7da1b1f8ddb0> variable[__tmp] assign[=] call[name[xmlNode], parameter[]] return[name[__tmp]]
keyword[def] identifier[Expand] ( identifier[self] ): literal[string] identifier[ret] = identifier[libxml2mod] . identifier[xmlTextReaderExpand] ( identifier[self] . identifier[_o] ) keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] ) identifier[__tmp] = identifier[xmlNode] ( identifier[_obj] = identifier[ret] ) keyword[return] identifier[__tmp]
def Expand(self): """Reads the contents of the current node and the full subtree. It then makes the subtree available until the next xmlTextReaderRead() call """ ret = libxml2mod.xmlTextReaderExpand(self._o) if ret is None: raise treeError('xmlTextReaderExpand() failed') # depends on [control=['if'], data=[]] __tmp = xmlNode(_obj=ret) return __tmp
def events_for_secretreveal(
        transfers_pair: List[MediationPairState],
        secret: Secret,
        pseudo_random_generator: random.Random,
) -> List[Event]:
    """ Reveal the secret off-chain.

    The secret is revealed off-chain even if there is a pending transaction to
    reveal it on-chain, this allows the unlock to happen off-chain, which is
    faster.

    This node is named N, suppose there is a mediated transfer with two refund
    transfers, one from B and one from C:

        A-N-B...B-N-C..C-N-D

    Under normal operation N will first learn the secret from D, then reveal to
    C, wait for C to inform the secret is known before revealing it to B, and
    again wait for B before revealing the secret to A.

    If B somehow sent a reveal secret before C and D, then the secret will be
    revealed to A, but not C and D, meaning the secret won't be propagated
    forward. Even if D sent a reveal secret at about the same time, the secret
    will only be revealed to B upon confirmation from C.

    If the proof doesn't arrive in time and the lock's expiration is at risk, N
    won't lose tokens since it knows the secret can go on-chain at any time.
    """
    events: List[Event] = []
    # Walk the pairs back-to-front: the secret propagates from payee to payer.
    for pair in reversed(transfers_pair):
        payee_knows = pair.payee_state in STATE_SECRET_KNOWN
        payer_knows = pair.payer_state in STATE_SECRET_KNOWN
        transfer_pending = pair.payer_state == 'payer_pending'

        if payee_knows and not payer_knows and transfer_pending:
            pair.payer_state = 'payer_secret_revealed'
            reveal = SendSecretReveal(
                recipient=pair.payer_transfer.balance_proof.sender,
                channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
                message_identifier=message_identifier_from_prng(
                    pseudo_random_generator,
                ),
                secret=secret,
            )
            events.append(reveal)

    return events
def function[events_for_secretreveal, parameter[transfers_pair, secret, pseudo_random_generator]]: constant[ Reveal the secret off-chain. The secret is revealed off-chain even if there is a pending transaction to reveal it on-chain, this allows the unlock to happen off-chain, which is faster. This node is named N, suppose there is a mediated transfer with two refund transfers, one from B and one from C: A-N-B...B-N-C..C-N-D Under normal operation N will first learn the secret from D, then reveal to C, wait for C to inform the secret is known before revealing it to B, and again wait for B before revealing the secret to A. If B somehow sent a reveal secret before C and D, then the secret will be revealed to A, but not C and D, meaning the secret won't be propagated forward. Even if D sent a reveal secret at about the same time, the secret will only be revealed to B upon confirmation from C. If the proof doesn't arrive in time and the lock's expiration is at risk, N won't lose tokens since it knows the secret can go on-chain at any time. 
] <ast.AnnAssign object at 0x7da1b196f1c0> for taget[name[pair]] in starred[call[name[reversed], parameter[name[transfers_pair]]]] begin[:] variable[payee_knows_secret] assign[=] compare[name[pair].payee_state in name[STATE_SECRET_KNOWN]] variable[payer_knows_secret] assign[=] compare[name[pair].payer_state in name[STATE_SECRET_KNOWN]] variable[is_transfer_pending] assign[=] compare[name[pair].payer_state equal[==] constant[payer_pending]] variable[should_send_secret] assign[=] <ast.BoolOp object at 0x7da1b196c790> if name[should_send_secret] begin[:] variable[message_identifier] assign[=] call[name[message_identifier_from_prng], parameter[name[pseudo_random_generator]]] name[pair].payer_state assign[=] constant[payer_secret_revealed] variable[payer_transfer] assign[=] name[pair].payer_transfer variable[revealsecret] assign[=] call[name[SendSecretReveal], parameter[]] call[name[events].append, parameter[name[revealsecret]]] return[name[events]]
keyword[def] identifier[events_for_secretreveal] ( identifier[transfers_pair] : identifier[List] [ identifier[MediationPairState] ], identifier[secret] : identifier[Secret] , identifier[pseudo_random_generator] : identifier[random] . identifier[Random] , )-> identifier[List] [ identifier[Event] ]: literal[string] identifier[events] : identifier[List] [ identifier[Event] ]= identifier[list] () keyword[for] identifier[pair] keyword[in] identifier[reversed] ( identifier[transfers_pair] ): identifier[payee_knows_secret] = identifier[pair] . identifier[payee_state] keyword[in] identifier[STATE_SECRET_KNOWN] identifier[payer_knows_secret] = identifier[pair] . identifier[payer_state] keyword[in] identifier[STATE_SECRET_KNOWN] identifier[is_transfer_pending] = identifier[pair] . identifier[payer_state] == literal[string] identifier[should_send_secret] =( identifier[payee_knows_secret] keyword[and] keyword[not] identifier[payer_knows_secret] keyword[and] identifier[is_transfer_pending] ) keyword[if] identifier[should_send_secret] : identifier[message_identifier] = identifier[message_identifier_from_prng] ( identifier[pseudo_random_generator] ) identifier[pair] . identifier[payer_state] = literal[string] identifier[payer_transfer] = identifier[pair] . identifier[payer_transfer] identifier[revealsecret] = identifier[SendSecretReveal] ( identifier[recipient] = identifier[payer_transfer] . identifier[balance_proof] . identifier[sender] , identifier[channel_identifier] = identifier[CHANNEL_IDENTIFIER_GLOBAL_QUEUE] , identifier[message_identifier] = identifier[message_identifier] , identifier[secret] = identifier[secret] , ) identifier[events] . identifier[append] ( identifier[revealsecret] ) keyword[return] identifier[events]
def events_for_secretreveal(transfers_pair: List[MediationPairState], secret: Secret, pseudo_random_generator: random.Random) -> List[Event]: """ Reveal the secret off-chain. The secret is revealed off-chain even if there is a pending transaction to reveal it on-chain, this allows the unlock to happen off-chain, which is faster. This node is named N, suppose there is a mediated transfer with two refund transfers, one from B and one from C: A-N-B...B-N-C..C-N-D Under normal operation N will first learn the secret from D, then reveal to C, wait for C to inform the secret is known before revealing it to B, and again wait for B before revealing the secret to A. If B somehow sent a reveal secret before C and D, then the secret will be revealed to A, but not C and D, meaning the secret won't be propagated forward. Even if D sent a reveal secret at about the same time, the secret will only be revealed to B upon confirmation from C. If the proof doesn't arrive in time and the lock's expiration is at risk, N won't lose tokens since it knows the secret can go on-chain at any time. """ events: List[Event] = list() for pair in reversed(transfers_pair): payee_knows_secret = pair.payee_state in STATE_SECRET_KNOWN payer_knows_secret = pair.payer_state in STATE_SECRET_KNOWN is_transfer_pending = pair.payer_state == 'payer_pending' should_send_secret = payee_knows_secret and (not payer_knows_secret) and is_transfer_pending if should_send_secret: message_identifier = message_identifier_from_prng(pseudo_random_generator) pair.payer_state = 'payer_secret_revealed' payer_transfer = pair.payer_transfer revealsecret = SendSecretReveal(recipient=payer_transfer.balance_proof.sender, channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE, message_identifier=message_identifier, secret=secret) events.append(revealsecret) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pair']] return events
def expect_column_values_to_match_regex_list(self,
                                             column,
                                             regex_list,
                                             match_on="any",
                                             mostly=None,
                                             result_format=None, include_config=False, catch_exceptions=None,
                                             meta=None
                                             ):
    """Expect the column entries to be strings matching any of (or all of) a \
    list of regular expressions. Matches can be anywhere in the string.

    expect_column_values_to_match_regex_list is a \
    :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`.

    Args:
        column (str): \
            The column name.
        regex_list (list): \
            The list of regular expressions which the column entries should match

    Keyword Args:
        match_on= (string): \
            "any" or "all".
            Use "any" if the value should match at least one regular expression in the list.
            Use "all" if it should match each regular expression in the list.
        mostly (None or a float between 0 and 1): \
            Return `"success": True` if at least mostly percent of values match the expectation. \
            For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.

        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        expect_column_values_to_match_regex
        expect_column_values_to_not_match_regex

    """
    # Abstract: concrete Dataset backends must provide the implementation.
    raise NotImplementedError
def function[expect_column_values_to_match_regex_list, parameter[self, column, regex_list, match_on, mostly, result_format, include_config, catch_exceptions, meta]]: constant[Expect the column entries to be strings that can be matched to either any of or all of a list of regular expressions. Matches can be anywhere in the string. expect_column_values_to_match_regex_list is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`. Args: column (str): The column name. regex_list (list): The list of regular expressions which the column entries should match Keyword Args: match_on= (string): "any" or "all". Use "any" if the value should match at least one regular expression in the list. Use "all" if it should match each regular expression in the list. mostly (None or a float between 0 and 1): Return `"success": True` if at least mostly percent of values match the expectation. For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`. meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_column_values_to_match_regex expect_column_values_to_not_match_regex ] <ast.Raise object at 0x7da1b17be0b0>
keyword[def] identifier[expect_column_values_to_match_regex_list] ( identifier[self] , identifier[column] , identifier[regex_list] , identifier[match_on] = literal[string] , identifier[mostly] = keyword[None] , identifier[result_format] = keyword[None] , identifier[include_config] = keyword[False] , identifier[catch_exceptions] = keyword[None] , identifier[meta] = keyword[None] ): literal[string] keyword[raise] identifier[NotImplementedError]
def expect_column_values_to_match_regex_list(self, column, regex_list, match_on='any', mostly=None, result_format=None, include_config=False, catch_exceptions=None, meta=None): """Expect the column entries to be strings that can be matched to either any of or all of a list of regular expressions. Matches can be anywhere in the string. expect_column_values_to_match_regex_list is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`. Args: column (str): The column name. regex_list (list): The list of regular expressions which the column entries should match Keyword Args: match_on= (string): "any" or "all". Use "any" if the value should match at least one regular expression in the list. Use "all" if it should match each regular expression in the list. mostly (None or a float between 0 and 1): Return `"success": True` if at least mostly percent of values match the expectation. For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): If True, then include the expectation config as part of the result object. For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): If True, then catch exceptions and include them as part of the result object. For more detail, see :ref:`catch_exceptions`. meta (dict or None): A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. For more detail, see :ref:`meta`. Returns: A JSON-serializable expectation result object. Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: expect_column_values_to_match_regex expect_column_values_to_not_match_regex """ raise NotImplementedError
def assert_ordered(iterable, key=lambda x: x, comp=operator.le):
    """
    Assert that all adjacent items in the iterable are in order based on comp.

    Yields the items of the iterable as it validates them.

    >>> list(assert_ordered(range(5)))
    [0, 1, 2, 3, 4]

    >>> list(assert_ordered(range(5), comp=operator.ge))
    Traceback (most recent call last):
    ...
    AssertionError: 0 < 1

    >>> list(assert_ordered(range(5, 0, -1), key=operator.neg))
    [5, 4, 3, 2, 1]

    Empty and single-item iterables are trivially ordered:

    >>> list(assert_ordered([]))
    []
    >>> list(assert_ordered([42]))
    [42]
    """
    err_tmpl = (
        "{pair[0]} > {pair[1]}"
        if comp is operator.le
        else "{pair[0]} < {pair[1]}"
        if comp is operator.ge
        else "not {comp} {pair}"
    )
    iterator = iter(iterable)
    try:
        prev = next(iterator)
    except StopIteration:
        # FIX: the original indexed ``pair[1]`` after the loop, which raised
        # NameError for empty and single-item iterables. An empty iterable is
        # vacuously ordered, so simply yield nothing.
        return
    for item in iterator:
        # ``pair`` must stay a local: err_tmpl formats it via **locals().
        pair = prev, item
        keyed = tuple(map(key, pair))
        assert comp(*keyed), err_tmpl.format(**locals())
        yield prev
        prev = item
    yield prev
def function[assert_ordered, parameter[iterable, key, comp]]: constant[ Assert that for all items in the iterable, they're in order based on comp >>> list(assert_ordered(range(5))) [0, 1, 2, 3, 4] >>> list(assert_ordered(range(5), comp=operator.ge)) Traceback (most recent call last): ... AssertionError: 0 < 1 >>> list(assert_ordered(range(5, 0, -1), key=operator.neg)) [5, 4, 3, 2, 1] ] variable[err_tmpl] assign[=] <ast.IfExp object at 0x7da1b03ba290> for taget[name[pair]] in starred[call[name[more_itertools].pairwise, parameter[name[iterable]]]] begin[:] variable[keyed] assign[=] call[name[tuple], parameter[call[name[map], parameter[name[key], name[pair]]]]] assert[call[name[comp], parameter[<ast.Starred object at 0x7da1b03bb340>]]] <ast.Yield object at 0x7da1b03b9b10> <ast.Yield object at 0x7da1b03b9750>
keyword[def] identifier[assert_ordered] ( identifier[iterable] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] , identifier[comp] = identifier[operator] . identifier[le] ): literal[string] identifier[err_tmpl] =( literal[string] keyword[if] identifier[comp] keyword[is] identifier[operator] . identifier[le] keyword[else] literal[string] keyword[if] identifier[comp] keyword[is] identifier[operator] . identifier[ge] keyword[else] literal[string] ) keyword[for] identifier[pair] keyword[in] identifier[more_itertools] . identifier[pairwise] ( identifier[iterable] ): identifier[keyed] = identifier[tuple] ( identifier[map] ( identifier[key] , identifier[pair] )) keyword[assert] identifier[comp] (* identifier[keyed] ), identifier[err_tmpl] . identifier[format] (** identifier[locals] ()) keyword[yield] identifier[pair] [ literal[int] ] keyword[yield] identifier[pair] [ literal[int] ]
def assert_ordered(iterable, key=lambda x: x, comp=operator.le): """ Assert that for all items in the iterable, they're in order based on comp >>> list(assert_ordered(range(5))) [0, 1, 2, 3, 4] >>> list(assert_ordered(range(5), comp=operator.ge)) Traceback (most recent call last): ... AssertionError: 0 < 1 >>> list(assert_ordered(range(5, 0, -1), key=operator.neg)) [5, 4, 3, 2, 1] """ err_tmpl = '{pair[0]} > {pair[1]}' if comp is operator.le else '{pair[0]} < {pair[1]}' if comp is operator.ge else 'not {comp} {pair}' for pair in more_itertools.pairwise(iterable): keyed = tuple(map(key, pair)) assert comp(*keyed), err_tmpl.format(**locals()) yield pair[0] # depends on [control=['for'], data=['pair']] yield pair[1]
async def _send_text(self, request: Request, stack: Stack):
    """
    Send text layers to the user.

    Each layer will go in its own bubble.

    Also, Facebook limits messages to 320 chars, so if any message is
    longer than that it will be split into as many messages as needed to
    be accepted by Facebook.
    """
    parts = []

    for layer in stack.layers:
        if isinstance(layer, lyr.MultiText):
            lines = await render(layer.text, request, multi_line=True)
            for line in lines:
                parts.extend(wrap(line, 320))
        elif isinstance(layer, (lyr.Text, lyr.RawText)):
            text = await render(layer.text, request)
            parts.extend(wrap(text, 320))

    # FIX: without any text layer (or only empty text) ``parts[-1]`` below
    # raised IndexError; there is nothing to send in that case.
    if not parts:
        return

    for part in parts[:-1]:
        await self._send(request, {
            'text': part,
        }, stack)

    # Quick replies are only attached to the last bubble.
    msg = {
        'text': parts[-1],
    }

    await self._add_qr(stack, msg, request)
    await self._send(request, msg, stack)
<ast.AsyncFunctionDef object at 0x7da20c6c70d0>
keyword[async] keyword[def] identifier[_send_text] ( identifier[self] , identifier[request] : identifier[Request] , identifier[stack] : identifier[Stack] ): literal[string] identifier[parts] =[] keyword[for] identifier[layer] keyword[in] identifier[stack] . identifier[layers] : keyword[if] identifier[isinstance] ( identifier[layer] , identifier[lyr] . identifier[MultiText] ): identifier[lines] = keyword[await] identifier[render] ( identifier[layer] . identifier[text] , identifier[request] , identifier[multi_line] = keyword[True] ) keyword[for] identifier[line] keyword[in] identifier[lines] : keyword[for] identifier[part] keyword[in] identifier[wrap] ( identifier[line] , literal[int] ): identifier[parts] . identifier[append] ( identifier[part] ) keyword[elif] identifier[isinstance] ( identifier[layer] ,( identifier[lyr] . identifier[Text] , identifier[lyr] . identifier[RawText] )): identifier[text] = keyword[await] identifier[render] ( identifier[layer] . identifier[text] , identifier[request] ) keyword[for] identifier[part] keyword[in] identifier[wrap] ( identifier[text] , literal[int] ): identifier[parts] . identifier[append] ( identifier[part] ) keyword[for] identifier[part] keyword[in] identifier[parts] [:- literal[int] ]: keyword[await] identifier[self] . identifier[_send] ( identifier[request] ,{ literal[string] : identifier[part] , }, identifier[stack] ) identifier[part] = identifier[parts] [- literal[int] ] identifier[msg] ={ literal[string] : identifier[part] , } keyword[await] identifier[self] . identifier[_add_qr] ( identifier[stack] , identifier[msg] , identifier[request] ) keyword[await] identifier[self] . identifier[_send] ( identifier[request] , identifier[msg] , identifier[stack] )
async def _send_text(self, request: Request, stack: Stack): """ Send text layers to the user. Each layer will go in its own bubble. Also, Facebook limits messages to 320 chars, so if any message is longer than that it will be split into as many messages as needed to be accepted by Facebook. """ parts = [] for layer in stack.layers: if isinstance(layer, lyr.MultiText): lines = await render(layer.text, request, multi_line=True) for line in lines: for part in wrap(line, 320): parts.append(part) # depends on [control=['for'], data=['part']] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]] elif isinstance(layer, (lyr.Text, lyr.RawText)): text = await render(layer.text, request) for part in wrap(text, 320): parts.append(part) # depends on [control=['for'], data=['part']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['layer']] for part in parts[:-1]: await self._send(request, {'text': part}, stack) # depends on [control=['for'], data=['part']] part = parts[-1] msg = {'text': part} await self._add_qr(stack, msg, request) await self._send(request, msg, stack)
def getRaw(self, instance, **kwargs):
    """Returns raw field value (possibly wrapped in BaseUnit)
    """
    # getattr(instance, "Remarks") returns a BaseUnit, which is callable
    raw = ObjectField.get(self, instance, **kwargs)
    return raw() if callable(raw) else raw
def function[getRaw, parameter[self, instance]]: constant[Returns raw field value (possible wrapped in BaseUnit) ] variable[value] assign[=] call[name[ObjectField].get, parameter[name[self], name[instance]]] if call[name[callable], parameter[name[value]]] begin[:] variable[value] assign[=] call[name[value], parameter[]] return[name[value]]
keyword[def] identifier[getRaw] ( identifier[self] , identifier[instance] ,** identifier[kwargs] ): literal[string] identifier[value] = identifier[ObjectField] . identifier[get] ( identifier[self] , identifier[instance] ,** identifier[kwargs] ) keyword[if] identifier[callable] ( identifier[value] ): identifier[value] = identifier[value] () keyword[return] identifier[value]
def getRaw(self, instance, **kwargs): """Returns raw field value (possible wrapped in BaseUnit) """ value = ObjectField.get(self, instance, **kwargs) # getattr(instance, "Remarks") returns a BaseUnit if callable(value): value = value() # depends on [control=['if'], data=[]] return value
def geigh(H, S):
    """Solve the generalized eigensystem Hc = ESc.

    Returns the eigenvalues and the back-transformed eigenvectors.
    """
    # Reduce to an ordinary symmetric eigenproblem via the cholorth(S)
    # transform, then map the eigenvectors back to the original basis.
    transform = cholorth(S)
    eigvals, eigvecs = np.linalg.eigh(simx(H, transform))
    return eigvals, np.dot(transform, eigvecs)
def function[geigh, parameter[H, S]]: constant[Solve the generalized eigensystem Hc = ESc] variable[A] assign[=] call[name[cholorth], parameter[name[S]]] <ast.Tuple object at 0x7da20c6e5030> assign[=] call[name[np].linalg.eigh, parameter[call[name[simx], parameter[name[H], name[A]]]]] return[tuple[[<ast.Name object at 0x7da20c6e4fa0>, <ast.Call object at 0x7da20c6e7c40>]]]
keyword[def] identifier[geigh] ( identifier[H] , identifier[S] ): literal[string] identifier[A] = identifier[cholorth] ( identifier[S] ) identifier[E] , identifier[U] = identifier[np] . identifier[linalg] . identifier[eigh] ( identifier[simx] ( identifier[H] , identifier[A] )) keyword[return] identifier[E] , identifier[np] . identifier[dot] ( identifier[A] , identifier[U] )
def geigh(H, S): """Solve the generalized eigensystem Hc = ESc""" A = cholorth(S) (E, U) = np.linalg.eigh(simx(H, A)) return (E, np.dot(A, U))
def git_commit(repo_dir, message=None, amend=False, stage=True):
    """Commit any changes, optionally staging all changes beforehand.

    :param repo_dir: path of the git repository to commit in
    :param message: commit message; when falsy and not amending, an empty
        message is allowed
    :param amend: amend the previous commit instead of creating a new one
    :param stage: stage all changes (``git add``) before committing
    """
    # FIX: ``pipes.quote`` was deprecated and removed in Python 3.13
    # (PEP 594); ``shlex.quote`` is its documented replacement.
    import shlex

    if stage:
        git_add_all(repo_dir)
    command = ['git', 'commit', '--allow-empty']
    if amend:
        command.append('--amend')
        if not message:
            command.append('--no-edit')
    if message:
        # NOTE(review): if execute_git_command runs the argv list without a
        # shell, quoting the message here is likely unnecessary — verify.
        command.extend(['--message', shlex.quote(message)])
    elif not amend:
        # if not amending and no message, allow an empty message
        command.extend(['--message=', '--allow-empty-message'])
    return execute_git_command(command, repo_dir=repo_dir)
def function[git_commit, parameter[repo_dir, message, amend, stage]]: constant[Commit any changes, optionally staging all changes beforehand.] if name[stage] begin[:] call[name[git_add_all], parameter[name[repo_dir]]] variable[command] assign[=] list[[<ast.Constant object at 0x7da1b094bcd0>, <ast.Constant object at 0x7da1b094b250>, <ast.Constant object at 0x7da1b0949f30>]] if name[amend] begin[:] call[name[command].append, parameter[constant[--amend]]] if <ast.UnaryOp object at 0x7da1b094ac80> begin[:] call[name[command].append, parameter[constant[--no-edit]]] if name[message] begin[:] call[name[command].extend, parameter[list[[<ast.Constant object at 0x7da1b09ea5c0>, <ast.Call object at 0x7da1b09ea7d0>]]]] return[call[name[execute_git_command], parameter[name[command]]]]
keyword[def] identifier[git_commit] ( identifier[repo_dir] , identifier[message] = keyword[None] , identifier[amend] = keyword[False] , identifier[stage] = keyword[True] ): literal[string] keyword[if] identifier[stage] : identifier[git_add_all] ( identifier[repo_dir] ) identifier[command] =[ literal[string] , literal[string] , literal[string] ] keyword[if] identifier[amend] : identifier[command] . identifier[append] ( literal[string] ) keyword[if] keyword[not] identifier[message] : identifier[command] . identifier[append] ( literal[string] ) keyword[if] identifier[message] : identifier[command] . identifier[extend] ([ literal[string] , identifier[pipes] . identifier[quote] ( identifier[message] )]) keyword[elif] keyword[not] identifier[amend] : identifier[command] . identifier[extend] ([ literal[string] , literal[string] ]) keyword[return] identifier[execute_git_command] ( identifier[command] , identifier[repo_dir] = identifier[repo_dir] )
def git_commit(repo_dir, message=None, amend=False, stage=True): """Commit any changes, optionally staging all changes beforehand.""" if stage: git_add_all(repo_dir) # depends on [control=['if'], data=[]] command = ['git', 'commit', '--allow-empty'] if amend: command.append('--amend') if not message: command.append('--no-edit') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if message: command.extend(['--message', pipes.quote(message)]) # depends on [control=['if'], data=[]] elif not amend: # if not amending and no message, allow an empty message command.extend(['--message=', '--allow-empty-message']) # depends on [control=['if'], data=[]] return execute_git_command(command, repo_dir=repo_dir)
def apply(self, macro): """ Show what a macro would do Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/macros#show-changes-to-ticket>`__. :param macro: Macro object or id. """ return self._query_zendesk(self.endpoint.apply, 'result', id=macro)
def function[apply, parameter[self, macro]]: constant[ Show what a macro would do Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/macros#show-changes-to-ticket>`__. :param macro: Macro object or id. ] return[call[name[self]._query_zendesk, parameter[name[self].endpoint.apply, constant[result]]]]
keyword[def] identifier[apply] ( identifier[self] , identifier[macro] ): literal[string] keyword[return] identifier[self] . identifier[_query_zendesk] ( identifier[self] . identifier[endpoint] . identifier[apply] , literal[string] , identifier[id] = identifier[macro] )
def apply(self, macro): """ Show what a macro would do Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/macros#show-changes-to-ticket>`__. :param macro: Macro object or id. """ return self._query_zendesk(self.endpoint.apply, 'result', id=macro)
def rsdl_sn(self, U): """Compute dual residual normalisation term. Overriding this method is required if methods :meth:`cnst_A`, :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not overridden. """ return self.rho * np.linalg.norm(self.cnst_AT(U))
def function[rsdl_sn, parameter[self, U]]: constant[Compute dual residual normalisation term. Overriding this method is required if methods :meth:`cnst_A`, :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not overridden. ] return[binary_operation[name[self].rho * call[name[np].linalg.norm, parameter[call[name[self].cnst_AT, parameter[name[U]]]]]]]
keyword[def] identifier[rsdl_sn] ( identifier[self] , identifier[U] ): literal[string] keyword[return] identifier[self] . identifier[rho] * identifier[np] . identifier[linalg] . identifier[norm] ( identifier[self] . identifier[cnst_AT] ( identifier[U] ))
def rsdl_sn(self, U): """Compute dual residual normalisation term. Overriding this method is required if methods :meth:`cnst_A`, :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not overridden. """ return self.rho * np.linalg.norm(self.cnst_AT(U))
def _set_network(self, v, load=False): """ Setter method for network, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/network (list) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("network_ipv4_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-ipv4-address', extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """network must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("network_ipv4_address",network.network, yang_name="network", rest_name="network", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-ipv4-address', extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, 
u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}), is_container='list', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""", }) self.__network = t if hasattr(self, '_set'): self._set()
def function[_set_network, parameter[self, v, load]]: constant[ Setter method for network, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/network (list) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da1b26ac3a0> name[self].__network assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_network] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGListType] ( literal[string] , identifier[network] . identifier[network] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[is_container] = literal[string] , identifier[user_ordered] = keyword[False] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[yang_keys] = literal[string] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}), identifier[is_container] = literal[string] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . 
identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__network] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_network(self, v, load=False): """ Setter method for network, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/network (list) If this variable is read-only (config: false) in the source YANG file, then _set_network is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_network() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=YANGListType('network_ipv4_address', network.network, yang_name='network', rest_name='network', parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='network-ipv4-address', extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}), is_container='list', yang_name='network', rest_name='network', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Specify a network to announce via BGP', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'AfIpv4Network'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'network must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("network_ipv4_address",network.network, yang_name="network", rest_name="network", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'network-ipv4-address\', extensions={u\'tailf-common\': {u\'info\': u\'Specify a network 
to announce via BGP\', u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'cli-suppress-list-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'callpoint\': u\'AfIpv4Network\'}}), is_container=\'list\', yang_name="network", rest_name="network", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Specify a network to announce via BGP\', u\'cli-no-key-completion\': None, u\'cli-suppress-mode\': None, u\'cli-suppress-list-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'callpoint\': u\'AfIpv4Network\'}}, namespace=\'urn:brocade.com:mgmt:brocade-bgp\', defining_module=\'brocade-bgp\', yang_type=\'list\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__network = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def eval_str_to_list(input_str: str) -> List[str]: """Turn str into str or tuple.""" inner_cast = ast.literal_eval(input_str) # type: List[str] if isinstance(inner_cast, list): return inner_cast else: raise ValueError
def function[eval_str_to_list, parameter[input_str]]: constant[Turn str into str or tuple.] variable[inner_cast] assign[=] call[name[ast].literal_eval, parameter[name[input_str]]] if call[name[isinstance], parameter[name[inner_cast], name[list]]] begin[:] return[name[inner_cast]]
keyword[def] identifier[eval_str_to_list] ( identifier[input_str] : identifier[str] )-> identifier[List] [ identifier[str] ]: literal[string] identifier[inner_cast] = identifier[ast] . identifier[literal_eval] ( identifier[input_str] ) keyword[if] identifier[isinstance] ( identifier[inner_cast] , identifier[list] ): keyword[return] identifier[inner_cast] keyword[else] : keyword[raise] identifier[ValueError]
def eval_str_to_list(input_str: str) -> List[str]: """Turn str into str or tuple.""" inner_cast = ast.literal_eval(input_str) # type: List[str] if isinstance(inner_cast, list): return inner_cast # depends on [control=['if'], data=[]] else: raise ValueError
def open_filezip(file_path, find_str): """ Open the wrapped file. Read directly from the zip without extracting its content. """ if zipfile.is_zipfile(file_path): zipf = zipfile.ZipFile(file_path) interesting_files = [f for f in zipf.infolist() if find_str in f] for inside_file in interesting_files: yield zipf.open(inside_file)
def function[open_filezip, parameter[file_path, find_str]]: constant[ Open the wrapped file. Read directly from the zip without extracting its content. ] if call[name[zipfile].is_zipfile, parameter[name[file_path]]] begin[:] variable[zipf] assign[=] call[name[zipfile].ZipFile, parameter[name[file_path]]] variable[interesting_files] assign[=] <ast.ListComp object at 0x7da18f58d120> for taget[name[inside_file]] in starred[name[interesting_files]] begin[:] <ast.Yield object at 0x7da18f58e140>
keyword[def] identifier[open_filezip] ( identifier[file_path] , identifier[find_str] ): literal[string] keyword[if] identifier[zipfile] . identifier[is_zipfile] ( identifier[file_path] ): identifier[zipf] = identifier[zipfile] . identifier[ZipFile] ( identifier[file_path] ) identifier[interesting_files] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[zipf] . identifier[infolist] () keyword[if] identifier[find_str] keyword[in] identifier[f] ] keyword[for] identifier[inside_file] keyword[in] identifier[interesting_files] : keyword[yield] identifier[zipf] . identifier[open] ( identifier[inside_file] )
def open_filezip(file_path, find_str): """ Open the wrapped file. Read directly from the zip without extracting its content. """ if zipfile.is_zipfile(file_path): zipf = zipfile.ZipFile(file_path) interesting_files = [f for f in zipf.infolist() if find_str in f] for inside_file in interesting_files: yield zipf.open(inside_file) # depends on [control=['for'], data=['inside_file']] # depends on [control=['if'], data=[]]
def addPathway( self, pathway_id, pathway_label, pathway_type=None, pathway_description=None): """ Adds a pathway as a class. If no specific type is specified, it will default to a subclass of "GO:cellular_process" and "PW:pathway". :param pathway_id: :param pathway_label: :param pathway_type: :param pathway_description: :return: """ if pathway_type is None: pathway_type = self.globaltt['cellular_process'] self.model.addClassToGraph( pathway_id, pathway_label, pathway_type, pathway_description) self.model.addSubClass(pathway_id, self.globaltt['pathway']) return
def function[addPathway, parameter[self, pathway_id, pathway_label, pathway_type, pathway_description]]: constant[ Adds a pathway as a class. If no specific type is specified, it will default to a subclass of "GO:cellular_process" and "PW:pathway". :param pathway_id: :param pathway_label: :param pathway_type: :param pathway_description: :return: ] if compare[name[pathway_type] is constant[None]] begin[:] variable[pathway_type] assign[=] call[name[self].globaltt][constant[cellular_process]] call[name[self].model.addClassToGraph, parameter[name[pathway_id], name[pathway_label], name[pathway_type], name[pathway_description]]] call[name[self].model.addSubClass, parameter[name[pathway_id], call[name[self].globaltt][constant[pathway]]]] return[None]
keyword[def] identifier[addPathway] ( identifier[self] , identifier[pathway_id] , identifier[pathway_label] , identifier[pathway_type] = keyword[None] , identifier[pathway_description] = keyword[None] ): literal[string] keyword[if] identifier[pathway_type] keyword[is] keyword[None] : identifier[pathway_type] = identifier[self] . identifier[globaltt] [ literal[string] ] identifier[self] . identifier[model] . identifier[addClassToGraph] ( identifier[pathway_id] , identifier[pathway_label] , identifier[pathway_type] , identifier[pathway_description] ) identifier[self] . identifier[model] . identifier[addSubClass] ( identifier[pathway_id] , identifier[self] . identifier[globaltt] [ literal[string] ]) keyword[return]
def addPathway(self, pathway_id, pathway_label, pathway_type=None, pathway_description=None): """ Adds a pathway as a class. If no specific type is specified, it will default to a subclass of "GO:cellular_process" and "PW:pathway". :param pathway_id: :param pathway_label: :param pathway_type: :param pathway_description: :return: """ if pathway_type is None: pathway_type = self.globaltt['cellular_process'] # depends on [control=['if'], data=['pathway_type']] self.model.addClassToGraph(pathway_id, pathway_label, pathway_type, pathway_description) self.model.addSubClass(pathway_id, self.globaltt['pathway']) return
def resample(grid, wl, flux): """ Resample spectrum onto desired grid """ flux_rs = (interpolate.interp1d(wl, flux))(grid) return flux_rs
def function[resample, parameter[grid, wl, flux]]: constant[ Resample spectrum onto desired grid ] variable[flux_rs] assign[=] call[call[name[interpolate].interp1d, parameter[name[wl], name[flux]]], parameter[name[grid]]] return[name[flux_rs]]
keyword[def] identifier[resample] ( identifier[grid] , identifier[wl] , identifier[flux] ): literal[string] identifier[flux_rs] =( identifier[interpolate] . identifier[interp1d] ( identifier[wl] , identifier[flux] ))( identifier[grid] ) keyword[return] identifier[flux_rs]
def resample(grid, wl, flux): """ Resample spectrum onto desired grid """ flux_rs = interpolate.interp1d(wl, flux)(grid) return flux_rs
def infer_newX(model, Y_new, optimize=True, init='L2'): """ Infer the distribution of X for the new observed data *Y_new*. :param model: the GPy model used in inference :type model: GPy.core.Model :param Y_new: the new observed data for inference :type Y_new: numpy.ndarray :param optimize: whether to optimize the location of new X (True by default) :type optimize: boolean :return: a tuple containing the estimated posterior distribution of X and the model that optimize X :rtype: (GPy.core.parameterization.variational.VariationalPosterior, GPy.core.Model) """ infr_m = InferenceX(model, Y_new, init=init) if optimize: infr_m.optimize() return infr_m.X, infr_m
def function[infer_newX, parameter[model, Y_new, optimize, init]]: constant[ Infer the distribution of X for the new observed data *Y_new*. :param model: the GPy model used in inference :type model: GPy.core.Model :param Y_new: the new observed data for inference :type Y_new: numpy.ndarray :param optimize: whether to optimize the location of new X (True by default) :type optimize: boolean :return: a tuple containing the estimated posterior distribution of X and the model that optimize X :rtype: (GPy.core.parameterization.variational.VariationalPosterior, GPy.core.Model) ] variable[infr_m] assign[=] call[name[InferenceX], parameter[name[model], name[Y_new]]] if name[optimize] begin[:] call[name[infr_m].optimize, parameter[]] return[tuple[[<ast.Attribute object at 0x7da1b1b2a680>, <ast.Name object at 0x7da1b1b29300>]]]
keyword[def] identifier[infer_newX] ( identifier[model] , identifier[Y_new] , identifier[optimize] = keyword[True] , identifier[init] = literal[string] ): literal[string] identifier[infr_m] = identifier[InferenceX] ( identifier[model] , identifier[Y_new] , identifier[init] = identifier[init] ) keyword[if] identifier[optimize] : identifier[infr_m] . identifier[optimize] () keyword[return] identifier[infr_m] . identifier[X] , identifier[infr_m]
def infer_newX(model, Y_new, optimize=True, init='L2'): """ Infer the distribution of X for the new observed data *Y_new*. :param model: the GPy model used in inference :type model: GPy.core.Model :param Y_new: the new observed data for inference :type Y_new: numpy.ndarray :param optimize: whether to optimize the location of new X (True by default) :type optimize: boolean :return: a tuple containing the estimated posterior distribution of X and the model that optimize X :rtype: (GPy.core.parameterization.variational.VariationalPosterior, GPy.core.Model) """ infr_m = InferenceX(model, Y_new, init=init) if optimize: infr_m.optimize() # depends on [control=['if'], data=[]] return (infr_m.X, infr_m)
def check_platform(self, dataset): ''' int platform_variable; //............................................ RECOMMENDED - a container variable storing information about the platform. If more than one, can expand each attribute into a variable. For example, platform_call_sign and platform_nodc_code. See instrument_parameter_variable for an example. platform_variable:long_name = "" ; //........................ RECOMMENDED - Provide a descriptive, long name for this variable. platform_variable:comment = "" ; //.......................... RECOMMENDED - Add useful, additional information here. platform_variable:call_sign = "" ; //........................ RECOMMENDED - This attribute identifies the call sign of the platform. platform_variable:ncei_code = ""; //......................... RECOMMENDED - This attribute identifies the NCEI code of the platform. Look at http://www.nodc.noaa.gov/cgi-bin/OAS/prd/platform to find if NCEI codes are available. platform_variable:wmo_code = "";//........................... RECOMMENDED - This attribute identifies the wmo code of the platform. Information on getting WMO codes is available at http://www.wmo.int/pages/prog/amp/mmop/wmo-number-rules.html platform_variable:imo_code = "";//.......................... RECOMMENDED - This attribute identifies the International Maritime Organization (IMO) number assigned by Lloyd's register. 
''' # Check for the platform variable platforms = util.get_platform_variables(dataset) if not platforms: return Result(BaseCheck.MEDIUM, False, 'A container variable storing information about the platform exists', ['Create a variable to store the platform information']) results = [] for platform in platforms: test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for platform variable {}'.format(platform)) pvar = dataset.variables[platform] test_ctx.assert_true(getattr(pvar, 'long_name', '') != '', 'long_name attribute should exist and not be empty') if hasattr(pvar, 'comment'): test_ctx.assert_true(getattr(pvar, 'comment', '') != '', 'comment attribute should not be empty if specified') # We only check to see if nodc_code, wmo_code and imo_code are empty. They are only recommended if they exist for the platform. found_identifier = False if hasattr(pvar, 'ncei_code'): test_ctx.assert_true(getattr(pvar, 'ncei_code', '') != '', 'ncei_code should not be empty if specified') found_identifier = True if hasattr(pvar, 'wmo_code'): test_ctx.assert_true(getattr(pvar, 'wmo_code', '') != '', 'wmo_code should not be empty if specified') found_identifier = True if hasattr(pvar, 'imo_code'): test_ctx.assert_true(getattr(pvar, 'imo_code', '') != '', 'imo_code should not be empty if specified') found_identifier = True if hasattr(pvar, 'call_sign'): test_ctx.assert_true(getattr(pvar, 'call_sign', '') != '', 'call_sign attribute should not be empty if specified') found_identifier = True test_ctx.assert_true(found_identifier, 'At least one attribute should be defined to identify the platform: ncei_code, wmo_code, imo_code, call_sign.') results.append(test_ctx.to_result()) return results
def function[check_platform, parameter[self, dataset]]: constant[ int platform_variable; //............................................ RECOMMENDED - a container variable storing information about the platform. If more than one, can expand each attribute into a variable. For example, platform_call_sign and platform_nodc_code. See instrument_parameter_variable for an example. platform_variable:long_name = "" ; //........................ RECOMMENDED - Provide a descriptive, long name for this variable. platform_variable:comment = "" ; //.......................... RECOMMENDED - Add useful, additional information here. platform_variable:call_sign = "" ; //........................ RECOMMENDED - This attribute identifies the call sign of the platform. platform_variable:ncei_code = ""; //......................... RECOMMENDED - This attribute identifies the NCEI code of the platform. Look at http://www.nodc.noaa.gov/cgi-bin/OAS/prd/platform to find if NCEI codes are available. platform_variable:wmo_code = "";//........................... RECOMMENDED - This attribute identifies the wmo code of the platform. Information on getting WMO codes is available at http://www.wmo.int/pages/prog/amp/mmop/wmo-number-rules.html platform_variable:imo_code = "";//.......................... RECOMMENDED - This attribute identifies the International Maritime Organization (IMO) number assigned by Lloyd's register. 
] variable[platforms] assign[=] call[name[util].get_platform_variables, parameter[name[dataset]]] if <ast.UnaryOp object at 0x7da1b26af8e0> begin[:] return[call[name[Result], parameter[name[BaseCheck].MEDIUM, constant[False], constant[A container variable storing information about the platform exists], list[[<ast.Constant object at 0x7da1b26acd60>]]]]] variable[results] assign[=] list[[]] for taget[name[platform]] in starred[name[platforms]] begin[:] variable[test_ctx] assign[=] call[name[TestCtx], parameter[name[BaseCheck].MEDIUM, call[constant[Recommended attributes for platform variable {}].format, parameter[name[platform]]]]] variable[pvar] assign[=] call[name[dataset].variables][name[platform]] call[name[test_ctx].assert_true, parameter[compare[call[name[getattr], parameter[name[pvar], constant[long_name], constant[]]] not_equal[!=] constant[]], constant[long_name attribute should exist and not be empty]]] if call[name[hasattr], parameter[name[pvar], constant[comment]]] begin[:] call[name[test_ctx].assert_true, parameter[compare[call[name[getattr], parameter[name[pvar], constant[comment], constant[]]] not_equal[!=] constant[]], constant[comment attribute should not be empty if specified]]] variable[found_identifier] assign[=] constant[False] if call[name[hasattr], parameter[name[pvar], constant[ncei_code]]] begin[:] call[name[test_ctx].assert_true, parameter[compare[call[name[getattr], parameter[name[pvar], constant[ncei_code], constant[]]] not_equal[!=] constant[]], constant[ncei_code should not be empty if specified]]] variable[found_identifier] assign[=] constant[True] if call[name[hasattr], parameter[name[pvar], constant[wmo_code]]] begin[:] call[name[test_ctx].assert_true, parameter[compare[call[name[getattr], parameter[name[pvar], constant[wmo_code], constant[]]] not_equal[!=] constant[]], constant[wmo_code should not be empty if specified]]] variable[found_identifier] assign[=] constant[True] if call[name[hasattr], parameter[name[pvar], 
constant[imo_code]]] begin[:] call[name[test_ctx].assert_true, parameter[compare[call[name[getattr], parameter[name[pvar], constant[imo_code], constant[]]] not_equal[!=] constant[]], constant[imo_code should not be empty if specified]]] variable[found_identifier] assign[=] constant[True] if call[name[hasattr], parameter[name[pvar], constant[call_sign]]] begin[:] call[name[test_ctx].assert_true, parameter[compare[call[name[getattr], parameter[name[pvar], constant[call_sign], constant[]]] not_equal[!=] constant[]], constant[call_sign attribute should not be empty if specified]]] variable[found_identifier] assign[=] constant[True] call[name[test_ctx].assert_true, parameter[name[found_identifier], constant[At least one attribute should be defined to identify the platform: ncei_code, wmo_code, imo_code, call_sign.]]] call[name[results].append, parameter[call[name[test_ctx].to_result, parameter[]]]] return[name[results]]
keyword[def] identifier[check_platform] ( identifier[self] , identifier[dataset] ): literal[string] identifier[platforms] = identifier[util] . identifier[get_platform_variables] ( identifier[dataset] ) keyword[if] keyword[not] identifier[platforms] : keyword[return] identifier[Result] ( identifier[BaseCheck] . identifier[MEDIUM] , keyword[False] , literal[string] , [ literal[string] ]) identifier[results] =[] keyword[for] identifier[platform] keyword[in] identifier[platforms] : identifier[test_ctx] = identifier[TestCtx] ( identifier[BaseCheck] . identifier[MEDIUM] , literal[string] . identifier[format] ( identifier[platform] )) identifier[pvar] = identifier[dataset] . identifier[variables] [ identifier[platform] ] identifier[test_ctx] . identifier[assert_true] ( identifier[getattr] ( identifier[pvar] , literal[string] , literal[string] )!= literal[string] , literal[string] ) keyword[if] identifier[hasattr] ( identifier[pvar] , literal[string] ): identifier[test_ctx] . identifier[assert_true] ( identifier[getattr] ( identifier[pvar] , literal[string] , literal[string] )!= literal[string] , literal[string] ) identifier[found_identifier] = keyword[False] keyword[if] identifier[hasattr] ( identifier[pvar] , literal[string] ): identifier[test_ctx] . identifier[assert_true] ( identifier[getattr] ( identifier[pvar] , literal[string] , literal[string] )!= literal[string] , literal[string] ) identifier[found_identifier] = keyword[True] keyword[if] identifier[hasattr] ( identifier[pvar] , literal[string] ): identifier[test_ctx] . identifier[assert_true] ( identifier[getattr] ( identifier[pvar] , literal[string] , literal[string] )!= literal[string] , literal[string] ) identifier[found_identifier] = keyword[True] keyword[if] identifier[hasattr] ( identifier[pvar] , literal[string] ): identifier[test_ctx] . 
identifier[assert_true] ( identifier[getattr] ( identifier[pvar] , literal[string] , literal[string] )!= literal[string] , literal[string] ) identifier[found_identifier] = keyword[True] keyword[if] identifier[hasattr] ( identifier[pvar] , literal[string] ): identifier[test_ctx] . identifier[assert_true] ( identifier[getattr] ( identifier[pvar] , literal[string] , literal[string] )!= literal[string] , literal[string] ) identifier[found_identifier] = keyword[True] identifier[test_ctx] . identifier[assert_true] ( identifier[found_identifier] , literal[string] ) identifier[results] . identifier[append] ( identifier[test_ctx] . identifier[to_result] ()) keyword[return] identifier[results]
def check_platform(self, dataset): """ int platform_variable; //............................................ RECOMMENDED - a container variable storing information about the platform. If more than one, can expand each attribute into a variable. For example, platform_call_sign and platform_nodc_code. See instrument_parameter_variable for an example. platform_variable:long_name = "" ; //........................ RECOMMENDED - Provide a descriptive, long name for this variable. platform_variable:comment = "" ; //.......................... RECOMMENDED - Add useful, additional information here. platform_variable:call_sign = "" ; //........................ RECOMMENDED - This attribute identifies the call sign of the platform. platform_variable:ncei_code = ""; //......................... RECOMMENDED - This attribute identifies the NCEI code of the platform. Look at http://www.nodc.noaa.gov/cgi-bin/OAS/prd/platform to find if NCEI codes are available. platform_variable:wmo_code = "";//........................... RECOMMENDED - This attribute identifies the wmo code of the platform. Information on getting WMO codes is available at http://www.wmo.int/pages/prog/amp/mmop/wmo-number-rules.html platform_variable:imo_code = "";//.......................... RECOMMENDED - This attribute identifies the International Maritime Organization (IMO) number assigned by Lloyd's register. 
""" # Check for the platform variable platforms = util.get_platform_variables(dataset) if not platforms: return Result(BaseCheck.MEDIUM, False, 'A container variable storing information about the platform exists', ['Create a variable to store the platform information']) # depends on [control=['if'], data=[]] results = [] for platform in platforms: test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for platform variable {}'.format(platform)) pvar = dataset.variables[platform] test_ctx.assert_true(getattr(pvar, 'long_name', '') != '', 'long_name attribute should exist and not be empty') if hasattr(pvar, 'comment'): test_ctx.assert_true(getattr(pvar, 'comment', '') != '', 'comment attribute should not be empty if specified') # depends on [control=['if'], data=[]] # We only check to see if nodc_code, wmo_code and imo_code are empty. They are only recommended if they exist for the platform. found_identifier = False if hasattr(pvar, 'ncei_code'): test_ctx.assert_true(getattr(pvar, 'ncei_code', '') != '', 'ncei_code should not be empty if specified') found_identifier = True # depends on [control=['if'], data=[]] if hasattr(pvar, 'wmo_code'): test_ctx.assert_true(getattr(pvar, 'wmo_code', '') != '', 'wmo_code should not be empty if specified') found_identifier = True # depends on [control=['if'], data=[]] if hasattr(pvar, 'imo_code'): test_ctx.assert_true(getattr(pvar, 'imo_code', '') != '', 'imo_code should not be empty if specified') found_identifier = True # depends on [control=['if'], data=[]] if hasattr(pvar, 'call_sign'): test_ctx.assert_true(getattr(pvar, 'call_sign', '') != '', 'call_sign attribute should not be empty if specified') found_identifier = True # depends on [control=['if'], data=[]] test_ctx.assert_true(found_identifier, 'At least one attribute should be defined to identify the platform: ncei_code, wmo_code, imo_code, call_sign.') results.append(test_ctx.to_result()) # depends on [control=['for'], data=['platform']] return results
def _check_symbols(self, symbols): """the size must be the same as the length of the array numbers and all elements must be strings""" if len(symbols) != self.size: raise TypeError("The number of symbols in the graph does not " "match the length of the atomic numbers array.") for symbol in symbols: if not isinstance(symbol, str): raise TypeError("All symbols must be strings.")
def function[_check_symbols, parameter[self, symbols]]: constant[the size must be the same as the length of the array numbers and all elements must be strings] if compare[call[name[len], parameter[name[symbols]]] not_equal[!=] name[self].size] begin[:] <ast.Raise object at 0x7da207f01900> for taget[name[symbol]] in starred[name[symbols]] begin[:] if <ast.UnaryOp object at 0x7da207f03af0> begin[:] <ast.Raise object at 0x7da207f02530>
keyword[def] identifier[_check_symbols] ( identifier[self] , identifier[symbols] ): literal[string] keyword[if] identifier[len] ( identifier[symbols] )!= identifier[self] . identifier[size] : keyword[raise] identifier[TypeError] ( literal[string] literal[string] ) keyword[for] identifier[symbol] keyword[in] identifier[symbols] : keyword[if] keyword[not] identifier[isinstance] ( identifier[symbol] , identifier[str] ): keyword[raise] identifier[TypeError] ( literal[string] )
def _check_symbols(self, symbols): """the size must be the same as the length of the array numbers and all elements must be strings""" if len(symbols) != self.size: raise TypeError('The number of symbols in the graph does not match the length of the atomic numbers array.') # depends on [control=['if'], data=[]] for symbol in symbols: if not isinstance(symbol, str): raise TypeError('All symbols must be strings.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['symbol']]
def warm_spell_duration_index(tasmax, tx90, window=6, freq='YS'): r"""Warm spell duration index Number of days with at least six consecutive days where the daily maximum temperature is above the 90th percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the 1961-1990 period. Parameters ---------- tasmax : xarray.DataArray Maximum daily temperature [℃] or [K] tx90 : float 90th percentile of daily maximum temperature [℃] or [K] window : int Minimum number of days with temperature below threshold to qualify as a warm spell. freq : str, optional Resampling frequency Returns ------- xarray.DataArray Count of days with at least six consecutive days where the daily maximum temperature is above the 90th percentile [days]. References ---------- From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI). Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290. """ if 'dayofyear' not in tx90.coords.keys(): raise AttributeError("tx90 should have dayofyear coordinates.") # The day of year value of the tasmax series. doy = tasmax.indexes['time'].dayofyear # adjustment of tx90 to tasmax doy range tx90 = utils.adjust_doy_calendar(tx90, tasmax) # Create an array with the shape and coords of tasmax, but with values set to tx90 according to the doy index. thresh = xr.full_like(tasmax, np.nan) thresh.data = tx90.sel(dayofyear=doy) above = (tasmax > thresh) return above.resample(time=freq).apply(rl.windowed_run_count, window=window, dim='time')
def function[warm_spell_duration_index, parameter[tasmax, tx90, window, freq]]: constant[Warm spell duration index Number of days with at least six consecutive days where the daily maximum temperature is above the 90th percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the 1961-1990 period. Parameters ---------- tasmax : xarray.DataArray Maximum daily temperature [℃] or [K] tx90 : float 90th percentile of daily maximum temperature [℃] or [K] window : int Minimum number of days with temperature below threshold to qualify as a warm spell. freq : str, optional Resampling frequency Returns ------- xarray.DataArray Count of days with at least six consecutive days where the daily maximum temperature is above the 90th percentile [days]. References ---------- From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI). Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290. ] if compare[constant[dayofyear] <ast.NotIn object at 0x7da2590d7190> call[name[tx90].coords.keys, parameter[]]] begin[:] <ast.Raise object at 0x7da20c76e8f0> variable[doy] assign[=] call[name[tasmax].indexes][constant[time]].dayofyear variable[tx90] assign[=] call[name[utils].adjust_doy_calendar, parameter[name[tx90], name[tasmax]]] variable[thresh] assign[=] call[name[xr].full_like, parameter[name[tasmax], name[np].nan]] name[thresh].data assign[=] call[name[tx90].sel, parameter[]] variable[above] assign[=] compare[name[tasmax] greater[>] name[thresh]] return[call[call[name[above].resample, parameter[]].apply, parameter[name[rl].windowed_run_count]]]
keyword[def] identifier[warm_spell_duration_index] ( identifier[tasmax] , identifier[tx90] , identifier[window] = literal[int] , identifier[freq] = literal[string] ): literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[tx90] . identifier[coords] . identifier[keys] (): keyword[raise] identifier[AttributeError] ( literal[string] ) identifier[doy] = identifier[tasmax] . identifier[indexes] [ literal[string] ]. identifier[dayofyear] identifier[tx90] = identifier[utils] . identifier[adjust_doy_calendar] ( identifier[tx90] , identifier[tasmax] ) identifier[thresh] = identifier[xr] . identifier[full_like] ( identifier[tasmax] , identifier[np] . identifier[nan] ) identifier[thresh] . identifier[data] = identifier[tx90] . identifier[sel] ( identifier[dayofyear] = identifier[doy] ) identifier[above] =( identifier[tasmax] > identifier[thresh] ) keyword[return] identifier[above] . identifier[resample] ( identifier[time] = identifier[freq] ). identifier[apply] ( identifier[rl] . identifier[windowed_run_count] , identifier[window] = identifier[window] , identifier[dim] = literal[string] )
def warm_spell_duration_index(tasmax, tx90, window=6, freq='YS'): """Warm spell duration index Number of days with at least six consecutive days where the daily maximum temperature is above the 90th percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the 1961-1990 period. Parameters ---------- tasmax : xarray.DataArray Maximum daily temperature [℃] or [K] tx90 : float 90th percentile of daily maximum temperature [℃] or [K] window : int Minimum number of days with temperature below threshold to qualify as a warm spell. freq : str, optional Resampling frequency Returns ------- xarray.DataArray Count of days with at least six consecutive days where the daily maximum temperature is above the 90th percentile [days]. References ---------- From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI). Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290. """ if 'dayofyear' not in tx90.coords.keys(): raise AttributeError('tx90 should have dayofyear coordinates.') # depends on [control=['if'], data=[]] # The day of year value of the tasmax series. doy = tasmax.indexes['time'].dayofyear # adjustment of tx90 to tasmax doy range tx90 = utils.adjust_doy_calendar(tx90, tasmax) # Create an array with the shape and coords of tasmax, but with values set to tx90 according to the doy index. thresh = xr.full_like(tasmax, np.nan) thresh.data = tx90.sel(dayofyear=doy) above = tasmax > thresh return above.resample(time=freq).apply(rl.windowed_run_count, window=window, dim='time')
def _encrypt_data_key(self, data_key, algorithm, encryption_context=None): """Encrypts a data key and returns the ciphertext. :param data_key: Unencrypted data key :type data_key: :class:`aws_encryption_sdk.structures.RawDataKey` or :class:`aws_encryption_sdk.structures.DataKey` :param algorithm: Placeholder to maintain API compatibility with parent :param dict encryption_context: Encryption context to pass to KMS :returns: Data key containing encrypted data key :rtype: aws_encryption_sdk.structures.EncryptedDataKey :raises EncryptKeyError: if Master Key is unable to encrypt data key """ kms_params = {"KeyId": self._key_id, "Plaintext": data_key.data_key} if encryption_context: kms_params["EncryptionContext"] = encryption_context if self.config.grant_tokens: kms_params["GrantTokens"] = self.config.grant_tokens # Catch any boto3 errors and normalize to expected EncryptKeyError try: response = self.config.client.encrypt(**kms_params) ciphertext = response["CiphertextBlob"] key_id = response["KeyId"] except (ClientError, KeyError): error_message = "Master Key {key_id} unable to encrypt data key".format(key_id=self._key_id) _LOGGER.exception(error_message) raise EncryptKeyError(error_message) return EncryptedDataKey( key_provider=MasterKeyInfo(provider_id=self.provider_id, key_info=key_id), encrypted_data_key=ciphertext )
def function[_encrypt_data_key, parameter[self, data_key, algorithm, encryption_context]]: constant[Encrypts a data key and returns the ciphertext. :param data_key: Unencrypted data key :type data_key: :class:`aws_encryption_sdk.structures.RawDataKey` or :class:`aws_encryption_sdk.structures.DataKey` :param algorithm: Placeholder to maintain API compatibility with parent :param dict encryption_context: Encryption context to pass to KMS :returns: Data key containing encrypted data key :rtype: aws_encryption_sdk.structures.EncryptedDataKey :raises EncryptKeyError: if Master Key is unable to encrypt data key ] variable[kms_params] assign[=] dictionary[[<ast.Constant object at 0x7da2046231c0>, <ast.Constant object at 0x7da204620670>], [<ast.Attribute object at 0x7da2054a62c0>, <ast.Attribute object at 0x7da2054a4d30>]] if name[encryption_context] begin[:] call[name[kms_params]][constant[EncryptionContext]] assign[=] name[encryption_context] if name[self].config.grant_tokens begin[:] call[name[kms_params]][constant[GrantTokens]] assign[=] name[self].config.grant_tokens <ast.Try object at 0x7da2054a7670> return[call[name[EncryptedDataKey], parameter[]]]
keyword[def] identifier[_encrypt_data_key] ( identifier[self] , identifier[data_key] , identifier[algorithm] , identifier[encryption_context] = keyword[None] ): literal[string] identifier[kms_params] ={ literal[string] : identifier[self] . identifier[_key_id] , literal[string] : identifier[data_key] . identifier[data_key] } keyword[if] identifier[encryption_context] : identifier[kms_params] [ literal[string] ]= identifier[encryption_context] keyword[if] identifier[self] . identifier[config] . identifier[grant_tokens] : identifier[kms_params] [ literal[string] ]= identifier[self] . identifier[config] . identifier[grant_tokens] keyword[try] : identifier[response] = identifier[self] . identifier[config] . identifier[client] . identifier[encrypt] (** identifier[kms_params] ) identifier[ciphertext] = identifier[response] [ literal[string] ] identifier[key_id] = identifier[response] [ literal[string] ] keyword[except] ( identifier[ClientError] , identifier[KeyError] ): identifier[error_message] = literal[string] . identifier[format] ( identifier[key_id] = identifier[self] . identifier[_key_id] ) identifier[_LOGGER] . identifier[exception] ( identifier[error_message] ) keyword[raise] identifier[EncryptKeyError] ( identifier[error_message] ) keyword[return] identifier[EncryptedDataKey] ( identifier[key_provider] = identifier[MasterKeyInfo] ( identifier[provider_id] = identifier[self] . identifier[provider_id] , identifier[key_info] = identifier[key_id] ), identifier[encrypted_data_key] = identifier[ciphertext] )
def _encrypt_data_key(self, data_key, algorithm, encryption_context=None): """Encrypts a data key and returns the ciphertext. :param data_key: Unencrypted data key :type data_key: :class:`aws_encryption_sdk.structures.RawDataKey` or :class:`aws_encryption_sdk.structures.DataKey` :param algorithm: Placeholder to maintain API compatibility with parent :param dict encryption_context: Encryption context to pass to KMS :returns: Data key containing encrypted data key :rtype: aws_encryption_sdk.structures.EncryptedDataKey :raises EncryptKeyError: if Master Key is unable to encrypt data key """ kms_params = {'KeyId': self._key_id, 'Plaintext': data_key.data_key} if encryption_context: kms_params['EncryptionContext'] = encryption_context # depends on [control=['if'], data=[]] if self.config.grant_tokens: kms_params['GrantTokens'] = self.config.grant_tokens # depends on [control=['if'], data=[]] # Catch any boto3 errors and normalize to expected EncryptKeyError try: response = self.config.client.encrypt(**kms_params) ciphertext = response['CiphertextBlob'] key_id = response['KeyId'] # depends on [control=['try'], data=[]] except (ClientError, KeyError): error_message = 'Master Key {key_id} unable to encrypt data key'.format(key_id=self._key_id) _LOGGER.exception(error_message) raise EncryptKeyError(error_message) # depends on [control=['except'], data=[]] return EncryptedDataKey(key_provider=MasterKeyInfo(provider_id=self.provider_id, key_info=key_id), encrypted_data_key=ciphertext)
def find_function_by_name(self, name): """Return the cfg of the requested function by name. """ cfg_rv = None for cfg in self._cfgs: if cfg.name == name: cfg_rv = cfg break return cfg_rv
def function[find_function_by_name, parameter[self, name]]: constant[Return the cfg of the requested function by name. ] variable[cfg_rv] assign[=] constant[None] for taget[name[cfg]] in starred[name[self]._cfgs] begin[:] if compare[name[cfg].name equal[==] name[name]] begin[:] variable[cfg_rv] assign[=] name[cfg] break return[name[cfg_rv]]
keyword[def] identifier[find_function_by_name] ( identifier[self] , identifier[name] ): literal[string] identifier[cfg_rv] = keyword[None] keyword[for] identifier[cfg] keyword[in] identifier[self] . identifier[_cfgs] : keyword[if] identifier[cfg] . identifier[name] == identifier[name] : identifier[cfg_rv] = identifier[cfg] keyword[break] keyword[return] identifier[cfg_rv]
def find_function_by_name(self, name): """Return the cfg of the requested function by name. """ cfg_rv = None for cfg in self._cfgs: if cfg.name == name: cfg_rv = cfg break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cfg']] return cfg_rv
def _save_one(self, model, ctx): """ Saves the created instance. """ assert isinstance(ctx, ResourceQueryContext) self._orm.add(model) self._orm.flush()
def function[_save_one, parameter[self, model, ctx]]: constant[ Saves the created instance. ] assert[call[name[isinstance], parameter[name[ctx], name[ResourceQueryContext]]]] call[name[self]._orm.add, parameter[name[model]]] call[name[self]._orm.flush, parameter[]]
keyword[def] identifier[_save_one] ( identifier[self] , identifier[model] , identifier[ctx] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[ctx] , identifier[ResourceQueryContext] ) identifier[self] . identifier[_orm] . identifier[add] ( identifier[model] ) identifier[self] . identifier[_orm] . identifier[flush] ()
def _save_one(self, model, ctx): """ Saves the created instance. """ assert isinstance(ctx, ResourceQueryContext) self._orm.add(model) self._orm.flush()
def coerce_quotes(quotes): """Coerce a quote type into an acceptable value, or raise an error.""" orig, quotes = quotes, str(quotes) if quotes else None if quotes not in [None, '"', "'"]: raise ValueError("{!r} is not a valid quote type".format(orig)) return quotes
def function[coerce_quotes, parameter[quotes]]: constant[Coerce a quote type into an acceptable value, or raise an error.] <ast.Tuple object at 0x7da1b12ca320> assign[=] tuple[[<ast.Name object at 0x7da1b12c8df0>, <ast.IfExp object at 0x7da1b12c9840>]] if compare[name[quotes] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b12c8fa0>, <ast.Constant object at 0x7da1b12c9090>, <ast.Constant object at 0x7da1b12c8910>]]] begin[:] <ast.Raise object at 0x7da1b12c8040> return[name[quotes]]
keyword[def] identifier[coerce_quotes] ( identifier[quotes] ): literal[string] identifier[orig] , identifier[quotes] = identifier[quotes] , identifier[str] ( identifier[quotes] ) keyword[if] identifier[quotes] keyword[else] keyword[None] keyword[if] identifier[quotes] keyword[not] keyword[in] [ keyword[None] , literal[string] , literal[string] ]: keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[orig] )) keyword[return] identifier[quotes]
def coerce_quotes(quotes): """Coerce a quote type into an acceptable value, or raise an error.""" (orig, quotes) = (quotes, str(quotes) if quotes else None) if quotes not in [None, '"', "'"]: raise ValueError('{!r} is not a valid quote type'.format(orig)) # depends on [control=['if'], data=[]] return quotes
def group_info(name): ''' .. versionadded:: 2016.11.0 Lists all packages in the specified group CLI Example: .. code-block:: bash salt '*' pkg.group_info 'xorg' ''' pkgtypes = ('mandatory', 'optional', 'default', 'conditional') ret = {} for pkgtype in pkgtypes: ret[pkgtype] = set() cmd = ['pacman', '-Sgg', name] out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if not line: continue try: pkg = line.split()[1] except ValueError: log.error('Problem parsing pacman -Sgg: Unexpected formatting in ' 'line: \'%s\'', line) else: ret['default'].add(pkg) for pkgtype in pkgtypes: ret[pkgtype] = sorted(ret[pkgtype]) return ret
def function[group_info, parameter[name]]: constant[ .. versionadded:: 2016.11.0 Lists all packages in the specified group CLI Example: .. code-block:: bash salt '*' pkg.group_info 'xorg' ] variable[pkgtypes] assign[=] tuple[[<ast.Constant object at 0x7da1b21f9960>, <ast.Constant object at 0x7da1b21f8cd0>, <ast.Constant object at 0x7da1b21f8e80>, <ast.Constant object at 0x7da1b21f9fc0>]] variable[ret] assign[=] dictionary[[], []] for taget[name[pkgtype]] in starred[name[pkgtypes]] begin[:] call[name[ret]][name[pkgtype]] assign[=] call[name[set], parameter[]] variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b21f9870>, <ast.Constant object at 0x7da1b21f88b0>, <ast.Name object at 0x7da1b21f9bd0>]] variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run]], parameter[name[cmd]]] for taget[name[line]] in starred[call[name[salt].utils.itertools.split, parameter[name[out], constant[ ]]]] begin[:] if <ast.UnaryOp object at 0x7da1b21f8ca0> begin[:] continue <ast.Try object at 0x7da1b21f84c0> for taget[name[pkgtype]] in starred[name[pkgtypes]] begin[:] call[name[ret]][name[pkgtype]] assign[=] call[name[sorted], parameter[call[name[ret]][name[pkgtype]]]] return[name[ret]]
keyword[def] identifier[group_info] ( identifier[name] ): literal[string] identifier[pkgtypes] =( literal[string] , literal[string] , literal[string] , literal[string] ) identifier[ret] ={} keyword[for] identifier[pkgtype] keyword[in] identifier[pkgtypes] : identifier[ret] [ identifier[pkgtype] ]= identifier[set] () identifier[cmd] =[ literal[string] , literal[string] , identifier[name] ] identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[output_loglevel] = literal[string] , identifier[python_shell] = keyword[False] ) keyword[for] identifier[line] keyword[in] identifier[salt] . identifier[utils] . identifier[itertools] . identifier[split] ( identifier[out] , literal[string] ): keyword[if] keyword[not] identifier[line] : keyword[continue] keyword[try] : identifier[pkg] = identifier[line] . identifier[split] ()[ literal[int] ] keyword[except] identifier[ValueError] : identifier[log] . identifier[error] ( literal[string] literal[string] , identifier[line] ) keyword[else] : identifier[ret] [ literal[string] ]. identifier[add] ( identifier[pkg] ) keyword[for] identifier[pkgtype] keyword[in] identifier[pkgtypes] : identifier[ret] [ identifier[pkgtype] ]= identifier[sorted] ( identifier[ret] [ identifier[pkgtype] ]) keyword[return] identifier[ret]
def group_info(name): """ .. versionadded:: 2016.11.0 Lists all packages in the specified group CLI Example: .. code-block:: bash salt '*' pkg.group_info 'xorg' """ pkgtypes = ('mandatory', 'optional', 'default', 'conditional') ret = {} for pkgtype in pkgtypes: ret[pkgtype] = set() # depends on [control=['for'], data=['pkgtype']] cmd = ['pacman', '-Sgg', name] out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): if not line: continue # depends on [control=['if'], data=[]] try: pkg = line.split()[1] # depends on [control=['try'], data=[]] except ValueError: log.error("Problem parsing pacman -Sgg: Unexpected formatting in line: '%s'", line) # depends on [control=['except'], data=[]] else: ret['default'].add(pkg) # depends on [control=['for'], data=['line']] for pkgtype in pkgtypes: ret[pkgtype] = sorted(ret[pkgtype]) # depends on [control=['for'], data=['pkgtype']] return ret
def _get_imsize(self, im_name): """ get image size info Returns: ---------- tuple of (height, width) """ img = cv2.imread(im_name) return (img.shape[0], img.shape[1])
def function[_get_imsize, parameter[self, im_name]]: constant[ get image size info Returns: ---------- tuple of (height, width) ] variable[img] assign[=] call[name[cv2].imread, parameter[name[im_name]]] return[tuple[[<ast.Subscript object at 0x7da20c76f820>, <ast.Subscript object at 0x7da20c76c430>]]]
keyword[def] identifier[_get_imsize] ( identifier[self] , identifier[im_name] ): literal[string] identifier[img] = identifier[cv2] . identifier[imread] ( identifier[im_name] ) keyword[return] ( identifier[img] . identifier[shape] [ literal[int] ], identifier[img] . identifier[shape] [ literal[int] ])
def _get_imsize(self, im_name): """ get image size info Returns: ---------- tuple of (height, width) """ img = cv2.imread(im_name) return (img.shape[0], img.shape[1])
def map(self, func, iterable, chunksize=None): """A parallel equivalent of the map() builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.""" return self.map_async(func, iterable, chunksize).get()
def function[map, parameter[self, func, iterable, chunksize]]: constant[A parallel equivalent of the map() builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.] return[call[call[name[self].map_async, parameter[name[func], name[iterable], name[chunksize]]].get, parameter[]]]
keyword[def] identifier[map] ( identifier[self] , identifier[func] , identifier[iterable] , identifier[chunksize] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[map_async] ( identifier[func] , identifier[iterable] , identifier[chunksize] ). identifier[get] ()
def map(self, func, iterable, chunksize=None): """A parallel equivalent of the map() builtin function. It blocks till the result is ready. This method chops the iterable into a number of chunks which it submits to the process pool as separate tasks. The (approximate) size of these chunks can be specified by setting chunksize to a positive integer.""" return self.map_async(func, iterable, chunksize).get()
def some(args): """ %prog some fastafile listfile outfastafile generate a subset of fastafile, based on a list """ p = OptionParser(some.__doc__) p.add_option("--exclude", default=False, action="store_true", help="Output sequences not in the list file [default: %default]") p.add_option("--uniprot", default=False, action="store_true", help="Header is from uniprot [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(p.print_help()) fastafile, listfile, outfastafile = args outfastahandle = must_open(outfastafile, "w") qualfile = get_qual(fastafile) names = set(x.strip() for x in open(listfile)) if qualfile: outqualfile = outfastafile + ".qual" outqualhandle = open(outqualfile, "w") parser = iter_fasta_qual(fastafile, qualfile) else: parser = SeqIO.parse(fastafile, "fasta") num_records = 0 for rec in parser: name = rec.id if opts.uniprot: name = name.split("|")[-1] if opts.exclude: if name in names: continue else: if name not in names: continue SeqIO.write([rec], outfastahandle, "fasta") if qualfile: SeqIO.write([rec], outqualhandle, "qual") num_records += 1 logging.debug("A total of %d records written to `%s`" % \ (num_records, outfastafile))
def function[some, parameter[args]]: constant[ %prog some fastafile listfile outfastafile generate a subset of fastafile, based on a list ] variable[p] assign[=] call[name[OptionParser], parameter[name[some].__doc__]] call[name[p].add_option, parameter[constant[--exclude]]] call[name[p].add_option, parameter[constant[--uniprot]]] <ast.Tuple object at 0x7da1b094faf0> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[3]] begin[:] call[name[sys].exit, parameter[call[name[p].print_help, parameter[]]]] <ast.Tuple object at 0x7da1b094d210> assign[=] name[args] variable[outfastahandle] assign[=] call[name[must_open], parameter[name[outfastafile], constant[w]]] variable[qualfile] assign[=] call[name[get_qual], parameter[name[fastafile]]] variable[names] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b094f850>]] if name[qualfile] begin[:] variable[outqualfile] assign[=] binary_operation[name[outfastafile] + constant[.qual]] variable[outqualhandle] assign[=] call[name[open], parameter[name[outqualfile], constant[w]]] variable[parser] assign[=] call[name[iter_fasta_qual], parameter[name[fastafile], name[qualfile]]] variable[num_records] assign[=] constant[0] for taget[name[rec]] in starred[name[parser]] begin[:] variable[name] assign[=] name[rec].id if name[opts].uniprot begin[:] variable[name] assign[=] call[call[name[name].split, parameter[constant[|]]]][<ast.UnaryOp object at 0x7da1b094ce50>] if name[opts].exclude begin[:] if compare[name[name] in name[names]] begin[:] continue call[name[SeqIO].write, parameter[list[[<ast.Name object at 0x7da1b094cee0>]], name[outfastahandle], constant[fasta]]] if name[qualfile] begin[:] call[name[SeqIO].write, parameter[list[[<ast.Name object at 0x7da20c6e4e20>]], name[outqualhandle], constant[qual]]] <ast.AugAssign object at 0x7da20c6e7e80> call[name[logging].debug, parameter[binary_operation[constant[A total of %d records written to 
`%s`] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6e5630>, <ast.Name object at 0x7da20c6e6f50>]]]]]
keyword[def] identifier[some] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[some] . identifier[__doc__] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] , identifier[help] = literal[string] ) identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )!= literal[int] : identifier[sys] . identifier[exit] ( identifier[p] . identifier[print_help] ()) identifier[fastafile] , identifier[listfile] , identifier[outfastafile] = identifier[args] identifier[outfastahandle] = identifier[must_open] ( identifier[outfastafile] , literal[string] ) identifier[qualfile] = identifier[get_qual] ( identifier[fastafile] ) identifier[names] = identifier[set] ( identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[open] ( identifier[listfile] )) keyword[if] identifier[qualfile] : identifier[outqualfile] = identifier[outfastafile] + literal[string] identifier[outqualhandle] = identifier[open] ( identifier[outqualfile] , literal[string] ) identifier[parser] = identifier[iter_fasta_qual] ( identifier[fastafile] , identifier[qualfile] ) keyword[else] : identifier[parser] = identifier[SeqIO] . identifier[parse] ( identifier[fastafile] , literal[string] ) identifier[num_records] = literal[int] keyword[for] identifier[rec] keyword[in] identifier[parser] : identifier[name] = identifier[rec] . identifier[id] keyword[if] identifier[opts] . identifier[uniprot] : identifier[name] = identifier[name] . identifier[split] ( literal[string] )[- literal[int] ] keyword[if] identifier[opts] . 
identifier[exclude] : keyword[if] identifier[name] keyword[in] identifier[names] : keyword[continue] keyword[else] : keyword[if] identifier[name] keyword[not] keyword[in] identifier[names] : keyword[continue] identifier[SeqIO] . identifier[write] ([ identifier[rec] ], identifier[outfastahandle] , literal[string] ) keyword[if] identifier[qualfile] : identifier[SeqIO] . identifier[write] ([ identifier[rec] ], identifier[outqualhandle] , literal[string] ) identifier[num_records] += literal[int] identifier[logging] . identifier[debug] ( literal[string] %( identifier[num_records] , identifier[outfastafile] ))
def some(args): """ %prog some fastafile listfile outfastafile generate a subset of fastafile, based on a list """ p = OptionParser(some.__doc__) p.add_option('--exclude', default=False, action='store_true', help='Output sequences not in the list file [default: %default]') p.add_option('--uniprot', default=False, action='store_true', help='Header is from uniprot [default: %default]') (opts, args) = p.parse_args(args) if len(args) != 3: sys.exit(p.print_help()) # depends on [control=['if'], data=[]] (fastafile, listfile, outfastafile) = args outfastahandle = must_open(outfastafile, 'w') qualfile = get_qual(fastafile) names = set((x.strip() for x in open(listfile))) if qualfile: outqualfile = outfastafile + '.qual' outqualhandle = open(outqualfile, 'w') parser = iter_fasta_qual(fastafile, qualfile) # depends on [control=['if'], data=[]] else: parser = SeqIO.parse(fastafile, 'fasta') num_records = 0 for rec in parser: name = rec.id if opts.uniprot: name = name.split('|')[-1] # depends on [control=['if'], data=[]] if opts.exclude: if name in names: continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif name not in names: continue # depends on [control=['if'], data=[]] SeqIO.write([rec], outfastahandle, 'fasta') if qualfile: SeqIO.write([rec], outqualhandle, 'qual') # depends on [control=['if'], data=[]] num_records += 1 # depends on [control=['for'], data=['rec']] logging.debug('A total of %d records written to `%s`' % (num_records, outfastafile))
def _normalize(self, address): """ Normalize prefixes, suffixes and other to make matching original to returned easier. """ normalized_address = [] if self.logger: self.logger.debug("Normalizing Address: {0}".format(address)) for token in address.split(): if token.upper() in self.parser.suffixes.keys(): normalized_address.append(self.parser.suffixes[token.upper()].lower()) elif token.upper() in self.parser.suffixes.values(): normalized_address.append(token.lower()) elif token.upper().replace('.', '') in self.parser.suffixes.values(): normalized_address.append(token.lower().replace('.', '')) elif token.lower() in self.parser.prefixes.keys(): normalized_address.append(self.parser.prefixes[token.lower()].lower()) elif token.upper() in self.parser.prefixes.values(): normalized_address.append(token.lower()[:-1]) elif token.upper() + '.' in self.parser.prefixes.values(): normalized_address.append(token.lower()) else: normalized_address.append(token.lower()) return normalized_address
def function[_normalize, parameter[self, address]]: constant[ Normalize prefixes, suffixes and other to make matching original to returned easier. ] variable[normalized_address] assign[=] list[[]] if name[self].logger begin[:] call[name[self].logger.debug, parameter[call[constant[Normalizing Address: {0}].format, parameter[name[address]]]]] for taget[name[token]] in starred[call[name[address].split, parameter[]]] begin[:] if compare[call[name[token].upper, parameter[]] in call[name[self].parser.suffixes.keys, parameter[]]] begin[:] call[name[normalized_address].append, parameter[call[call[name[self].parser.suffixes][call[name[token].upper, parameter[]]].lower, parameter[]]]] return[name[normalized_address]]
keyword[def] identifier[_normalize] ( identifier[self] , identifier[address] ): literal[string] identifier[normalized_address] =[] keyword[if] identifier[self] . identifier[logger] : identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[address] )) keyword[for] identifier[token] keyword[in] identifier[address] . identifier[split] (): keyword[if] identifier[token] . identifier[upper] () keyword[in] identifier[self] . identifier[parser] . identifier[suffixes] . identifier[keys] (): identifier[normalized_address] . identifier[append] ( identifier[self] . identifier[parser] . identifier[suffixes] [ identifier[token] . identifier[upper] ()]. identifier[lower] ()) keyword[elif] identifier[token] . identifier[upper] () keyword[in] identifier[self] . identifier[parser] . identifier[suffixes] . identifier[values] (): identifier[normalized_address] . identifier[append] ( identifier[token] . identifier[lower] ()) keyword[elif] identifier[token] . identifier[upper] (). identifier[replace] ( literal[string] , literal[string] ) keyword[in] identifier[self] . identifier[parser] . identifier[suffixes] . identifier[values] (): identifier[normalized_address] . identifier[append] ( identifier[token] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] )) keyword[elif] identifier[token] . identifier[lower] () keyword[in] identifier[self] . identifier[parser] . identifier[prefixes] . identifier[keys] (): identifier[normalized_address] . identifier[append] ( identifier[self] . identifier[parser] . identifier[prefixes] [ identifier[token] . identifier[lower] ()]. identifier[lower] ()) keyword[elif] identifier[token] . identifier[upper] () keyword[in] identifier[self] . identifier[parser] . identifier[prefixes] . identifier[values] (): identifier[normalized_address] . identifier[append] ( identifier[token] . identifier[lower] ()[:- literal[int] ]) keyword[elif] identifier[token] . 
identifier[upper] ()+ literal[string] keyword[in] identifier[self] . identifier[parser] . identifier[prefixes] . identifier[values] (): identifier[normalized_address] . identifier[append] ( identifier[token] . identifier[lower] ()) keyword[else] : identifier[normalized_address] . identifier[append] ( identifier[token] . identifier[lower] ()) keyword[return] identifier[normalized_address]
def _normalize(self, address): """ Normalize prefixes, suffixes and other to make matching original to returned easier. """ normalized_address = [] if self.logger: self.logger.debug('Normalizing Address: {0}'.format(address)) # depends on [control=['if'], data=[]] for token in address.split(): if token.upper() in self.parser.suffixes.keys(): normalized_address.append(self.parser.suffixes[token.upper()].lower()) # depends on [control=['if'], data=[]] elif token.upper() in self.parser.suffixes.values(): normalized_address.append(token.lower()) # depends on [control=['if'], data=[]] elif token.upper().replace('.', '') in self.parser.suffixes.values(): normalized_address.append(token.lower().replace('.', '')) # depends on [control=['if'], data=[]] elif token.lower() in self.parser.prefixes.keys(): normalized_address.append(self.parser.prefixes[token.lower()].lower()) # depends on [control=['if'], data=[]] elif token.upper() in self.parser.prefixes.values(): normalized_address.append(token.lower()[:-1]) # depends on [control=['if'], data=[]] elif token.upper() + '.' in self.parser.prefixes.values(): normalized_address.append(token.lower()) # depends on [control=['if'], data=[]] else: normalized_address.append(token.lower()) # depends on [control=['for'], data=['token']] return normalized_address
def verify_compact_verbose(self, jws=None, keys=None, allow_none=False, sigalg=None): """ Verify a JWT signature and return dict with validation results :param jws: A signed JSON Web Token :param keys: A list of keys that can possibly be used to verify the signature :param allow_none: If signature algorithm 'none' is allowed :param sigalg: Expected sigalg :return: Dictionary with 2 keys 'msg' required, 'key' optional. The value of 'msg' is the unpacked and verified message. The value of 'key' is the key used to verify the message """ if jws: jwt = JWSig().unpack(jws) if len(jwt) != 3: raise WrongNumberOfParts(len(jwt)) self.jwt = jwt elif not self.jwt: raise ValueError('Missing singed JWT') else: jwt = self.jwt try: _alg = jwt.headers["alg"] except KeyError: _alg = None else: if _alg is None or _alg.lower() == "none": if allow_none: self.msg = jwt.payload() return {'msg': self.msg} else: raise SignerAlgError("none not allowed") if "alg" in self and self['alg'] and _alg: if isinstance(self['alg'], list): if _alg not in self["alg"] : raise SignerAlgError( "Wrong signing algorithm, expected {} got {}".format( self['alg'], _alg)) elif _alg != self['alg']: raise SignerAlgError( "Wrong signing algorithm, expected {} got {}".format( self['alg'], _alg)) if sigalg and sigalg != _alg: raise SignerAlgError("Expected {0} got {1}".format( sigalg, jwt.headers["alg"])) self["alg"] = _alg if keys: _keys = self.pick_keys(keys) else: _keys = self.pick_keys(self._get_keys()) if not _keys: if "kid" in self: raise NoSuitableSigningKeys( "No key with kid: %s" % (self["kid"])) elif "kid" in self.jwt.headers: raise NoSuitableSigningKeys( "No key with kid: %s" % (self.jwt.headers["kid"])) else: raise NoSuitableSigningKeys("No key for algorithm: %s" % _alg) verifier = SIGNER_ALGS[_alg] for key in _keys: if isinstance(key, AsymmetricKey): _key = key.public_key() else: _key = key.key try: if not verifier.verify(jwt.sign_input(), jwt.signature(), _key): continue except (BadSignature, 
IndexError): pass except (ValueError, TypeError) as err: logger.warning('Exception "{}" caught'.format(err)) else: logger.debug( "Verified message using key with kid=%s" % key.kid) self.msg = jwt.payload() self.key = key return {'msg': self.msg, 'key': key} raise BadSignature()
def function[verify_compact_verbose, parameter[self, jws, keys, allow_none, sigalg]]: constant[ Verify a JWT signature and return dict with validation results :param jws: A signed JSON Web Token :param keys: A list of keys that can possibly be used to verify the signature :param allow_none: If signature algorithm 'none' is allowed :param sigalg: Expected sigalg :return: Dictionary with 2 keys 'msg' required, 'key' optional. The value of 'msg' is the unpacked and verified message. The value of 'key' is the key used to verify the message ] if name[jws] begin[:] variable[jwt] assign[=] call[call[name[JWSig], parameter[]].unpack, parameter[name[jws]]] if compare[call[name[len], parameter[name[jwt]]] not_equal[!=] constant[3]] begin[:] <ast.Raise object at 0x7da1b04323e0> name[self].jwt assign[=] name[jwt] <ast.Try object at 0x7da1b04320e0> if <ast.BoolOp object at 0x7da1b04322c0> begin[:] if call[name[isinstance], parameter[call[name[self]][constant[alg]], name[list]]] begin[:] if compare[name[_alg] <ast.NotIn object at 0x7da2590d7190> call[name[self]][constant[alg]]] begin[:] <ast.Raise object at 0x7da1b0431b10> if <ast.BoolOp object at 0x7da1b04309d0> begin[:] <ast.Raise object at 0x7da1b0431120> call[name[self]][constant[alg]] assign[=] name[_alg] if name[keys] begin[:] variable[_keys] assign[=] call[name[self].pick_keys, parameter[name[keys]]] if <ast.UnaryOp object at 0x7da1b05bc850> begin[:] if compare[constant[kid] in name[self]] begin[:] <ast.Raise object at 0x7da1b05beda0> variable[verifier] assign[=] call[name[SIGNER_ALGS]][name[_alg]] for taget[name[key]] in starred[name[_keys]] begin[:] if call[name[isinstance], parameter[name[key], name[AsymmetricKey]]] begin[:] variable[_key] assign[=] call[name[key].public_key, parameter[]] <ast.Try object at 0x7da1b05bd180> <ast.Raise object at 0x7da1b05bf5e0>
keyword[def] identifier[verify_compact_verbose] ( identifier[self] , identifier[jws] = keyword[None] , identifier[keys] = keyword[None] , identifier[allow_none] = keyword[False] , identifier[sigalg] = keyword[None] ): literal[string] keyword[if] identifier[jws] : identifier[jwt] = identifier[JWSig] (). identifier[unpack] ( identifier[jws] ) keyword[if] identifier[len] ( identifier[jwt] )!= literal[int] : keyword[raise] identifier[WrongNumberOfParts] ( identifier[len] ( identifier[jwt] )) identifier[self] . identifier[jwt] = identifier[jwt] keyword[elif] keyword[not] identifier[self] . identifier[jwt] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[else] : identifier[jwt] = identifier[self] . identifier[jwt] keyword[try] : identifier[_alg] = identifier[jwt] . identifier[headers] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[_alg] = keyword[None] keyword[else] : keyword[if] identifier[_alg] keyword[is] keyword[None] keyword[or] identifier[_alg] . identifier[lower] ()== literal[string] : keyword[if] identifier[allow_none] : identifier[self] . identifier[msg] = identifier[jwt] . identifier[payload] () keyword[return] { literal[string] : identifier[self] . identifier[msg] } keyword[else] : keyword[raise] identifier[SignerAlgError] ( literal[string] ) keyword[if] literal[string] keyword[in] identifier[self] keyword[and] identifier[self] [ literal[string] ] keyword[and] identifier[_alg] : keyword[if] identifier[isinstance] ( identifier[self] [ literal[string] ], identifier[list] ): keyword[if] identifier[_alg] keyword[not] keyword[in] identifier[self] [ literal[string] ]: keyword[raise] identifier[SignerAlgError] ( literal[string] . identifier[format] ( identifier[self] [ literal[string] ], identifier[_alg] )) keyword[elif] identifier[_alg] != identifier[self] [ literal[string] ]: keyword[raise] identifier[SignerAlgError] ( literal[string] . 
identifier[format] ( identifier[self] [ literal[string] ], identifier[_alg] )) keyword[if] identifier[sigalg] keyword[and] identifier[sigalg] != identifier[_alg] : keyword[raise] identifier[SignerAlgError] ( literal[string] . identifier[format] ( identifier[sigalg] , identifier[jwt] . identifier[headers] [ literal[string] ])) identifier[self] [ literal[string] ]= identifier[_alg] keyword[if] identifier[keys] : identifier[_keys] = identifier[self] . identifier[pick_keys] ( identifier[keys] ) keyword[else] : identifier[_keys] = identifier[self] . identifier[pick_keys] ( identifier[self] . identifier[_get_keys] ()) keyword[if] keyword[not] identifier[_keys] : keyword[if] literal[string] keyword[in] identifier[self] : keyword[raise] identifier[NoSuitableSigningKeys] ( literal[string] %( identifier[self] [ literal[string] ])) keyword[elif] literal[string] keyword[in] identifier[self] . identifier[jwt] . identifier[headers] : keyword[raise] identifier[NoSuitableSigningKeys] ( literal[string] %( identifier[self] . identifier[jwt] . identifier[headers] [ literal[string] ])) keyword[else] : keyword[raise] identifier[NoSuitableSigningKeys] ( literal[string] % identifier[_alg] ) identifier[verifier] = identifier[SIGNER_ALGS] [ identifier[_alg] ] keyword[for] identifier[key] keyword[in] identifier[_keys] : keyword[if] identifier[isinstance] ( identifier[key] , identifier[AsymmetricKey] ): identifier[_key] = identifier[key] . identifier[public_key] () keyword[else] : identifier[_key] = identifier[key] . identifier[key] keyword[try] : keyword[if] keyword[not] identifier[verifier] . identifier[verify] ( identifier[jwt] . identifier[sign_input] (), identifier[jwt] . identifier[signature] (), identifier[_key] ): keyword[continue] keyword[except] ( identifier[BadSignature] , identifier[IndexError] ): keyword[pass] keyword[except] ( identifier[ValueError] , identifier[TypeError] ) keyword[as] identifier[err] : identifier[logger] . identifier[warning] ( literal[string] . 
identifier[format] ( identifier[err] )) keyword[else] : identifier[logger] . identifier[debug] ( literal[string] % identifier[key] . identifier[kid] ) identifier[self] . identifier[msg] = identifier[jwt] . identifier[payload] () identifier[self] . identifier[key] = identifier[key] keyword[return] { literal[string] : identifier[self] . identifier[msg] , literal[string] : identifier[key] } keyword[raise] identifier[BadSignature] ()
def verify_compact_verbose(self, jws=None, keys=None, allow_none=False, sigalg=None): """ Verify a JWT signature and return dict with validation results :param jws: A signed JSON Web Token :param keys: A list of keys that can possibly be used to verify the signature :param allow_none: If signature algorithm 'none' is allowed :param sigalg: Expected sigalg :return: Dictionary with 2 keys 'msg' required, 'key' optional. The value of 'msg' is the unpacked and verified message. The value of 'key' is the key used to verify the message """ if jws: jwt = JWSig().unpack(jws) if len(jwt) != 3: raise WrongNumberOfParts(len(jwt)) # depends on [control=['if'], data=[]] self.jwt = jwt # depends on [control=['if'], data=[]] elif not self.jwt: raise ValueError('Missing singed JWT') # depends on [control=['if'], data=[]] else: jwt = self.jwt try: _alg = jwt.headers['alg'] # depends on [control=['try'], data=[]] except KeyError: _alg = None # depends on [control=['except'], data=[]] else: if _alg is None or _alg.lower() == 'none': if allow_none: self.msg = jwt.payload() return {'msg': self.msg} # depends on [control=['if'], data=[]] else: raise SignerAlgError('none not allowed') # depends on [control=['if'], data=[]] if 'alg' in self and self['alg'] and _alg: if isinstance(self['alg'], list): if _alg not in self['alg']: raise SignerAlgError('Wrong signing algorithm, expected {} got {}'.format(self['alg'], _alg)) # depends on [control=['if'], data=['_alg']] # depends on [control=['if'], data=[]] elif _alg != self['alg']: raise SignerAlgError('Wrong signing algorithm, expected {} got {}'.format(self['alg'], _alg)) # depends on [control=['if'], data=['_alg']] # depends on [control=['if'], data=[]] if sigalg and sigalg != _alg: raise SignerAlgError('Expected {0} got {1}'.format(sigalg, jwt.headers['alg'])) # depends on [control=['if'], data=[]] self['alg'] = _alg if keys: _keys = self.pick_keys(keys) # depends on [control=['if'], data=[]] else: _keys = self.pick_keys(self._get_keys()) 
if not _keys: if 'kid' in self: raise NoSuitableSigningKeys('No key with kid: %s' % self['kid']) # depends on [control=['if'], data=['self']] elif 'kid' in self.jwt.headers: raise NoSuitableSigningKeys('No key with kid: %s' % self.jwt.headers['kid']) # depends on [control=['if'], data=[]] else: raise NoSuitableSigningKeys('No key for algorithm: %s' % _alg) # depends on [control=['if'], data=[]] verifier = SIGNER_ALGS[_alg] for key in _keys: if isinstance(key, AsymmetricKey): _key = key.public_key() # depends on [control=['if'], data=[]] else: _key = key.key try: if not verifier.verify(jwt.sign_input(), jwt.signature(), _key): continue # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except (BadSignature, IndexError): pass # depends on [control=['except'], data=[]] except (ValueError, TypeError) as err: logger.warning('Exception "{}" caught'.format(err)) # depends on [control=['except'], data=['err']] else: logger.debug('Verified message using key with kid=%s' % key.kid) self.msg = jwt.payload() self.key = key return {'msg': self.msg, 'key': key} # depends on [control=['for'], data=['key']] raise BadSignature()
def followers(self): """获取话题关注者 :return: 话题关注者,返回生成器 :rtype: Author.Iterable """ from .author import Author, ANONYMOUS self._make_soup() gotten_data_num = 20 data = { '_xsrf': self.xsrf, 'start': '', 'offset': 0 } while gotten_data_num == 20: res = self._session.post( Topic_Get_More_Follower_Url.format(self.id), data=data) j = res.json()['msg'] gotten_data_num = j[0] data['offset'] += gotten_data_num soup = BeautifulSoup(j[1]) divs = soup.find_all('div', class_='zm-person-item') for div in divs: h2 = div.h2 url = Zhihu_URL + h2.a['href'] name = h2.a.text motto = h2.parent.div.text.strip() try: yield Author(url, name, motto, session=self._session) except ValueError: # invalid url yield ANONYMOUS data['start'] = int(re_get_number.match(divs[-1]['id']).group(1))
def function[followers, parameter[self]]: constant[获取话题关注者 :return: 话题关注者,返回生成器 :rtype: Author.Iterable ] from relative_module[author] import module[Author], module[ANONYMOUS] call[name[self]._make_soup, parameter[]] variable[gotten_data_num] assign[=] constant[20] variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b08e4f70>, <ast.Constant object at 0x7da1b08e4670>, <ast.Constant object at 0x7da1b08e4f40>], [<ast.Attribute object at 0x7da1b08e7d90>, <ast.Constant object at 0x7da1b08e57e0>, <ast.Constant object at 0x7da1b08e7e50>]] while compare[name[gotten_data_num] equal[==] constant[20]] begin[:] variable[res] assign[=] call[name[self]._session.post, parameter[call[name[Topic_Get_More_Follower_Url].format, parameter[name[self].id]]]] variable[j] assign[=] call[call[name[res].json, parameter[]]][constant[msg]] variable[gotten_data_num] assign[=] call[name[j]][constant[0]] <ast.AugAssign object at 0x7da1b08e5ed0> variable[soup] assign[=] call[name[BeautifulSoup], parameter[call[name[j]][constant[1]]]] variable[divs] assign[=] call[name[soup].find_all, parameter[constant[div]]] for taget[name[div]] in starred[name[divs]] begin[:] variable[h2] assign[=] name[div].h2 variable[url] assign[=] binary_operation[name[Zhihu_URL] + call[name[h2].a][constant[href]]] variable[name] assign[=] name[h2].a.text variable[motto] assign[=] call[name[h2].parent.div.text.strip, parameter[]] <ast.Try object at 0x7da1b26ae890> call[name[data]][constant[start]] assign[=] call[name[int], parameter[call[call[name[re_get_number].match, parameter[call[call[name[divs]][<ast.UnaryOp object at 0x7da204961750>]][constant[id]]]].group, parameter[constant[1]]]]]
keyword[def] identifier[followers] ( identifier[self] ): literal[string] keyword[from] . identifier[author] keyword[import] identifier[Author] , identifier[ANONYMOUS] identifier[self] . identifier[_make_soup] () identifier[gotten_data_num] = literal[int] identifier[data] ={ literal[string] : identifier[self] . identifier[xsrf] , literal[string] : literal[string] , literal[string] : literal[int] } keyword[while] identifier[gotten_data_num] == literal[int] : identifier[res] = identifier[self] . identifier[_session] . identifier[post] ( identifier[Topic_Get_More_Follower_Url] . identifier[format] ( identifier[self] . identifier[id] ), identifier[data] = identifier[data] ) identifier[j] = identifier[res] . identifier[json] ()[ literal[string] ] identifier[gotten_data_num] = identifier[j] [ literal[int] ] identifier[data] [ literal[string] ]+= identifier[gotten_data_num] identifier[soup] = identifier[BeautifulSoup] ( identifier[j] [ literal[int] ]) identifier[divs] = identifier[soup] . identifier[find_all] ( literal[string] , identifier[class_] = literal[string] ) keyword[for] identifier[div] keyword[in] identifier[divs] : identifier[h2] = identifier[div] . identifier[h2] identifier[url] = identifier[Zhihu_URL] + identifier[h2] . identifier[a] [ literal[string] ] identifier[name] = identifier[h2] . identifier[a] . identifier[text] identifier[motto] = identifier[h2] . identifier[parent] . identifier[div] . identifier[text] . identifier[strip] () keyword[try] : keyword[yield] identifier[Author] ( identifier[url] , identifier[name] , identifier[motto] , identifier[session] = identifier[self] . identifier[_session] ) keyword[except] identifier[ValueError] : keyword[yield] identifier[ANONYMOUS] identifier[data] [ literal[string] ]= identifier[int] ( identifier[re_get_number] . identifier[match] ( identifier[divs] [- literal[int] ][ literal[string] ]). identifier[group] ( literal[int] ))
def followers(self): """获取话题关注者 :return: 话题关注者,返回生成器 :rtype: Author.Iterable """ from .author import Author, ANONYMOUS self._make_soup() gotten_data_num = 20 data = {'_xsrf': self.xsrf, 'start': '', 'offset': 0} while gotten_data_num == 20: res = self._session.post(Topic_Get_More_Follower_Url.format(self.id), data=data) j = res.json()['msg'] gotten_data_num = j[0] data['offset'] += gotten_data_num soup = BeautifulSoup(j[1]) divs = soup.find_all('div', class_='zm-person-item') for div in divs: h2 = div.h2 url = Zhihu_URL + h2.a['href'] name = h2.a.text motto = h2.parent.div.text.strip() try: yield Author(url, name, motto, session=self._session) # depends on [control=['try'], data=[]] except ValueError: # invalid url yield ANONYMOUS # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['div']] data['start'] = int(re_get_number.match(divs[-1]['id']).group(1)) # depends on [control=['while'], data=['gotten_data_num']]
def get_ips(self, instance_id): """Retrieves the private and public ip addresses for a given instance. Note: Azure normally provides access to vms from a shared load balancer IP and mapping of ssh ports on the vms. So by default, the Azure provider returns strings of the form 'ip:port'. However, 'stock' elasticluster and ansible don't support this, so _use_public_ips uses Azure PublicIPs to expose each vm on the internet with its own IP and using the standard SSH port. :return: list (IPs) """ self._restore_from_storage(instance_id) if self._start_failed: raise Exception('get_ips for node %s: failing due to' ' previous errors.' % instance_id) ret = list() v_m = self._qualified_name_to_vm(instance_id) if not v_m: raise Exception("Can't find instance_id %s" % instance_id) if self._config._use_public_ips: ret.append(v_m._public_ip) else: ret.append("%s:%s" % (v_m._public_ip, v_m._ssh_port)) log.debug('get_ips (instance %s) returning %s', instance_id, ', '.join(ret)) return ret
def function[get_ips, parameter[self, instance_id]]: constant[Retrieves the private and public ip addresses for a given instance. Note: Azure normally provides access to vms from a shared load balancer IP and mapping of ssh ports on the vms. So by default, the Azure provider returns strings of the form 'ip:port'. However, 'stock' elasticluster and ansible don't support this, so _use_public_ips uses Azure PublicIPs to expose each vm on the internet with its own IP and using the standard SSH port. :return: list (IPs) ] call[name[self]._restore_from_storage, parameter[name[instance_id]]] if name[self]._start_failed begin[:] <ast.Raise object at 0x7da20cabd6f0> variable[ret] assign[=] call[name[list], parameter[]] variable[v_m] assign[=] call[name[self]._qualified_name_to_vm, parameter[name[instance_id]]] if <ast.UnaryOp object at 0x7da20cabdfc0> begin[:] <ast.Raise object at 0x7da20cabe7d0> if name[self]._config._use_public_ips begin[:] call[name[ret].append, parameter[name[v_m]._public_ip]] call[name[log].debug, parameter[constant[get_ips (instance %s) returning %s], name[instance_id], call[constant[, ].join, parameter[name[ret]]]]] return[name[ret]]
keyword[def] identifier[get_ips] ( identifier[self] , identifier[instance_id] ): literal[string] identifier[self] . identifier[_restore_from_storage] ( identifier[instance_id] ) keyword[if] identifier[self] . identifier[_start_failed] : keyword[raise] identifier[Exception] ( literal[string] literal[string] % identifier[instance_id] ) identifier[ret] = identifier[list] () identifier[v_m] = identifier[self] . identifier[_qualified_name_to_vm] ( identifier[instance_id] ) keyword[if] keyword[not] identifier[v_m] : keyword[raise] identifier[Exception] ( literal[string] % identifier[instance_id] ) keyword[if] identifier[self] . identifier[_config] . identifier[_use_public_ips] : identifier[ret] . identifier[append] ( identifier[v_m] . identifier[_public_ip] ) keyword[else] : identifier[ret] . identifier[append] ( literal[string] %( identifier[v_m] . identifier[_public_ip] , identifier[v_m] . identifier[_ssh_port] )) identifier[log] . identifier[debug] ( literal[string] , identifier[instance_id] , literal[string] . identifier[join] ( identifier[ret] )) keyword[return] identifier[ret]
def get_ips(self, instance_id): """Retrieves the private and public ip addresses for a given instance. Note: Azure normally provides access to vms from a shared load balancer IP and mapping of ssh ports on the vms. So by default, the Azure provider returns strings of the form 'ip:port'. However, 'stock' elasticluster and ansible don't support this, so _use_public_ips uses Azure PublicIPs to expose each vm on the internet with its own IP and using the standard SSH port. :return: list (IPs) """ self._restore_from_storage(instance_id) if self._start_failed: raise Exception('get_ips for node %s: failing due to previous errors.' % instance_id) # depends on [control=['if'], data=[]] ret = list() v_m = self._qualified_name_to_vm(instance_id) if not v_m: raise Exception("Can't find instance_id %s" % instance_id) # depends on [control=['if'], data=[]] if self._config._use_public_ips: ret.append(v_m._public_ip) # depends on [control=['if'], data=[]] else: ret.append('%s:%s' % (v_m._public_ip, v_m._ssh_port)) log.debug('get_ips (instance %s) returning %s', instance_id, ', '.join(ret)) return ret
def valid_max_age(number): "Validate a cookie Max-Age" if isinstance(number, basestring): try: number = long(number) except (ValueError, TypeError): return False if number >= 0 and number % 1 == 0: return True return False
def function[valid_max_age, parameter[number]]: constant[Validate a cookie Max-Age] if call[name[isinstance], parameter[name[number], name[basestring]]] begin[:] <ast.Try object at 0x7da18f00ca90> if <ast.BoolOp object at 0x7da18f00f790> begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[valid_max_age] ( identifier[number] ): literal[string] keyword[if] identifier[isinstance] ( identifier[number] , identifier[basestring] ): keyword[try] : identifier[number] = identifier[long] ( identifier[number] ) keyword[except] ( identifier[ValueError] , identifier[TypeError] ): keyword[return] keyword[False] keyword[if] identifier[number] >= literal[int] keyword[and] identifier[number] % literal[int] == literal[int] : keyword[return] keyword[True] keyword[return] keyword[False]
def valid_max_age(number): """Validate a cookie Max-Age""" if isinstance(number, basestring): try: number = long(number) # depends on [control=['try'], data=[]] except (ValueError, TypeError): return False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] if number >= 0 and number % 1 == 0: return True # depends on [control=['if'], data=[]] return False
def uniform(self, key, min_value=0., max_value=1.): """Returns a random number between min_value and max_value""" return min_value + self._random(key) * (max_value - min_value)
def function[uniform, parameter[self, key, min_value, max_value]]: constant[Returns a random number between min_value and max_value] return[binary_operation[name[min_value] + binary_operation[call[name[self]._random, parameter[name[key]]] * binary_operation[name[max_value] - name[min_value]]]]]
keyword[def] identifier[uniform] ( identifier[self] , identifier[key] , identifier[min_value] = literal[int] , identifier[max_value] = literal[int] ): literal[string] keyword[return] identifier[min_value] + identifier[self] . identifier[_random] ( identifier[key] )*( identifier[max_value] - identifier[min_value] )
def uniform(self, key, min_value=0.0, max_value=1.0): """Returns a random number between min_value and max_value""" return min_value + self._random(key) * (max_value - min_value)
def haversine_distance(origin, destination): """ Calculate the Haversine distance. Parameters ---------- origin : tuple of float (lat, long) destination : tuple of float (lat, long) Returns ------- distance_in_km : float Examples -------- >>> munich = (48.1372, 11.5756) >>> berlin = (52.5186, 13.4083) >>> round(haversine_distance(munich, berlin), 1) 504.2 >>> new_york_city = (40.712777777778, -74.005833333333) # NYC >>> round(haversine_distance(berlin, new_york_city), 1) 6385.3 """ lat1, lon1 = origin lat2, lon2 = destination if not (-90.0 <= lat1 <= 90): raise ValueError('lat1={:2.2f}, but must be in [-90,+90]'.format(lat1)) if not (-90.0 <= lat2 <= 90): raise ValueError('lat2={:2.2f}, but must be in [-90,+90]'.format(lat2)) if not (-180.0 <= lon1 <= 180): raise ValueError('lon1={:2.2f}, but must be in [-180,+180]' .format(lat1)) if not (-180.0 <= lon2 <= 180): raise ValueError('lon1={:2.2f}, but must be in [-180,+180]' .format(lat1)) radius = 6371 # km dlat = math_stl.radians(lat2 - lat1) dlon = math_stl.radians(lon2 - lon1) a = (math_stl.sin(dlat / 2) * math_stl.sin(dlat / 2) + math_stl.cos(math_stl.radians(lat1)) * math_stl.cos(math_stl.radians(lat2)) * math_stl.sin(dlon / 2) * math_stl.sin(dlon / 2)) c = 2 * math_stl.atan2(math_stl.sqrt(a), math_stl.sqrt(1 - a)) d = radius * c return d
def function[haversine_distance, parameter[origin, destination]]: constant[ Calculate the Haversine distance. Parameters ---------- origin : tuple of float (lat, long) destination : tuple of float (lat, long) Returns ------- distance_in_km : float Examples -------- >>> munich = (48.1372, 11.5756) >>> berlin = (52.5186, 13.4083) >>> round(haversine_distance(munich, berlin), 1) 504.2 >>> new_york_city = (40.712777777778, -74.005833333333) # NYC >>> round(haversine_distance(berlin, new_york_city), 1) 6385.3 ] <ast.Tuple object at 0x7da18ede7940> assign[=] name[origin] <ast.Tuple object at 0x7da18ede4850> assign[=] name[destination] if <ast.UnaryOp object at 0x7da18ede4400> begin[:] <ast.Raise object at 0x7da18ede4ee0> if <ast.UnaryOp object at 0x7da18ede61a0> begin[:] <ast.Raise object at 0x7da18ede5630> if <ast.UnaryOp object at 0x7da18ede65f0> begin[:] <ast.Raise object at 0x7da18ede4130> if <ast.UnaryOp object at 0x7da18ede7e20> begin[:] <ast.Raise object at 0x7da18ede4d00> variable[radius] assign[=] constant[6371] variable[dlat] assign[=] call[name[math_stl].radians, parameter[binary_operation[name[lat2] - name[lat1]]]] variable[dlon] assign[=] call[name[math_stl].radians, parameter[binary_operation[name[lon2] - name[lon1]]]] variable[a] assign[=] binary_operation[binary_operation[call[name[math_stl].sin, parameter[binary_operation[name[dlat] / constant[2]]]] * call[name[math_stl].sin, parameter[binary_operation[name[dlat] / constant[2]]]]] + binary_operation[binary_operation[binary_operation[call[name[math_stl].cos, parameter[call[name[math_stl].radians, parameter[name[lat1]]]]] * call[name[math_stl].cos, parameter[call[name[math_stl].radians, parameter[name[lat2]]]]]] * call[name[math_stl].sin, parameter[binary_operation[name[dlon] / constant[2]]]]] * call[name[math_stl].sin, parameter[binary_operation[name[dlon] / constant[2]]]]]] variable[c] assign[=] binary_operation[constant[2] * call[name[math_stl].atan2, parameter[call[name[math_stl].sqrt, 
parameter[name[a]]], call[name[math_stl].sqrt, parameter[binary_operation[constant[1] - name[a]]]]]]] variable[d] assign[=] binary_operation[name[radius] * name[c]] return[name[d]]
keyword[def] identifier[haversine_distance] ( identifier[origin] , identifier[destination] ): literal[string] identifier[lat1] , identifier[lon1] = identifier[origin] identifier[lat2] , identifier[lon2] = identifier[destination] keyword[if] keyword[not] (- literal[int] <= identifier[lat1] <= literal[int] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[lat1] )) keyword[if] keyword[not] (- literal[int] <= identifier[lat2] <= literal[int] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[lat2] )) keyword[if] keyword[not] (- literal[int] <= identifier[lon1] <= literal[int] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[lat1] )) keyword[if] keyword[not] (- literal[int] <= identifier[lon2] <= literal[int] ): keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[lat1] )) identifier[radius] = literal[int] identifier[dlat] = identifier[math_stl] . identifier[radians] ( identifier[lat2] - identifier[lat1] ) identifier[dlon] = identifier[math_stl] . identifier[radians] ( identifier[lon2] - identifier[lon1] ) identifier[a] =( identifier[math_stl] . identifier[sin] ( identifier[dlat] / literal[int] )* identifier[math_stl] . identifier[sin] ( identifier[dlat] / literal[int] )+ identifier[math_stl] . identifier[cos] ( identifier[math_stl] . identifier[radians] ( identifier[lat1] ))* identifier[math_stl] . identifier[cos] ( identifier[math_stl] . identifier[radians] ( identifier[lat2] ))* identifier[math_stl] . identifier[sin] ( identifier[dlon] / literal[int] )* identifier[math_stl] . identifier[sin] ( identifier[dlon] / literal[int] )) identifier[c] = literal[int] * identifier[math_stl] . identifier[atan2] ( identifier[math_stl] . identifier[sqrt] ( identifier[a] ), identifier[math_stl] . 
identifier[sqrt] ( literal[int] - identifier[a] )) identifier[d] = identifier[radius] * identifier[c] keyword[return] identifier[d]
def haversine_distance(origin, destination): """ Calculate the Haversine distance. Parameters ---------- origin : tuple of float (lat, long) destination : tuple of float (lat, long) Returns ------- distance_in_km : float Examples -------- >>> munich = (48.1372, 11.5756) >>> berlin = (52.5186, 13.4083) >>> round(haversine_distance(munich, berlin), 1) 504.2 >>> new_york_city = (40.712777777778, -74.005833333333) # NYC >>> round(haversine_distance(berlin, new_york_city), 1) 6385.3 """ (lat1, lon1) = origin (lat2, lon2) = destination if not -90.0 <= lat1 <= 90: raise ValueError('lat1={:2.2f}, but must be in [-90,+90]'.format(lat1)) # depends on [control=['if'], data=[]] if not -90.0 <= lat2 <= 90: raise ValueError('lat2={:2.2f}, but must be in [-90,+90]'.format(lat2)) # depends on [control=['if'], data=[]] if not -180.0 <= lon1 <= 180: raise ValueError('lon1={:2.2f}, but must be in [-180,+180]'.format(lat1)) # depends on [control=['if'], data=[]] if not -180.0 <= lon2 <= 180: raise ValueError('lon1={:2.2f}, but must be in [-180,+180]'.format(lat1)) # depends on [control=['if'], data=[]] radius = 6371 # km dlat = math_stl.radians(lat2 - lat1) dlon = math_stl.radians(lon2 - lon1) a = math_stl.sin(dlat / 2) * math_stl.sin(dlat / 2) + math_stl.cos(math_stl.radians(lat1)) * math_stl.cos(math_stl.radians(lat2)) * math_stl.sin(dlon / 2) * math_stl.sin(dlon / 2) c = 2 * math_stl.atan2(math_stl.sqrt(a), math_stl.sqrt(1 - a)) d = radius * c return d
def post(self, request, *args, **kwargs): """ Handles POST requests. """ return self.lock(request, *args, **kwargs)
def function[post, parameter[self, request]]: constant[ Handles POST requests. ] return[call[name[self].lock, parameter[name[request], <ast.Starred object at 0x7da18ede6470>]]]
keyword[def] identifier[post] ( identifier[self] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[lock] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] )
def post(self, request, *args, **kwargs): """ Handles POST requests. """ return self.lock(request, *args, **kwargs)
def set_cache_dir(directory): """Set the directory to cache JSON responses from most API endpoints. """ global cache_dir if directory is None: cache_dir = None return if not os.path.exists(directory): os.makedirs(directory) if not os.path.isdir(directory): raise ValueError("not a directory") cache_dir = directory
def function[set_cache_dir, parameter[directory]]: constant[Set the directory to cache JSON responses from most API endpoints. ] <ast.Global object at 0x7da1b274a2c0> if compare[name[directory] is constant[None]] begin[:] variable[cache_dir] assign[=] constant[None] return[None] if <ast.UnaryOp object at 0x7da1b2748b80> begin[:] call[name[os].makedirs, parameter[name[directory]]] if <ast.UnaryOp object at 0x7da1b274bca0> begin[:] <ast.Raise object at 0x7da1b274a7d0> variable[cache_dir] assign[=] name[directory]
keyword[def] identifier[set_cache_dir] ( identifier[directory] ): literal[string] keyword[global] identifier[cache_dir] keyword[if] identifier[directory] keyword[is] keyword[None] : identifier[cache_dir] = keyword[None] keyword[return] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[directory] ): identifier[os] . identifier[makedirs] ( identifier[directory] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[directory] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[cache_dir] = identifier[directory]
def set_cache_dir(directory): """Set the directory to cache JSON responses from most API endpoints. """ global cache_dir if directory is None: cache_dir = None return # depends on [control=['if'], data=[]] if not os.path.exists(directory): os.makedirs(directory) # depends on [control=['if'], data=[]] if not os.path.isdir(directory): raise ValueError('not a directory') # depends on [control=['if'], data=[]] cache_dir = directory
def get_mmcif(code, outfile=None): """ Get mmcif file associated with code from PDBE. Parameters ---------- code : str PDB code. outfile : str Filepath. Writes returned value to this file. Returns ------- mmcif_file : str Filepath to the mmcif file. """ pdbe_url = "http://www.ebi.ac.uk/pdbe/entry-files/download/{0}.cif".format(code) r = requests.get(pdbe_url) if r.status_code == 200: mmcif_string = r.text else: print("Could not download mmcif file for {0}".format(code)) mmcif_string = None # Write to file. if outfile and mmcif_string: with open(outfile, 'w') as foo: foo.write(mmcif_string) return mmcif_string
def function[get_mmcif, parameter[code, outfile]]: constant[ Get mmcif file associated with code from PDBE. Parameters ---------- code : str PDB code. outfile : str Filepath. Writes returned value to this file. Returns ------- mmcif_file : str Filepath to the mmcif file. ] variable[pdbe_url] assign[=] call[constant[http://www.ebi.ac.uk/pdbe/entry-files/download/{0}.cif].format, parameter[name[code]]] variable[r] assign[=] call[name[requests].get, parameter[name[pdbe_url]]] if compare[name[r].status_code equal[==] constant[200]] begin[:] variable[mmcif_string] assign[=] name[r].text if <ast.BoolOp object at 0x7da1b281ad40> begin[:] with call[name[open], parameter[name[outfile], constant[w]]] begin[:] call[name[foo].write, parameter[name[mmcif_string]]] return[name[mmcif_string]]
keyword[def] identifier[get_mmcif] ( identifier[code] , identifier[outfile] = keyword[None] ): literal[string] identifier[pdbe_url] = literal[string] . identifier[format] ( identifier[code] ) identifier[r] = identifier[requests] . identifier[get] ( identifier[pdbe_url] ) keyword[if] identifier[r] . identifier[status_code] == literal[int] : identifier[mmcif_string] = identifier[r] . identifier[text] keyword[else] : identifier[print] ( literal[string] . identifier[format] ( identifier[code] )) identifier[mmcif_string] = keyword[None] keyword[if] identifier[outfile] keyword[and] identifier[mmcif_string] : keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[foo] : identifier[foo] . identifier[write] ( identifier[mmcif_string] ) keyword[return] identifier[mmcif_string]
def get_mmcif(code, outfile=None): """ Get mmcif file associated with code from PDBE. Parameters ---------- code : str PDB code. outfile : str Filepath. Writes returned value to this file. Returns ------- mmcif_file : str Filepath to the mmcif file. """ pdbe_url = 'http://www.ebi.ac.uk/pdbe/entry-files/download/{0}.cif'.format(code) r = requests.get(pdbe_url) if r.status_code == 200: mmcif_string = r.text # depends on [control=['if'], data=[]] else: print('Could not download mmcif file for {0}'.format(code)) mmcif_string = None # Write to file. if outfile and mmcif_string: with open(outfile, 'w') as foo: foo.write(mmcif_string) # depends on [control=['with'], data=['foo']] # depends on [control=['if'], data=[]] return mmcif_string
def splitpath(self): """ p.splitpath() -> Return ``(p.parent, p.name)``. .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split` """ parent, child = self.module.split(self) return self._next_class(parent), child
def function[splitpath, parameter[self]]: constant[ p.splitpath() -> Return ``(p.parent, p.name)``. .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split` ] <ast.Tuple object at 0x7da18f09eaa0> assign[=] call[name[self].module.split, parameter[name[self]]] return[tuple[[<ast.Call object at 0x7da18f09c4f0>, <ast.Name object at 0x7da18f09e320>]]]
keyword[def] identifier[splitpath] ( identifier[self] ): literal[string] identifier[parent] , identifier[child] = identifier[self] . identifier[module] . identifier[split] ( identifier[self] ) keyword[return] identifier[self] . identifier[_next_class] ( identifier[parent] ), identifier[child]
def splitpath(self): """ p.splitpath() -> Return ``(p.parent, p.name)``. .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split` """ (parent, child) = self.module.split(self) return (self._next_class(parent), child)
def tally(self, chain): """Adds current value to trace.""" try: # I changed str(x) to '%f'%x to solve a bug appearing due to # locale settings. In french for instance, str prints a comma # instead of a colon to indicate the decimal, which confuses # the database into thinking that there are more values than there # is. A better solution would be to use another delimiter than the # comma. -DH valstring = ', '.join( ['%f' % x for x in np.ravel(self._getfunc())]) except: valstring = str(self._getfunc()) # Add value to database query = "INSERT INTO [%s] (recid, trace, %s) values (NULL, %s, %s)" % \ (self.name, self._vstr, chain, valstring) self.db.cur.execute(query)
def function[tally, parameter[self, chain]]: constant[Adds current value to trace.] <ast.Try object at 0x7da18c4cd690> variable[query] assign[=] binary_operation[constant[INSERT INTO [%s] (recid, trace, %s) values (NULL, %s, %s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18c4cd900>, <ast.Attribute object at 0x7da18c4cd2d0>, <ast.Name object at 0x7da18c4ceb00>, <ast.Name object at 0x7da18c4cf370>]]] call[name[self].db.cur.execute, parameter[name[query]]]
keyword[def] identifier[tally] ( identifier[self] , identifier[chain] ): literal[string] keyword[try] : identifier[valstring] = literal[string] . identifier[join] ( [ literal[string] % identifier[x] keyword[for] identifier[x] keyword[in] identifier[np] . identifier[ravel] ( identifier[self] . identifier[_getfunc] ())]) keyword[except] : identifier[valstring] = identifier[str] ( identifier[self] . identifier[_getfunc] ()) identifier[query] = literal[string] %( identifier[self] . identifier[name] , identifier[self] . identifier[_vstr] , identifier[chain] , identifier[valstring] ) identifier[self] . identifier[db] . identifier[cur] . identifier[execute] ( identifier[query] )
def tally(self, chain): """Adds current value to trace.""" try: # I changed str(x) to '%f'%x to solve a bug appearing due to # locale settings. In french for instance, str prints a comma # instead of a colon to indicate the decimal, which confuses # the database into thinking that there are more values than there # is. A better solution would be to use another delimiter than the # comma. -DH valstring = ', '.join(['%f' % x for x in np.ravel(self._getfunc())]) # depends on [control=['try'], data=[]] except: valstring = str(self._getfunc()) # depends on [control=['except'], data=[]] # Add value to database query = 'INSERT INTO [%s] (recid, trace, %s) values (NULL, %s, %s)' % (self.name, self._vstr, chain, valstring) self.db.cur.execute(query)
def _add_scalar(self, scalar): """Returns E(a + b), given self=E(a) and b. Args: scalar: an int or float b, to be added to `self`. Returns: EncryptedNumber: E(a + b), calculated by encrypting b and taking the product of E(a) and E(b) modulo :attr:`~PaillierPublicKey.n` ** 2. Raises: ValueError: if scalar is out of range or precision. """ encoded = EncodedNumber.encode(self.public_key, scalar, max_exponent=self.exponent) return self._add_encoded(encoded)
def function[_add_scalar, parameter[self, scalar]]: constant[Returns E(a + b), given self=E(a) and b. Args: scalar: an int or float b, to be added to `self`. Returns: EncryptedNumber: E(a + b), calculated by encrypting b and taking the product of E(a) and E(b) modulo :attr:`~PaillierPublicKey.n` ** 2. Raises: ValueError: if scalar is out of range or precision. ] variable[encoded] assign[=] call[name[EncodedNumber].encode, parameter[name[self].public_key, name[scalar]]] return[call[name[self]._add_encoded, parameter[name[encoded]]]]
keyword[def] identifier[_add_scalar] ( identifier[self] , identifier[scalar] ): literal[string] identifier[encoded] = identifier[EncodedNumber] . identifier[encode] ( identifier[self] . identifier[public_key] , identifier[scalar] , identifier[max_exponent] = identifier[self] . identifier[exponent] ) keyword[return] identifier[self] . identifier[_add_encoded] ( identifier[encoded] )
def _add_scalar(self, scalar): """Returns E(a + b), given self=E(a) and b. Args: scalar: an int or float b, to be added to `self`. Returns: EncryptedNumber: E(a + b), calculated by encrypting b and taking the product of E(a) and E(b) modulo :attr:`~PaillierPublicKey.n` ** 2. Raises: ValueError: if scalar is out of range or precision. """ encoded = EncodedNumber.encode(self.public_key, scalar, max_exponent=self.exponent) return self._add_encoded(encoded)
def find_by_ids(self, _ids, projection=None, **kwargs): """ Does a big _id:$in query on any iterator """ id_list = [ObjectId(_id) for _id in _ids] if len(_ids) == 0: return [] # FIXME : this should be an empty cursor ! # Optimized path when only fetching the _id field. # Be mindful this might not filter missing documents that may not have been returned, had we done the query. if projection is not None and list(projection.keys()) == ["_id"]: return [self({"_id": x}, fetched_fields={"_id": True}) for x in id_list] else: return self.find({"_id": {"$in": id_list}}, projection=projection, **kwargs)
def function[find_by_ids, parameter[self, _ids, projection]]: constant[ Does a big _id:$in query on any iterator ] variable[id_list] assign[=] <ast.ListComp object at 0x7da1b26d6440> if compare[call[name[len], parameter[name[_ids]]] equal[==] constant[0]] begin[:] return[list[[]]] if <ast.BoolOp object at 0x7da1b26d6e00> begin[:] return[<ast.ListComp object at 0x7da1b26d4280>]
keyword[def] identifier[find_by_ids] ( identifier[self] , identifier[_ids] , identifier[projection] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[id_list] =[ identifier[ObjectId] ( identifier[_id] ) keyword[for] identifier[_id] keyword[in] identifier[_ids] ] keyword[if] identifier[len] ( identifier[_ids] )== literal[int] : keyword[return] [] keyword[if] identifier[projection] keyword[is] keyword[not] keyword[None] keyword[and] identifier[list] ( identifier[projection] . identifier[keys] ())==[ literal[string] ]: keyword[return] [ identifier[self] ({ literal[string] : identifier[x] }, identifier[fetched_fields] ={ literal[string] : keyword[True] }) keyword[for] identifier[x] keyword[in] identifier[id_list] ] keyword[else] : keyword[return] identifier[self] . identifier[find] ({ literal[string] :{ literal[string] : identifier[id_list] }}, identifier[projection] = identifier[projection] ,** identifier[kwargs] )
def find_by_ids(self, _ids, projection=None, **kwargs): """ Does a big _id:$in query on any iterator """ id_list = [ObjectId(_id) for _id in _ids] if len(_ids) == 0: return [] # FIXME : this should be an empty cursor ! # depends on [control=['if'], data=[]] # Optimized path when only fetching the _id field. # Be mindful this might not filter missing documents that may not have been returned, had we done the query. if projection is not None and list(projection.keys()) == ['_id']: return [self({'_id': x}, fetched_fields={'_id': True}) for x in id_list] # depends on [control=['if'], data=[]] else: return self.find({'_id': {'$in': id_list}}, projection=projection, **kwargs)
def save_file(filename, source, folder="Downloads"): """ Download and save a file at path :param filename: The name of the file :param source: The location of the resource online :param folder: The directory the file will be saved in :return: None """ r = requests.get(source, stream=True) if r.status_code == 200: if not path.isdir(folder): makedirs(folder, exist_ok=True) with open("%s/%s" % (folder, filename), 'wb') as f: for chunk in r: f.write(chunk)
def function[save_file, parameter[filename, source, folder]]: constant[ Download and save a file at path :param filename: The name of the file :param source: The location of the resource online :param folder: The directory the file will be saved in :return: None ] variable[r] assign[=] call[name[requests].get, parameter[name[source]]] if compare[name[r].status_code equal[==] constant[200]] begin[:] if <ast.UnaryOp object at 0x7da2054a7370> begin[:] call[name[makedirs], parameter[name[folder]]] with call[name[open], parameter[binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da2054a4580>, <ast.Name object at 0x7da2054a7fd0>]]], constant[wb]]] begin[:] for taget[name[chunk]] in starred[name[r]] begin[:] call[name[f].write, parameter[name[chunk]]]
keyword[def] identifier[save_file] ( identifier[filename] , identifier[source] , identifier[folder] = literal[string] ): literal[string] identifier[r] = identifier[requests] . identifier[get] ( identifier[source] , identifier[stream] = keyword[True] ) keyword[if] identifier[r] . identifier[status_code] == literal[int] : keyword[if] keyword[not] identifier[path] . identifier[isdir] ( identifier[folder] ): identifier[makedirs] ( identifier[folder] , identifier[exist_ok] = keyword[True] ) keyword[with] identifier[open] ( literal[string] %( identifier[folder] , identifier[filename] ), literal[string] ) keyword[as] identifier[f] : keyword[for] identifier[chunk] keyword[in] identifier[r] : identifier[f] . identifier[write] ( identifier[chunk] )
def save_file(filename, source, folder='Downloads'): """ Download and save a file at path :param filename: The name of the file :param source: The location of the resource online :param folder: The directory the file will be saved in :return: None """ r = requests.get(source, stream=True) if r.status_code == 200: if not path.isdir(folder): makedirs(folder, exist_ok=True) # depends on [control=['if'], data=[]] with open('%s/%s' % (folder, filename), 'wb') as f: for chunk in r: f.write(chunk) # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
def get_subjects(self): """ Returns the list of subject names present in the schema registry. """ res = requests.get(self._url('/subjects')) raise_if_failed(res) return res.json()
def function[get_subjects, parameter[self]]: constant[ Returns the list of subject names present in the schema registry. ] variable[res] assign[=] call[name[requests].get, parameter[call[name[self]._url, parameter[constant[/subjects]]]]] call[name[raise_if_failed], parameter[name[res]]] return[call[name[res].json, parameter[]]]
keyword[def] identifier[get_subjects] ( identifier[self] ): literal[string] identifier[res] = identifier[requests] . identifier[get] ( identifier[self] . identifier[_url] ( literal[string] )) identifier[raise_if_failed] ( identifier[res] ) keyword[return] identifier[res] . identifier[json] ()
def get_subjects(self): """ Returns the list of subject names present in the schema registry. """ res = requests.get(self._url('/subjects')) raise_if_failed(res) return res.json()
def calculate(self, batch_info): """ Calculate value of a metric """ value = self._value_function(batch_info['data'], batch_info['target'], batch_info['output']) self.storage.append(value)
def function[calculate, parameter[self, batch_info]]: constant[ Calculate value of a metric ] variable[value] assign[=] call[name[self]._value_function, parameter[call[name[batch_info]][constant[data]], call[name[batch_info]][constant[target]], call[name[batch_info]][constant[output]]]] call[name[self].storage.append, parameter[name[value]]]
keyword[def] identifier[calculate] ( identifier[self] , identifier[batch_info] ): literal[string] identifier[value] = identifier[self] . identifier[_value_function] ( identifier[batch_info] [ literal[string] ], identifier[batch_info] [ literal[string] ], identifier[batch_info] [ literal[string] ]) identifier[self] . identifier[storage] . identifier[append] ( identifier[value] )
def calculate(self, batch_info): """ Calculate value of a metric """ value = self._value_function(batch_info['data'], batch_info['target'], batch_info['output']) self.storage.append(value)
def run_command_orig(cmd): """ No idea how th f to get this to work """ process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() if process.returncode == 0: os.killpg(os.getpgid(pro.pid), signal.SIGTERM) else: raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (process.returncode, cmd, stdout + stderr)) return stdout
def function[run_command_orig, parameter[cmd]]: constant[ No idea how th f to get this to work ] variable[process] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]] <ast.Tuple object at 0x7da2054a7550> assign[=] call[name[process].communicate, parameter[]] if compare[name[process].returncode equal[==] constant[0]] begin[:] call[name[os].killpg, parameter[call[name[os].getpgid, parameter[name[pro].pid]], name[signal].SIGTERM]] return[name[stdout]]
keyword[def] identifier[run_command_orig] ( identifier[cmd] ): literal[string] identifier[process] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] ) identifier[stdout] , identifier[stderr] = identifier[process] . identifier[communicate] () keyword[if] identifier[process] . identifier[returncode] == literal[int] : identifier[os] . identifier[killpg] ( identifier[os] . identifier[getpgid] ( identifier[pro] . identifier[pid] ), identifier[signal] . identifier[SIGTERM] ) keyword[else] : keyword[raise] identifier[BadRCError] ( literal[string] %( identifier[process] . identifier[returncode] , identifier[cmd] , identifier[stdout] + identifier[stderr] )) keyword[return] identifier[stdout]
def run_command_orig(cmd): """ No idea how th f to get this to work """ process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = process.communicate() if process.returncode == 0: os.killpg(os.getpgid(pro.pid), signal.SIGTERM) # depends on [control=['if'], data=[]] else: raise BadRCError("Bad rc (%s) for cmd '%s': %s" % (process.returncode, cmd, stdout + stderr)) return stdout
def cables_from_source(path, predicate=None): """\ Returns a generator with ``ICable`` instances. `path` Either a directory or a CSV file. `predicate` A predicate that is invoked for each cable reference identifier. If the predicate evaluates to ``False`` the cable is ignored. By default, all cables are used. I.e. ``cables_from_source('cables.csv', lambda r: r.startswith('09'))`` would return cables where the reference identifier starts with ``09``. """ return cables_from_directory(path, predicate) if os.path.isdir(path) else cables_from_csv(path, predicate)
def function[cables_from_source, parameter[path, predicate]]: constant[ Returns a generator with ``ICable`` instances. `path` Either a directory or a CSV file. `predicate` A predicate that is invoked for each cable reference identifier. If the predicate evaluates to ``False`` the cable is ignored. By default, all cables are used. I.e. ``cables_from_source('cables.csv', lambda r: r.startswith('09'))`` would return cables where the reference identifier starts with ``09``. ] return[<ast.IfExp object at 0x7da20c6e64d0>]
keyword[def] identifier[cables_from_source] ( identifier[path] , identifier[predicate] = keyword[None] ): literal[string] keyword[return] identifier[cables_from_directory] ( identifier[path] , identifier[predicate] ) keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ) keyword[else] identifier[cables_from_csv] ( identifier[path] , identifier[predicate] )
def cables_from_source(path, predicate=None): """ Returns a generator with ``ICable`` instances. `path` Either a directory or a CSV file. `predicate` A predicate that is invoked for each cable reference identifier. If the predicate evaluates to ``False`` the cable is ignored. By default, all cables are used. I.e. ``cables_from_source('cables.csv', lambda r: r.startswith('09'))`` would return cables where the reference identifier starts with ``09``. """ return cables_from_directory(path, predicate) if os.path.isdir(path) else cables_from_csv(path, predicate)
def init(self, back=None): ''' Initialize the backend, only do so if the fs supports an init function ''' back = self.backends(back) for fsb in back: fstr = '{0}.init'.format(fsb) if fstr in self.servers: self.servers[fstr]()
def function[init, parameter[self, back]]: constant[ Initialize the backend, only do so if the fs supports an init function ] variable[back] assign[=] call[name[self].backends, parameter[name[back]]] for taget[name[fsb]] in starred[name[back]] begin[:] variable[fstr] assign[=] call[constant[{0}.init].format, parameter[name[fsb]]] if compare[name[fstr] in name[self].servers] begin[:] call[call[name[self].servers][name[fstr]], parameter[]]
keyword[def] identifier[init] ( identifier[self] , identifier[back] = keyword[None] ): literal[string] identifier[back] = identifier[self] . identifier[backends] ( identifier[back] ) keyword[for] identifier[fsb] keyword[in] identifier[back] : identifier[fstr] = literal[string] . identifier[format] ( identifier[fsb] ) keyword[if] identifier[fstr] keyword[in] identifier[self] . identifier[servers] : identifier[self] . identifier[servers] [ identifier[fstr] ]()
def init(self, back=None): """ Initialize the backend, only do so if the fs supports an init function """ back = self.backends(back) for fsb in back: fstr = '{0}.init'.format(fsb) if fstr in self.servers: self.servers[fstr]() # depends on [control=['if'], data=['fstr']] # depends on [control=['for'], data=['fsb']]
def create_mssql_pymssql(username, password, host, port, database, **kwargs): # pragma: no cover """ create an engine connected to a mssql database using pymssql. """ return create_engine( _create_mssql_pymssql(username, password, host, port, database), **kwargs )
def function[create_mssql_pymssql, parameter[username, password, host, port, database]]: constant[ create an engine connected to a mssql database using pymssql. ] return[call[name[create_engine], parameter[call[name[_create_mssql_pymssql], parameter[name[username], name[password], name[host], name[port], name[database]]]]]]
keyword[def] identifier[create_mssql_pymssql] ( identifier[username] , identifier[password] , identifier[host] , identifier[port] , identifier[database] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[create_engine] ( identifier[_create_mssql_pymssql] ( identifier[username] , identifier[password] , identifier[host] , identifier[port] , identifier[database] ), ** identifier[kwargs] )
def create_mssql_pymssql(username, password, host, port, database, **kwargs): # pragma: no cover '\n create an engine connected to a mssql database using pymssql.\n ' return create_engine(_create_mssql_pymssql(username, password, host, port, database), **kwargs)
def prefix_add(self, prefix, next_hop=None, route_dist=None): """ This method adds a new prefix to be advertised. ``prefix`` must be the string representation of an IP network (e.g., 10.1.1.0/24). ``next_hop`` specifies the next hop address for this prefix. This parameter is necessary for only VPNv4 and VPNv6 address families. ``route_dist`` specifies a route distinguisher value. This parameter is necessary for only VPNv4 and VPNv6 address families. """ func_name = 'network.add' networks = { PREFIX: prefix, } if next_hop: networks[NEXT_HOP] = next_hop if route_dist: func_name = 'prefix.add_local' networks[ROUTE_DISTINGUISHER] = route_dist rf, p = self._check_rf_and_normalize(prefix) networks[ROUTE_FAMILY] = rf networks[PREFIX] = p if rf == vrfs.VRF_RF_IPV6 and ip.valid_ipv4(next_hop): # convert the next_hop to IPv4-Mapped IPv6 Address networks[NEXT_HOP] = \ str(netaddr.IPAddress(next_hop).ipv6()) return call(func_name, **networks)
def function[prefix_add, parameter[self, prefix, next_hop, route_dist]]: constant[ This method adds a new prefix to be advertised. ``prefix`` must be the string representation of an IP network (e.g., 10.1.1.0/24). ``next_hop`` specifies the next hop address for this prefix. This parameter is necessary for only VPNv4 and VPNv6 address families. ``route_dist`` specifies a route distinguisher value. This parameter is necessary for only VPNv4 and VPNv6 address families. ] variable[func_name] assign[=] constant[network.add] variable[networks] assign[=] dictionary[[<ast.Name object at 0x7da1b1b29c00>], [<ast.Name object at 0x7da1b1b29bd0>]] if name[next_hop] begin[:] call[name[networks]][name[NEXT_HOP]] assign[=] name[next_hop] if name[route_dist] begin[:] variable[func_name] assign[=] constant[prefix.add_local] call[name[networks]][name[ROUTE_DISTINGUISHER]] assign[=] name[route_dist] <ast.Tuple object at 0x7da1b1b296f0> assign[=] call[name[self]._check_rf_and_normalize, parameter[name[prefix]]] call[name[networks]][name[ROUTE_FAMILY]] assign[=] name[rf] call[name[networks]][name[PREFIX]] assign[=] name[p] if <ast.BoolOp object at 0x7da1b1b292a0> begin[:] call[name[networks]][name[NEXT_HOP]] assign[=] call[name[str], parameter[call[call[name[netaddr].IPAddress, parameter[name[next_hop]]].ipv6, parameter[]]]] return[call[name[call], parameter[name[func_name]]]]
keyword[def] identifier[prefix_add] ( identifier[self] , identifier[prefix] , identifier[next_hop] = keyword[None] , identifier[route_dist] = keyword[None] ): literal[string] identifier[func_name] = literal[string] identifier[networks] ={ identifier[PREFIX] : identifier[prefix] , } keyword[if] identifier[next_hop] : identifier[networks] [ identifier[NEXT_HOP] ]= identifier[next_hop] keyword[if] identifier[route_dist] : identifier[func_name] = literal[string] identifier[networks] [ identifier[ROUTE_DISTINGUISHER] ]= identifier[route_dist] identifier[rf] , identifier[p] = identifier[self] . identifier[_check_rf_and_normalize] ( identifier[prefix] ) identifier[networks] [ identifier[ROUTE_FAMILY] ]= identifier[rf] identifier[networks] [ identifier[PREFIX] ]= identifier[p] keyword[if] identifier[rf] == identifier[vrfs] . identifier[VRF_RF_IPV6] keyword[and] identifier[ip] . identifier[valid_ipv4] ( identifier[next_hop] ): identifier[networks] [ identifier[NEXT_HOP] ]= identifier[str] ( identifier[netaddr] . identifier[IPAddress] ( identifier[next_hop] ). identifier[ipv6] ()) keyword[return] identifier[call] ( identifier[func_name] ,** identifier[networks] )
def prefix_add(self, prefix, next_hop=None, route_dist=None): """ This method adds a new prefix to be advertised. ``prefix`` must be the string representation of an IP network (e.g., 10.1.1.0/24). ``next_hop`` specifies the next hop address for this prefix. This parameter is necessary for only VPNv4 and VPNv6 address families. ``route_dist`` specifies a route distinguisher value. This parameter is necessary for only VPNv4 and VPNv6 address families. """ func_name = 'network.add' networks = {PREFIX: prefix} if next_hop: networks[NEXT_HOP] = next_hop # depends on [control=['if'], data=[]] if route_dist: func_name = 'prefix.add_local' networks[ROUTE_DISTINGUISHER] = route_dist (rf, p) = self._check_rf_and_normalize(prefix) networks[ROUTE_FAMILY] = rf networks[PREFIX] = p if rf == vrfs.VRF_RF_IPV6 and ip.valid_ipv4(next_hop): # convert the next_hop to IPv4-Mapped IPv6 Address networks[NEXT_HOP] = str(netaddr.IPAddress(next_hop).ipv6()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return call(func_name, **networks)
def _delete_cells(self, column_family_id, columns, time_range=None, state=None): """Helper for :meth:`delete_cell` and :meth:`delete_cells`. ``state`` is unused by :class:`DirectRow` but is used by subclasses. :type column_family_id: str :param column_family_id: The column family that contains the column or columns with cells being deleted. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type columns: :class:`list` of :class:`str` / :func:`unicode <unicode>`, or :class:`object` :param columns: The columns within the column family that will have cells deleted. If :attr:`ALL_COLUMNS` is used then the entire column family will be deleted from the row. :type time_range: :class:`TimestampRange` :param time_range: (Optional) The range of time within which cells should be deleted. :type state: bool :param state: (Optional) The state that is passed along to :meth:`_get_mutations`. """ mutations_list = self._get_mutations(state) if columns is self.ALL_COLUMNS: mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( family_name=column_family_id ) mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) mutations_list.append(mutation_pb) else: delete_kwargs = {} if time_range is not None: delete_kwargs["time_range"] = time_range.to_pb() to_append = [] for column in columns: column = _to_bytes(column) # time_range will never change if present, but the rest of # delete_kwargs will delete_kwargs.update( family_name=column_family_id, column_qualifier=column ) mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs) mutation_pb = data_v2_pb2.Mutation(delete_from_column=mutation_val) to_append.append(mutation_pb) # We don't add the mutations until all columns have been # processed without error. mutations_list.extend(to_append)
def function[_delete_cells, parameter[self, column_family_id, columns, time_range, state]]: constant[Helper for :meth:`delete_cell` and :meth:`delete_cells`. ``state`` is unused by :class:`DirectRow` but is used by subclasses. :type column_family_id: str :param column_family_id: The column family that contains the column or columns with cells being deleted. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type columns: :class:`list` of :class:`str` / :func:`unicode <unicode>`, or :class:`object` :param columns: The columns within the column family that will have cells deleted. If :attr:`ALL_COLUMNS` is used then the entire column family will be deleted from the row. :type time_range: :class:`TimestampRange` :param time_range: (Optional) The range of time within which cells should be deleted. :type state: bool :param state: (Optional) The state that is passed along to :meth:`_get_mutations`. ] variable[mutations_list] assign[=] call[name[self]._get_mutations, parameter[name[state]]] if compare[name[columns] is name[self].ALL_COLUMNS] begin[:] variable[mutation_val] assign[=] call[name[data_v2_pb2].Mutation.DeleteFromFamily, parameter[]] variable[mutation_pb] assign[=] call[name[data_v2_pb2].Mutation, parameter[]] call[name[mutations_list].append, parameter[name[mutation_pb]]]
keyword[def] identifier[_delete_cells] ( identifier[self] , identifier[column_family_id] , identifier[columns] , identifier[time_range] = keyword[None] , identifier[state] = keyword[None] ): literal[string] identifier[mutations_list] = identifier[self] . identifier[_get_mutations] ( identifier[state] ) keyword[if] identifier[columns] keyword[is] identifier[self] . identifier[ALL_COLUMNS] : identifier[mutation_val] = identifier[data_v2_pb2] . identifier[Mutation] . identifier[DeleteFromFamily] ( identifier[family_name] = identifier[column_family_id] ) identifier[mutation_pb] = identifier[data_v2_pb2] . identifier[Mutation] ( identifier[delete_from_family] = identifier[mutation_val] ) identifier[mutations_list] . identifier[append] ( identifier[mutation_pb] ) keyword[else] : identifier[delete_kwargs] ={} keyword[if] identifier[time_range] keyword[is] keyword[not] keyword[None] : identifier[delete_kwargs] [ literal[string] ]= identifier[time_range] . identifier[to_pb] () identifier[to_append] =[] keyword[for] identifier[column] keyword[in] identifier[columns] : identifier[column] = identifier[_to_bytes] ( identifier[column] ) identifier[delete_kwargs] . identifier[update] ( identifier[family_name] = identifier[column_family_id] , identifier[column_qualifier] = identifier[column] ) identifier[mutation_val] = identifier[data_v2_pb2] . identifier[Mutation] . identifier[DeleteFromColumn] (** identifier[delete_kwargs] ) identifier[mutation_pb] = identifier[data_v2_pb2] . identifier[Mutation] ( identifier[delete_from_column] = identifier[mutation_val] ) identifier[to_append] . identifier[append] ( identifier[mutation_pb] ) identifier[mutations_list] . identifier[extend] ( identifier[to_append] )
def _delete_cells(self, column_family_id, columns, time_range=None, state=None): """Helper for :meth:`delete_cell` and :meth:`delete_cells`. ``state`` is unused by :class:`DirectRow` but is used by subclasses. :type column_family_id: str :param column_family_id: The column family that contains the column or columns with cells being deleted. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type columns: :class:`list` of :class:`str` / :func:`unicode <unicode>`, or :class:`object` :param columns: The columns within the column family that will have cells deleted. If :attr:`ALL_COLUMNS` is used then the entire column family will be deleted from the row. :type time_range: :class:`TimestampRange` :param time_range: (Optional) The range of time within which cells should be deleted. :type state: bool :param state: (Optional) The state that is passed along to :meth:`_get_mutations`. """ mutations_list = self._get_mutations(state) if columns is self.ALL_COLUMNS: mutation_val = data_v2_pb2.Mutation.DeleteFromFamily(family_name=column_family_id) mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) mutations_list.append(mutation_pb) # depends on [control=['if'], data=[]] else: delete_kwargs = {} if time_range is not None: delete_kwargs['time_range'] = time_range.to_pb() # depends on [control=['if'], data=['time_range']] to_append = [] for column in columns: column = _to_bytes(column) # time_range will never change if present, but the rest of # delete_kwargs will delete_kwargs.update(family_name=column_family_id, column_qualifier=column) mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs) mutation_pb = data_v2_pb2.Mutation(delete_from_column=mutation_val) to_append.append(mutation_pb) # depends on [control=['for'], data=['column']] # We don't add the mutations until all columns have been # processed without error. mutations_list.extend(to_append)
def localopt(self, forcefield='mmff94', steps=500): """ A wrapper to pybel's localopt method to optimize a Molecule. Args: forcefield: Default is mmff94. Options are 'gaff', 'ghemical', 'mmff94', 'mmff94s', and 'uff'. steps: Default is 500. """ pbmol = pb.Molecule(self._obmol) pbmol.localopt(forcefield=forcefield, steps=steps) self._obmol = pbmol.OBMol
def function[localopt, parameter[self, forcefield, steps]]: constant[ A wrapper to pybel's localopt method to optimize a Molecule. Args: forcefield: Default is mmff94. Options are 'gaff', 'ghemical', 'mmff94', 'mmff94s', and 'uff'. steps: Default is 500. ] variable[pbmol] assign[=] call[name[pb].Molecule, parameter[name[self]._obmol]] call[name[pbmol].localopt, parameter[]] name[self]._obmol assign[=] name[pbmol].OBMol
keyword[def] identifier[localopt] ( identifier[self] , identifier[forcefield] = literal[string] , identifier[steps] = literal[int] ): literal[string] identifier[pbmol] = identifier[pb] . identifier[Molecule] ( identifier[self] . identifier[_obmol] ) identifier[pbmol] . identifier[localopt] ( identifier[forcefield] = identifier[forcefield] , identifier[steps] = identifier[steps] ) identifier[self] . identifier[_obmol] = identifier[pbmol] . identifier[OBMol]
def localopt(self, forcefield='mmff94', steps=500): """ A wrapper to pybel's localopt method to optimize a Molecule. Args: forcefield: Default is mmff94. Options are 'gaff', 'ghemical', 'mmff94', 'mmff94s', and 'uff'. steps: Default is 500. """ pbmol = pb.Molecule(self._obmol) pbmol.localopt(forcefield=forcefield, steps=steps) self._obmol = pbmol.OBMol
def from_range(cls, data, name=None, dtype=None, **kwargs): """ Create RangeIndex from a range object. """ if not isinstance(data, range): raise TypeError( '{0}(...) must be called with object coercible to a ' 'range, {1} was passed'.format(cls.__name__, repr(data))) start, stop, step = data.start, data.stop, data.step return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
def function[from_range, parameter[cls, data, name, dtype]]: constant[ Create RangeIndex from a range object. ] if <ast.UnaryOp object at 0x7da18f813e80> begin[:] <ast.Raise object at 0x7da18f812aa0> <ast.Tuple object at 0x7da2045676d0> assign[=] tuple[[<ast.Attribute object at 0x7da204567700>, <ast.Attribute object at 0x7da204565bd0>, <ast.Attribute object at 0x7da204567790>]] return[call[name[RangeIndex], parameter[name[start], name[stop], name[step]]]]
keyword[def] identifier[from_range] ( identifier[cls] , identifier[data] , identifier[name] = keyword[None] , identifier[dtype] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[range] ): keyword[raise] identifier[TypeError] ( literal[string] literal[string] . identifier[format] ( identifier[cls] . identifier[__name__] , identifier[repr] ( identifier[data] ))) identifier[start] , identifier[stop] , identifier[step] = identifier[data] . identifier[start] , identifier[data] . identifier[stop] , identifier[data] . identifier[step] keyword[return] identifier[RangeIndex] ( identifier[start] , identifier[stop] , identifier[step] , identifier[dtype] = identifier[dtype] , identifier[name] = identifier[name] ,** identifier[kwargs] )
def from_range(cls, data, name=None, dtype=None, **kwargs): """ Create RangeIndex from a range object. """ if not isinstance(data, range): raise TypeError('{0}(...) must be called with object coercible to a range, {1} was passed'.format(cls.__name__, repr(data))) # depends on [control=['if'], data=[]] (start, stop, step) = (data.start, data.stop, data.step) return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
def iterfields(klass): """Iterate over the input class members and yield its TypedFields. Args: klass: A class (usually an Entity subclass). Yields: (class attribute name, TypedField instance) tuples. """ is_field = lambda x: isinstance(x, TypedField) for name, field in inspect.getmembers(klass, predicate=is_field): yield name, field
def function[iterfields, parameter[klass]]: constant[Iterate over the input class members and yield its TypedFields. Args: klass: A class (usually an Entity subclass). Yields: (class attribute name, TypedField instance) tuples. ] variable[is_field] assign[=] <ast.Lambda object at 0x7da1b25b1000> for taget[tuple[[<ast.Name object at 0x7da1b25b06d0>, <ast.Name object at 0x7da1b25b16f0>]]] in starred[call[name[inspect].getmembers, parameter[name[klass]]]] begin[:] <ast.Yield object at 0x7da1b25b0790>
keyword[def] identifier[iterfields] ( identifier[klass] ): literal[string] identifier[is_field] = keyword[lambda] identifier[x] : identifier[isinstance] ( identifier[x] , identifier[TypedField] ) keyword[for] identifier[name] , identifier[field] keyword[in] identifier[inspect] . identifier[getmembers] ( identifier[klass] , identifier[predicate] = identifier[is_field] ): keyword[yield] identifier[name] , identifier[field]
def iterfields(klass): """Iterate over the input class members and yield its TypedFields. Args: klass: A class (usually an Entity subclass). Yields: (class attribute name, TypedField instance) tuples. """ is_field = lambda x: isinstance(x, TypedField) for (name, field) in inspect.getmembers(klass, predicate=is_field): yield (name, field) # depends on [control=['for'], data=[]]
def _load_old_defaults(self, old_version): """Read old defaults""" old_defaults = cp.ConfigParser() if check_version(old_version, '3.0.0', '<='): path = get_module_source_path('spyder') else: path = osp.dirname(self.filename()) path = osp.join(path, 'defaults') old_defaults.read(osp.join(path, 'defaults-'+old_version+'.ini')) return old_defaults
def function[_load_old_defaults, parameter[self, old_version]]: constant[Read old defaults] variable[old_defaults] assign[=] call[name[cp].ConfigParser, parameter[]] if call[name[check_version], parameter[name[old_version], constant[3.0.0], constant[<=]]] begin[:] variable[path] assign[=] call[name[get_module_source_path], parameter[constant[spyder]]] variable[path] assign[=] call[name[osp].join, parameter[name[path], constant[defaults]]] call[name[old_defaults].read, parameter[call[name[osp].join, parameter[name[path], binary_operation[binary_operation[constant[defaults-] + name[old_version]] + constant[.ini]]]]]] return[name[old_defaults]]
keyword[def] identifier[_load_old_defaults] ( identifier[self] , identifier[old_version] ): literal[string] identifier[old_defaults] = identifier[cp] . identifier[ConfigParser] () keyword[if] identifier[check_version] ( identifier[old_version] , literal[string] , literal[string] ): identifier[path] = identifier[get_module_source_path] ( literal[string] ) keyword[else] : identifier[path] = identifier[osp] . identifier[dirname] ( identifier[self] . identifier[filename] ()) identifier[path] = identifier[osp] . identifier[join] ( identifier[path] , literal[string] ) identifier[old_defaults] . identifier[read] ( identifier[osp] . identifier[join] ( identifier[path] , literal[string] + identifier[old_version] + literal[string] )) keyword[return] identifier[old_defaults]
def _load_old_defaults(self, old_version): """Read old defaults""" old_defaults = cp.ConfigParser() if check_version(old_version, '3.0.0', '<='): path = get_module_source_path('spyder') # depends on [control=['if'], data=[]] else: path = osp.dirname(self.filename()) path = osp.join(path, 'defaults') old_defaults.read(osp.join(path, 'defaults-' + old_version + '.ini')) return old_defaults
def __checkIfBestCompletedModel(self): """ Reads the current "best model" for the job and returns whether or not the current model is better than the "best model" stored for the job Returns: (isBetter, storedBest, origResultsStr) isBetter: True if the current model is better than the stored "best model" storedResults: A dict of the currently stored results in the jobs table record origResultsStr: The json-encoded string that currently resides in the "results" field of the jobs record (used to create atomicity) """ jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0] if jobResultsStr is None: jobResults = {} else: jobResults = json.loads(jobResultsStr) isSaved = jobResults.get('saved', False) bestMetric = jobResults.get('bestValue', None) currentMetric = self._getMetrics()[self._optimizedMetricLabel] self._isBestModel = (not isSaved) \ or (currentMetric < bestMetric) return self._isBestModel, jobResults, jobResultsStr
def function[__checkIfBestCompletedModel, parameter[self]]: constant[ Reads the current "best model" for the job and returns whether or not the current model is better than the "best model" stored for the job Returns: (isBetter, storedBest, origResultsStr) isBetter: True if the current model is better than the stored "best model" storedResults: A dict of the currently stored results in the jobs table record origResultsStr: The json-encoded string that currently resides in the "results" field of the jobs record (used to create atomicity) ] variable[jobResultsStr] assign[=] call[call[name[self]._jobsDAO.jobGetFields, parameter[name[self]._jobID, list[[<ast.Constant object at 0x7da20c7ca8c0>]]]]][constant[0]] if compare[name[jobResultsStr] is constant[None]] begin[:] variable[jobResults] assign[=] dictionary[[], []] variable[isSaved] assign[=] call[name[jobResults].get, parameter[constant[saved], constant[False]]] variable[bestMetric] assign[=] call[name[jobResults].get, parameter[constant[bestValue], constant[None]]] variable[currentMetric] assign[=] call[call[name[self]._getMetrics, parameter[]]][name[self]._optimizedMetricLabel] name[self]._isBestModel assign[=] <ast.BoolOp object at 0x7da20c7cab00> return[tuple[[<ast.Attribute object at 0x7da20c7c8280>, <ast.Name object at 0x7da20c7c8fd0>, <ast.Name object at 0x7da20c7c9750>]]]
keyword[def] identifier[__checkIfBestCompletedModel] ( identifier[self] ): literal[string] identifier[jobResultsStr] = identifier[self] . identifier[_jobsDAO] . identifier[jobGetFields] ( identifier[self] . identifier[_jobID] ,[ literal[string] ])[ literal[int] ] keyword[if] identifier[jobResultsStr] keyword[is] keyword[None] : identifier[jobResults] ={} keyword[else] : identifier[jobResults] = identifier[json] . identifier[loads] ( identifier[jobResultsStr] ) identifier[isSaved] = identifier[jobResults] . identifier[get] ( literal[string] , keyword[False] ) identifier[bestMetric] = identifier[jobResults] . identifier[get] ( literal[string] , keyword[None] ) identifier[currentMetric] = identifier[self] . identifier[_getMetrics] ()[ identifier[self] . identifier[_optimizedMetricLabel] ] identifier[self] . identifier[_isBestModel] =( keyword[not] identifier[isSaved] ) keyword[or] ( identifier[currentMetric] < identifier[bestMetric] ) keyword[return] identifier[self] . identifier[_isBestModel] , identifier[jobResults] , identifier[jobResultsStr]
def __checkIfBestCompletedModel(self): """ Reads the current "best model" for the job and returns whether or not the current model is better than the "best model" stored for the job Returns: (isBetter, storedBest, origResultsStr) isBetter: True if the current model is better than the stored "best model" storedResults: A dict of the currently stored results in the jobs table record origResultsStr: The json-encoded string that currently resides in the "results" field of the jobs record (used to create atomicity) """ jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0] if jobResultsStr is None: jobResults = {} # depends on [control=['if'], data=[]] else: jobResults = json.loads(jobResultsStr) isSaved = jobResults.get('saved', False) bestMetric = jobResults.get('bestValue', None) currentMetric = self._getMetrics()[self._optimizedMetricLabel] self._isBestModel = not isSaved or currentMetric < bestMetric return (self._isBestModel, jobResults, jobResultsStr)
def print_stats(genomes): """ print substitution rate data to table genomes[genome][contig][sample] = \ {'bp_stats':{}, 'sub_rates'[locus] = {ref PnPs, consensus PnPs}} """ header = ['#genome', 'contig', 'locus', 'position', 'strand', 'length', \ 'sample', 'coverage', \ 'ref. Pn/Ps', 'ref SNP density', \ 'consensus Pn/Ps', 'consensus SNP density', \ 'product'] print('\t'.join(header)) for genome, contigs in list(genomes.items()): for contig, samples in list(contigs.items()): for sample, stats in list(samples.items()): for locus, rates in list(stats['sub_rates'].items()): length = rates['info']['length'] position, strand = rates['info']['position'] position = '%s-%s' % position out = [genome, contig, locus, position, strand, length, \ sample, '%.2f' % (rates['cov']), \ rates['ref PnPs'], rates['ref SNP density'], \ rates['consensus PnPs'], rates['consensus SNP density'], \ rates['info']['product'][0]] print('\t'.join([str(i) for i in out]))
def function[print_stats, parameter[genomes]]: constant[ print substitution rate data to table genomes[genome][contig][sample] = {'bp_stats':{}, 'sub_rates'[locus] = {ref PnPs, consensus PnPs}} ] variable[header] assign[=] list[[<ast.Constant object at 0x7da18dc071c0>, <ast.Constant object at 0x7da18dc041c0>, <ast.Constant object at 0x7da18dc040a0>, <ast.Constant object at 0x7da18dc05030>, <ast.Constant object at 0x7da18dc057e0>, <ast.Constant object at 0x7da18dc070a0>, <ast.Constant object at 0x7da18dc05150>, <ast.Constant object at 0x7da18dc06b90>, <ast.Constant object at 0x7da18dc05120>, <ast.Constant object at 0x7da18dc04b50>, <ast.Constant object at 0x7da18dc04c10>, <ast.Constant object at 0x7da18dc05ea0>, <ast.Constant object at 0x7da18dc05b70>]] call[name[print], parameter[call[constant[ ].join, parameter[name[header]]]]] for taget[tuple[[<ast.Name object at 0x7da18dc07130>, <ast.Name object at 0x7da18dc053c0>]]] in starred[call[name[list], parameter[call[name[genomes].items, parameter[]]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18bc70970>, <ast.Name object at 0x7da18bc72a40>]]] in starred[call[name[list], parameter[call[name[contigs].items, parameter[]]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18bc735b0>, <ast.Name object at 0x7da18bc71270>]]] in starred[call[name[list], parameter[call[name[samples].items, parameter[]]]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da18bc71000>, <ast.Name object at 0x7da18bc706a0>]]] in starred[call[name[list], parameter[call[call[name[stats]][constant[sub_rates]].items, parameter[]]]]] begin[:] variable[length] assign[=] call[call[name[rates]][constant[info]]][constant[length]] <ast.Tuple object at 0x7da2041db490> assign[=] call[call[name[rates]][constant[info]]][constant[position]] variable[position] assign[=] binary_operation[constant[%s-%s] <ast.Mod object at 0x7da2590d6920> name[position]] variable[out] assign[=] list[[<ast.Name object at 0x7da2041d9ed0>, <ast.Name object at 
0x7da2041dad70>, <ast.Name object at 0x7da2041d9720>, <ast.Name object at 0x7da2041d90f0>, <ast.Name object at 0x7da2041d9270>, <ast.Name object at 0x7da2041d8160>, <ast.Name object at 0x7da2041d91b0>, <ast.BinOp object at 0x7da2041da140>, <ast.Subscript object at 0x7da2041daf80>, <ast.Subscript object at 0x7da2041d8ee0>, <ast.Subscript object at 0x7da2041d9cf0>, <ast.Subscript object at 0x7da2041d9ab0>, <ast.Subscript object at 0x7da2041d93f0>]] call[name[print], parameter[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2041da110>]]]]
keyword[def] identifier[print_stats] ( identifier[genomes] ): literal[string] identifier[header] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[print] ( literal[string] . identifier[join] ( identifier[header] )) keyword[for] identifier[genome] , identifier[contigs] keyword[in] identifier[list] ( identifier[genomes] . identifier[items] ()): keyword[for] identifier[contig] , identifier[samples] keyword[in] identifier[list] ( identifier[contigs] . identifier[items] ()): keyword[for] identifier[sample] , identifier[stats] keyword[in] identifier[list] ( identifier[samples] . identifier[items] ()): keyword[for] identifier[locus] , identifier[rates] keyword[in] identifier[list] ( identifier[stats] [ literal[string] ]. identifier[items] ()): identifier[length] = identifier[rates] [ literal[string] ][ literal[string] ] identifier[position] , identifier[strand] = identifier[rates] [ literal[string] ][ literal[string] ] identifier[position] = literal[string] % identifier[position] identifier[out] =[ identifier[genome] , identifier[contig] , identifier[locus] , identifier[position] , identifier[strand] , identifier[length] , identifier[sample] , literal[string] %( identifier[rates] [ literal[string] ]), identifier[rates] [ literal[string] ], identifier[rates] [ literal[string] ], identifier[rates] [ literal[string] ], identifier[rates] [ literal[string] ], identifier[rates] [ literal[string] ][ literal[string] ][ literal[int] ]] identifier[print] ( literal[string] . identifier[join] ([ identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[out] ]))
def print_stats(genomes): """ print substitution rate data to table genomes[genome][contig][sample] = {'bp_stats':{}, 'sub_rates'[locus] = {ref PnPs, consensus PnPs}} """ header = ['#genome', 'contig', 'locus', 'position', 'strand', 'length', 'sample', 'coverage', 'ref. Pn/Ps', 'ref SNP density', 'consensus Pn/Ps', 'consensus SNP density', 'product'] print('\t'.join(header)) for (genome, contigs) in list(genomes.items()): for (contig, samples) in list(contigs.items()): for (sample, stats) in list(samples.items()): for (locus, rates) in list(stats['sub_rates'].items()): length = rates['info']['length'] (position, strand) = rates['info']['position'] position = '%s-%s' % position out = [genome, contig, locus, position, strand, length, sample, '%.2f' % rates['cov'], rates['ref PnPs'], rates['ref SNP density'], rates['consensus PnPs'], rates['consensus SNP density'], rates['info']['product'][0]] print('\t'.join([str(i) for i in out])) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
def _install(self, args): ''' Install a package from a repo ''' if len(args) < 2: raise SPMInvocationError('A package must be specified') caller_opts = self.opts.copy() caller_opts['file_client'] = 'local' self.caller = salt.client.Caller(mopts=caller_opts) self.client = salt.client.get_local_client(self.opts['conf_file']) cache = salt.cache.Cache(self.opts) packages = args[1:] file_map = {} optional = [] recommended = [] to_install = [] for pkg in packages: if pkg.endswith('.spm'): if self._pkgfiles_fun('path_exists', pkg): comps = pkg.split('-') comps = os.path.split('-'.join(comps[:-2])) pkg_name = comps[-1] formula_tar = tarfile.open(pkg, 'r:bz2') formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name)) formula_def = salt.utils.yaml.safe_load(formula_ref) file_map[pkg_name] = pkg to_, op_, re_ = self._check_all_deps( pkg_name=pkg_name, pkg_file=pkg, formula_def=formula_def ) to_install.extend(to_) optional.extend(op_) recommended.extend(re_) formula_tar.close() else: raise SPMInvocationError('Package file {0} not found'.format(pkg)) else: to_, op_, re_ = self._check_all_deps(pkg_name=pkg) to_install.extend(to_) optional.extend(op_) recommended.extend(re_) optional = set(filter(len, optional)) if optional: self.ui.status('The following dependencies are optional:\n\t{0}\n'.format( '\n\t'.join(optional) )) recommended = set(filter(len, recommended)) if recommended: self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format( '\n\t'.join(recommended) )) to_install = set(filter(len, to_install)) msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install)) if not self.opts['assume_yes']: self.ui.confirm(msg) repo_metadata = self._get_repo_metadata() dl_list = {} for package in to_install: if package in file_map: self._install_indv_pkg(package, file_map[package]) else: for repo in repo_metadata: repo_info = repo_metadata[repo] if package in repo_info['packages']: dl_package = False repo_ver = 
repo_info['packages'][package]['info']['version'] repo_rel = repo_info['packages'][package]['info']['release'] repo_url = repo_info['info']['url'] if package in dl_list: # Check package version, replace if newer version if repo_ver == dl_list[package]['version']: # Version is the same, check release if repo_rel > dl_list[package]['release']: dl_package = True elif repo_rel == dl_list[package]['release']: # Version and release are the same, give # preference to local (file://) repos if dl_list[package]['source'].startswith('file://'): if not repo_url.startswith('file://'): dl_package = True elif repo_ver > dl_list[package]['version']: dl_package = True else: dl_package = True if dl_package is True: # Put together download directory cache_path = os.path.join( self.opts['spm_cache_dir'], repo ) # Put together download paths dl_url = '{0}/{1}'.format( repo_info['info']['url'], repo_info['packages'][package]['filename'] ) out_file = os.path.join( cache_path, repo_info['packages'][package]['filename'] ) dl_list[package] = { 'version': repo_ver, 'release': repo_rel, 'source': dl_url, 'dest_dir': cache_path, 'dest_file': out_file, } for package in dl_list: dl_url = dl_list[package]['source'] cache_path = dl_list[package]['dest_dir'] out_file = dl_list[package]['dest_file'] # Make sure download directory exists if not os.path.exists(cache_path): os.makedirs(cache_path) # Download the package if dl_url.startswith('file://'): dl_url = dl_url.replace('file://', '') shutil.copyfile(dl_url, out_file) else: with salt.utils.files.fopen(out_file, 'w') as outf: outf.write(self._query_http(dl_url, repo_info['info'])) # First we download everything, then we install for package in dl_list: out_file = dl_list[package]['dest_file'] # Kick off the install self._install_indv_pkg(package, out_file) return
def function[_install, parameter[self, args]]: constant[ Install a package from a repo ] if compare[call[name[len], parameter[name[args]]] less[<] constant[2]] begin[:] <ast.Raise object at 0x7da20c6c4e50> variable[caller_opts] assign[=] call[name[self].opts.copy, parameter[]] call[name[caller_opts]][constant[file_client]] assign[=] constant[local] name[self].caller assign[=] call[name[salt].client.Caller, parameter[]] name[self].client assign[=] call[name[salt].client.get_local_client, parameter[call[name[self].opts][constant[conf_file]]]] variable[cache] assign[=] call[name[salt].cache.Cache, parameter[name[self].opts]] variable[packages] assign[=] call[name[args]][<ast.Slice object at 0x7da20c6c7ac0>] variable[file_map] assign[=] dictionary[[], []] variable[optional] assign[=] list[[]] variable[recommended] assign[=] list[[]] variable[to_install] assign[=] list[[]] for taget[name[pkg]] in starred[name[packages]] begin[:] if call[name[pkg].endswith, parameter[constant[.spm]]] begin[:] if call[name[self]._pkgfiles_fun, parameter[constant[path_exists], name[pkg]]] begin[:] variable[comps] assign[=] call[name[pkg].split, parameter[constant[-]]] variable[comps] assign[=] call[name[os].path.split, parameter[call[constant[-].join, parameter[call[name[comps]][<ast.Slice object at 0x7da20c6c51e0>]]]]] variable[pkg_name] assign[=] call[name[comps]][<ast.UnaryOp object at 0x7da20c6c7d90>] variable[formula_tar] assign[=] call[name[tarfile].open, parameter[name[pkg], constant[r:bz2]]] variable[formula_ref] assign[=] call[name[formula_tar].extractfile, parameter[call[constant[{0}/FORMULA].format, parameter[name[pkg_name]]]]] variable[formula_def] assign[=] call[name[salt].utils.yaml.safe_load, parameter[name[formula_ref]]] call[name[file_map]][name[pkg_name]] assign[=] name[pkg] <ast.Tuple object at 0x7da20c6c6bc0> assign[=] call[name[self]._check_all_deps, parameter[]] call[name[to_install].extend, parameter[name[to_]]] call[name[optional].extend, parameter[name[op_]]] 
call[name[recommended].extend, parameter[name[re_]]] call[name[formula_tar].close, parameter[]] variable[optional] assign[=] call[name[set], parameter[call[name[filter], parameter[name[len], name[optional]]]]] if name[optional] begin[:] call[name[self].ui.status, parameter[call[constant[The following dependencies are optional: {0} ].format, parameter[call[constant[ ].join, parameter[name[optional]]]]]]] variable[recommended] assign[=] call[name[set], parameter[call[name[filter], parameter[name[len], name[recommended]]]]] if name[recommended] begin[:] call[name[self].ui.status, parameter[call[constant[The following dependencies are recommended: {0} ].format, parameter[call[constant[ ].join, parameter[name[recommended]]]]]]] variable[to_install] assign[=] call[name[set], parameter[call[name[filter], parameter[name[len], name[to_install]]]]] variable[msg] assign[=] call[constant[Installing packages: {0} ].format, parameter[call[constant[ ].join, parameter[name[to_install]]]]] if <ast.UnaryOp object at 0x7da2041dbbb0> begin[:] call[name[self].ui.confirm, parameter[name[msg]]] variable[repo_metadata] assign[=] call[name[self]._get_repo_metadata, parameter[]] variable[dl_list] assign[=] dictionary[[], []] for taget[name[package]] in starred[name[to_install]] begin[:] if compare[name[package] in name[file_map]] begin[:] call[name[self]._install_indv_pkg, parameter[name[package], call[name[file_map]][name[package]]]] for taget[name[package]] in starred[name[dl_list]] begin[:] variable[dl_url] assign[=] call[call[name[dl_list]][name[package]]][constant[source]] variable[cache_path] assign[=] call[call[name[dl_list]][name[package]]][constant[dest_dir]] variable[out_file] assign[=] call[call[name[dl_list]][name[package]]][constant[dest_file]] if <ast.UnaryOp object at 0x7da1b21e1870> begin[:] call[name[os].makedirs, parameter[name[cache_path]]] if call[name[dl_url].startswith, parameter[constant[file://]]] begin[:] variable[dl_url] assign[=] call[name[dl_url].replace, 
parameter[constant[file://], constant[]]] call[name[shutil].copyfile, parameter[name[dl_url], name[out_file]]] for taget[name[package]] in starred[name[dl_list]] begin[:] variable[out_file] assign[=] call[call[name[dl_list]][name[package]]][constant[dest_file]] call[name[self]._install_indv_pkg, parameter[name[package], name[out_file]]] return[None]
keyword[def] identifier[_install] ( identifier[self] , identifier[args] ): literal[string] keyword[if] identifier[len] ( identifier[args] )< literal[int] : keyword[raise] identifier[SPMInvocationError] ( literal[string] ) identifier[caller_opts] = identifier[self] . identifier[opts] . identifier[copy] () identifier[caller_opts] [ literal[string] ]= literal[string] identifier[self] . identifier[caller] = identifier[salt] . identifier[client] . identifier[Caller] ( identifier[mopts] = identifier[caller_opts] ) identifier[self] . identifier[client] = identifier[salt] . identifier[client] . identifier[get_local_client] ( identifier[self] . identifier[opts] [ literal[string] ]) identifier[cache] = identifier[salt] . identifier[cache] . identifier[Cache] ( identifier[self] . identifier[opts] ) identifier[packages] = identifier[args] [ literal[int] :] identifier[file_map] ={} identifier[optional] =[] identifier[recommended] =[] identifier[to_install] =[] keyword[for] identifier[pkg] keyword[in] identifier[packages] : keyword[if] identifier[pkg] . identifier[endswith] ( literal[string] ): keyword[if] identifier[self] . identifier[_pkgfiles_fun] ( literal[string] , identifier[pkg] ): identifier[comps] = identifier[pkg] . identifier[split] ( literal[string] ) identifier[comps] = identifier[os] . identifier[path] . identifier[split] ( literal[string] . identifier[join] ( identifier[comps] [:- literal[int] ])) identifier[pkg_name] = identifier[comps] [- literal[int] ] identifier[formula_tar] = identifier[tarfile] . identifier[open] ( identifier[pkg] , literal[string] ) identifier[formula_ref] = identifier[formula_tar] . identifier[extractfile] ( literal[string] . identifier[format] ( identifier[pkg_name] )) identifier[formula_def] = identifier[salt] . identifier[utils] . identifier[yaml] . identifier[safe_load] ( identifier[formula_ref] ) identifier[file_map] [ identifier[pkg_name] ]= identifier[pkg] identifier[to_] , identifier[op_] , identifier[re_] = identifier[self] . 
identifier[_check_all_deps] ( identifier[pkg_name] = identifier[pkg_name] , identifier[pkg_file] = identifier[pkg] , identifier[formula_def] = identifier[formula_def] ) identifier[to_install] . identifier[extend] ( identifier[to_] ) identifier[optional] . identifier[extend] ( identifier[op_] ) identifier[recommended] . identifier[extend] ( identifier[re_] ) identifier[formula_tar] . identifier[close] () keyword[else] : keyword[raise] identifier[SPMInvocationError] ( literal[string] . identifier[format] ( identifier[pkg] )) keyword[else] : identifier[to_] , identifier[op_] , identifier[re_] = identifier[self] . identifier[_check_all_deps] ( identifier[pkg_name] = identifier[pkg] ) identifier[to_install] . identifier[extend] ( identifier[to_] ) identifier[optional] . identifier[extend] ( identifier[op_] ) identifier[recommended] . identifier[extend] ( identifier[re_] ) identifier[optional] = identifier[set] ( identifier[filter] ( identifier[len] , identifier[optional] )) keyword[if] identifier[optional] : identifier[self] . identifier[ui] . identifier[status] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[optional] ) )) identifier[recommended] = identifier[set] ( identifier[filter] ( identifier[len] , identifier[recommended] )) keyword[if] identifier[recommended] : identifier[self] . identifier[ui] . identifier[status] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[recommended] ) )) identifier[to_install] = identifier[set] ( identifier[filter] ( identifier[len] , identifier[to_install] )) identifier[msg] = literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[to_install] )) keyword[if] keyword[not] identifier[self] . identifier[opts] [ literal[string] ]: identifier[self] . identifier[ui] . identifier[confirm] ( identifier[msg] ) identifier[repo_metadata] = identifier[self] . 
identifier[_get_repo_metadata] () identifier[dl_list] ={} keyword[for] identifier[package] keyword[in] identifier[to_install] : keyword[if] identifier[package] keyword[in] identifier[file_map] : identifier[self] . identifier[_install_indv_pkg] ( identifier[package] , identifier[file_map] [ identifier[package] ]) keyword[else] : keyword[for] identifier[repo] keyword[in] identifier[repo_metadata] : identifier[repo_info] = identifier[repo_metadata] [ identifier[repo] ] keyword[if] identifier[package] keyword[in] identifier[repo_info] [ literal[string] ]: identifier[dl_package] = keyword[False] identifier[repo_ver] = identifier[repo_info] [ literal[string] ][ identifier[package] ][ literal[string] ][ literal[string] ] identifier[repo_rel] = identifier[repo_info] [ literal[string] ][ identifier[package] ][ literal[string] ][ literal[string] ] identifier[repo_url] = identifier[repo_info] [ literal[string] ][ literal[string] ] keyword[if] identifier[package] keyword[in] identifier[dl_list] : keyword[if] identifier[repo_ver] == identifier[dl_list] [ identifier[package] ][ literal[string] ]: keyword[if] identifier[repo_rel] > identifier[dl_list] [ identifier[package] ][ literal[string] ]: identifier[dl_package] = keyword[True] keyword[elif] identifier[repo_rel] == identifier[dl_list] [ identifier[package] ][ literal[string] ]: keyword[if] identifier[dl_list] [ identifier[package] ][ literal[string] ]. identifier[startswith] ( literal[string] ): keyword[if] keyword[not] identifier[repo_url] . identifier[startswith] ( literal[string] ): identifier[dl_package] = keyword[True] keyword[elif] identifier[repo_ver] > identifier[dl_list] [ identifier[package] ][ literal[string] ]: identifier[dl_package] = keyword[True] keyword[else] : identifier[dl_package] = keyword[True] keyword[if] identifier[dl_package] keyword[is] keyword[True] : identifier[cache_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . 
identifier[opts] [ literal[string] ], identifier[repo] ) identifier[dl_url] = literal[string] . identifier[format] ( identifier[repo_info] [ literal[string] ][ literal[string] ], identifier[repo_info] [ literal[string] ][ identifier[package] ][ literal[string] ] ) identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[cache_path] , identifier[repo_info] [ literal[string] ][ identifier[package] ][ literal[string] ] ) identifier[dl_list] [ identifier[package] ]={ literal[string] : identifier[repo_ver] , literal[string] : identifier[repo_rel] , literal[string] : identifier[dl_url] , literal[string] : identifier[cache_path] , literal[string] : identifier[out_file] , } keyword[for] identifier[package] keyword[in] identifier[dl_list] : identifier[dl_url] = identifier[dl_list] [ identifier[package] ][ literal[string] ] identifier[cache_path] = identifier[dl_list] [ identifier[package] ][ literal[string] ] identifier[out_file] = identifier[dl_list] [ identifier[package] ][ literal[string] ] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[cache_path] ): identifier[os] . identifier[makedirs] ( identifier[cache_path] ) keyword[if] identifier[dl_url] . identifier[startswith] ( literal[string] ): identifier[dl_url] = identifier[dl_url] . identifier[replace] ( literal[string] , literal[string] ) identifier[shutil] . identifier[copyfile] ( identifier[dl_url] , identifier[out_file] ) keyword[else] : keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[out_file] , literal[string] ) keyword[as] identifier[outf] : identifier[outf] . identifier[write] ( identifier[self] . identifier[_query_http] ( identifier[dl_url] , identifier[repo_info] [ literal[string] ])) keyword[for] identifier[package] keyword[in] identifier[dl_list] : identifier[out_file] = identifier[dl_list] [ identifier[package] ][ literal[string] ] identifier[self] . 
identifier[_install_indv_pkg] ( identifier[package] , identifier[out_file] ) keyword[return]
def _install(self, args): """ Install a package from a repo """ if len(args) < 2: raise SPMInvocationError('A package must be specified') # depends on [control=['if'], data=[]] caller_opts = self.opts.copy() caller_opts['file_client'] = 'local' self.caller = salt.client.Caller(mopts=caller_opts) self.client = salt.client.get_local_client(self.opts['conf_file']) cache = salt.cache.Cache(self.opts) packages = args[1:] file_map = {} optional = [] recommended = [] to_install = [] for pkg in packages: if pkg.endswith('.spm'): if self._pkgfiles_fun('path_exists', pkg): comps = pkg.split('-') comps = os.path.split('-'.join(comps[:-2])) pkg_name = comps[-1] formula_tar = tarfile.open(pkg, 'r:bz2') formula_ref = formula_tar.extractfile('{0}/FORMULA'.format(pkg_name)) formula_def = salt.utils.yaml.safe_load(formula_ref) file_map[pkg_name] = pkg (to_, op_, re_) = self._check_all_deps(pkg_name=pkg_name, pkg_file=pkg, formula_def=formula_def) to_install.extend(to_) optional.extend(op_) recommended.extend(re_) formula_tar.close() # depends on [control=['if'], data=[]] else: raise SPMInvocationError('Package file {0} not found'.format(pkg)) # depends on [control=['if'], data=[]] else: (to_, op_, re_) = self._check_all_deps(pkg_name=pkg) to_install.extend(to_) optional.extend(op_) recommended.extend(re_) # depends on [control=['for'], data=['pkg']] optional = set(filter(len, optional)) if optional: self.ui.status('The following dependencies are optional:\n\t{0}\n'.format('\n\t'.join(optional))) # depends on [control=['if'], data=[]] recommended = set(filter(len, recommended)) if recommended: self.ui.status('The following dependencies are recommended:\n\t{0}\n'.format('\n\t'.join(recommended))) # depends on [control=['if'], data=[]] to_install = set(filter(len, to_install)) msg = 'Installing packages:\n\t{0}\n'.format('\n\t'.join(to_install)) if not self.opts['assume_yes']: self.ui.confirm(msg) # depends on [control=['if'], data=[]] repo_metadata = self._get_repo_metadata() dl_list 
= {} for package in to_install: if package in file_map: self._install_indv_pkg(package, file_map[package]) # depends on [control=['if'], data=['package', 'file_map']] else: for repo in repo_metadata: repo_info = repo_metadata[repo] if package in repo_info['packages']: dl_package = False repo_ver = repo_info['packages'][package]['info']['version'] repo_rel = repo_info['packages'][package]['info']['release'] repo_url = repo_info['info']['url'] if package in dl_list: # Check package version, replace if newer version if repo_ver == dl_list[package]['version']: # Version is the same, check release if repo_rel > dl_list[package]['release']: dl_package = True # depends on [control=['if'], data=[]] elif repo_rel == dl_list[package]['release']: # Version and release are the same, give # preference to local (file://) repos if dl_list[package]['source'].startswith('file://'): if not repo_url.startswith('file://'): dl_package = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif repo_ver > dl_list[package]['version']: dl_package = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['package', 'dl_list']] else: dl_package = True if dl_package is True: # Put together download directory cache_path = os.path.join(self.opts['spm_cache_dir'], repo) # Put together download paths dl_url = '{0}/{1}'.format(repo_info['info']['url'], repo_info['packages'][package]['filename']) out_file = os.path.join(cache_path, repo_info['packages'][package]['filename']) dl_list[package] = {'version': repo_ver, 'release': repo_rel, 'source': dl_url, 'dest_dir': cache_path, 'dest_file': out_file} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['package']] # depends on [control=['for'], data=['repo']] # depends on [control=['for'], data=['package']] for package in dl_list: dl_url = dl_list[package]['source'] cache_path = 
dl_list[package]['dest_dir'] out_file = dl_list[package]['dest_file'] # Make sure download directory exists if not os.path.exists(cache_path): os.makedirs(cache_path) # depends on [control=['if'], data=[]] # Download the package if dl_url.startswith('file://'): dl_url = dl_url.replace('file://', '') shutil.copyfile(dl_url, out_file) # depends on [control=['if'], data=[]] else: with salt.utils.files.fopen(out_file, 'w') as outf: outf.write(self._query_http(dl_url, repo_info['info'])) # depends on [control=['with'], data=['outf']] # depends on [control=['for'], data=['package']] # First we download everything, then we install for package in dl_list: out_file = dl_list[package]['dest_file'] # Kick off the install self._install_indv_pkg(package, out_file) # depends on [control=['for'], data=['package']] return
def _handle_outliers(self, p_o): """ Sets observation probabilities of outliers to uniform if ignore_outliers is set. Parameters ---------- p_o : ndarray((T, N)) output probabilities """ if self.ignore_outliers: outliers = np.where(p_o.sum(axis=1)==0)[0] if outliers.size > 0: p_o[outliers, :] = 1.0 self.found_outliers = True return p_o
def function[_handle_outliers, parameter[self, p_o]]: constant[ Sets observation probabilities of outliers to uniform if ignore_outliers is set. Parameters ---------- p_o : ndarray((T, N)) output probabilities ] if name[self].ignore_outliers begin[:] variable[outliers] assign[=] call[call[name[np].where, parameter[compare[call[name[p_o].sum, parameter[]] equal[==] constant[0]]]]][constant[0]] if compare[name[outliers].size greater[>] constant[0]] begin[:] call[name[p_o]][tuple[[<ast.Name object at 0x7da20c6c7340>, <ast.Slice object at 0x7da20c6c4fd0>]]] assign[=] constant[1.0] name[self].found_outliers assign[=] constant[True] return[name[p_o]]
keyword[def] identifier[_handle_outliers] ( identifier[self] , identifier[p_o] ): literal[string] keyword[if] identifier[self] . identifier[ignore_outliers] : identifier[outliers] = identifier[np] . identifier[where] ( identifier[p_o] . identifier[sum] ( identifier[axis] = literal[int] )== literal[int] )[ literal[int] ] keyword[if] identifier[outliers] . identifier[size] > literal[int] : identifier[p_o] [ identifier[outliers] ,:]= literal[int] identifier[self] . identifier[found_outliers] = keyword[True] keyword[return] identifier[p_o]
def _handle_outliers(self, p_o): """ Sets observation probabilities of outliers to uniform if ignore_outliers is set. Parameters ---------- p_o : ndarray((T, N)) output probabilities """ if self.ignore_outliers: outliers = np.where(p_o.sum(axis=1) == 0)[0] if outliers.size > 0: p_o[outliers, :] = 1.0 self.found_outliers = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return p_o
def _getsetting(setting, default): """Get `setting` if set, fallback to `default` if not This method tries to return the value of the specified setting from Django's settings module, after prefixing the name with _DJANGO_SETTING_PREFIX. If this fails for any reason, the value supplied in `default` will be returned instead. """ setting = _DJANGO_SETTING_PREFIX + setting try: return getattr(settings, setting) except: return default
def function[_getsetting, parameter[setting, default]]: constant[Get `setting` if set, fallback to `default` if not This method tries to return the value of the specified setting from Django's settings module, after prefixing the name with _DJANGO_SETTING_PREFIX. If this fails for any reason, the value supplied in `default` will be returned instead. ] variable[setting] assign[=] binary_operation[name[_DJANGO_SETTING_PREFIX] + name[setting]] <ast.Try object at 0x7da1b0a1fe80>
keyword[def] identifier[_getsetting] ( identifier[setting] , identifier[default] ): literal[string] identifier[setting] = identifier[_DJANGO_SETTING_PREFIX] + identifier[setting] keyword[try] : keyword[return] identifier[getattr] ( identifier[settings] , identifier[setting] ) keyword[except] : keyword[return] identifier[default]
def _getsetting(setting, default): """Get `setting` if set, fallback to `default` if not This method tries to return the value of the specified setting from Django's settings module, after prefixing the name with _DJANGO_SETTING_PREFIX. If this fails for any reason, the value supplied in `default` will be returned instead. """ setting = _DJANGO_SETTING_PREFIX + setting try: return getattr(settings, setting) # depends on [control=['try'], data=[]] except: return default # depends on [control=['except'], data=[]]
def set_(key, value, host=DEFAULT_HOST, port=DEFAULT_PORT, time=DEFAULT_TIME, min_compress_len=DEFAULT_MIN_COMPRESS_LEN): ''' Set a key on the memcached server, overwriting the value if it exists. CLI Example: .. code-block:: bash salt '*' memcached.set <key> <value> ''' if not isinstance(time, six.integer_types): raise SaltInvocationError('\'time\' must be an integer') if not isinstance(min_compress_len, six.integer_types): raise SaltInvocationError('\'min_compress_len\' must be an integer') conn = _connect(host, port) _check_stats(conn) return conn.set(key, value, time, min_compress_len)
def function[set_, parameter[key, value, host, port, time, min_compress_len]]: constant[ Set a key on the memcached server, overwriting the value if it exists. CLI Example: .. code-block:: bash salt '*' memcached.set <key> <value> ] if <ast.UnaryOp object at 0x7da1b2161d80> begin[:] <ast.Raise object at 0x7da1b2161c30> if <ast.UnaryOp object at 0x7da1b2161ab0> begin[:] <ast.Raise object at 0x7da1b21602b0> variable[conn] assign[=] call[name[_connect], parameter[name[host], name[port]]] call[name[_check_stats], parameter[name[conn]]] return[call[name[conn].set, parameter[name[key], name[value], name[time], name[min_compress_len]]]]
keyword[def] identifier[set_] ( identifier[key] , identifier[value] , identifier[host] = identifier[DEFAULT_HOST] , identifier[port] = identifier[DEFAULT_PORT] , identifier[time] = identifier[DEFAULT_TIME] , identifier[min_compress_len] = identifier[DEFAULT_MIN_COMPRESS_LEN] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[time] , identifier[six] . identifier[integer_types] ): keyword[raise] identifier[SaltInvocationError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[min_compress_len] , identifier[six] . identifier[integer_types] ): keyword[raise] identifier[SaltInvocationError] ( literal[string] ) identifier[conn] = identifier[_connect] ( identifier[host] , identifier[port] ) identifier[_check_stats] ( identifier[conn] ) keyword[return] identifier[conn] . identifier[set] ( identifier[key] , identifier[value] , identifier[time] , identifier[min_compress_len] )
def set_(key, value, host=DEFAULT_HOST, port=DEFAULT_PORT, time=DEFAULT_TIME, min_compress_len=DEFAULT_MIN_COMPRESS_LEN): """ Set a key on the memcached server, overwriting the value if it exists. CLI Example: .. code-block:: bash salt '*' memcached.set <key> <value> """ if not isinstance(time, six.integer_types): raise SaltInvocationError("'time' must be an integer") # depends on [control=['if'], data=[]] if not isinstance(min_compress_len, six.integer_types): raise SaltInvocationError("'min_compress_len' must be an integer") # depends on [control=['if'], data=[]] conn = _connect(host, port) _check_stats(conn) return conn.set(key, value, time, min_compress_len)
def onecmd_plus_hooks(self, line): ''' Trigger hooks after command. ''' if not line: return self.emptyline() return Cmd.onecmd_plus_hooks(self, line)
def function[onecmd_plus_hooks, parameter[self, line]]: constant[ Trigger hooks after command. ] if <ast.UnaryOp object at 0x7da1b0745420> begin[:] return[call[name[self].emptyline, parameter[]]] return[call[name[Cmd].onecmd_plus_hooks, parameter[name[self], name[line]]]]
keyword[def] identifier[onecmd_plus_hooks] ( identifier[self] , identifier[line] ): literal[string] keyword[if] keyword[not] identifier[line] : keyword[return] identifier[self] . identifier[emptyline] () keyword[return] identifier[Cmd] . identifier[onecmd_plus_hooks] ( identifier[self] , identifier[line] )
def onecmd_plus_hooks(self, line): """ Trigger hooks after command. """ if not line: return self.emptyline() # depends on [control=['if'], data=[]] return Cmd.onecmd_plus_hooks(self, line)
def on_epoch_end(self, last_metrics, **kwargs): "Put the various losses in the recorder." return add_metrics(last_metrics, [s.smooth for k,s in self.smootheners.items()])
def function[on_epoch_end, parameter[self, last_metrics]]: constant[Put the various losses in the recorder.] return[call[name[add_metrics], parameter[name[last_metrics], <ast.ListComp object at 0x7da1b1e171c0>]]]
keyword[def] identifier[on_epoch_end] ( identifier[self] , identifier[last_metrics] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[add_metrics] ( identifier[last_metrics] ,[ identifier[s] . identifier[smooth] keyword[for] identifier[k] , identifier[s] keyword[in] identifier[self] . identifier[smootheners] . identifier[items] ()])
def on_epoch_end(self, last_metrics, **kwargs): """Put the various losses in the recorder.""" return add_metrics(last_metrics, [s.smooth for (k, s) in self.smootheners.items()])
def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): """ Parameters: - db_name - tbl_name - part_vals - max_parts """ self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) return self.recv_get_partition_names_ps()
def function[get_partition_names_ps, parameter[self, db_name, tbl_name, part_vals, max_parts]]: constant[ Parameters: - db_name - tbl_name - part_vals - max_parts ] call[name[self].send_get_partition_names_ps, parameter[name[db_name], name[tbl_name], name[part_vals], name[max_parts]]] return[call[name[self].recv_get_partition_names_ps, parameter[]]]
keyword[def] identifier[get_partition_names_ps] ( identifier[self] , identifier[db_name] , identifier[tbl_name] , identifier[part_vals] , identifier[max_parts] ): literal[string] identifier[self] . identifier[send_get_partition_names_ps] ( identifier[db_name] , identifier[tbl_name] , identifier[part_vals] , identifier[max_parts] ) keyword[return] identifier[self] . identifier[recv_get_partition_names_ps] ()
def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts): """ Parameters: - db_name - tbl_name - part_vals - max_parts """ self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts) return self.recv_get_partition_names_ps()
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False): """Opens a new gcs file for writing.""" if use_tmp_bucket: bucket = cls._get_tmp_gcs_bucket(writer_spec) account_id = cls._get_tmp_account_id(writer_spec) else: bucket = cls._get_gcs_bucket(writer_spec) account_id = cls._get_account_id(writer_spec) # GoogleCloudStorage format for filenames, Initial slash is required filename = "/%s/%s" % (bucket, filename_suffix) content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None) options = {} if cls.ACL_PARAM in writer_spec: options["x-goog-acl"] = writer_spec.get(cls.ACL_PARAM) return cloudstorage.open(filename, mode="w", content_type=content_type, options=options, _account_id=account_id)
def function[_open_file, parameter[cls, writer_spec, filename_suffix, use_tmp_bucket]]: constant[Opens a new gcs file for writing.] if name[use_tmp_bucket] begin[:] variable[bucket] assign[=] call[name[cls]._get_tmp_gcs_bucket, parameter[name[writer_spec]]] variable[account_id] assign[=] call[name[cls]._get_tmp_account_id, parameter[name[writer_spec]]] variable[filename] assign[=] binary_operation[constant[/%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0510730>, <ast.Name object at 0x7da1b0510e80>]]] variable[content_type] assign[=] call[name[writer_spec].get, parameter[name[cls].CONTENT_TYPE_PARAM, constant[None]]] variable[options] assign[=] dictionary[[], []] if compare[name[cls].ACL_PARAM in name[writer_spec]] begin[:] call[name[options]][constant[x-goog-acl]] assign[=] call[name[writer_spec].get, parameter[name[cls].ACL_PARAM]] return[call[name[cloudstorage].open, parameter[name[filename]]]]
keyword[def] identifier[_open_file] ( identifier[cls] , identifier[writer_spec] , identifier[filename_suffix] , identifier[use_tmp_bucket] = keyword[False] ): literal[string] keyword[if] identifier[use_tmp_bucket] : identifier[bucket] = identifier[cls] . identifier[_get_tmp_gcs_bucket] ( identifier[writer_spec] ) identifier[account_id] = identifier[cls] . identifier[_get_tmp_account_id] ( identifier[writer_spec] ) keyword[else] : identifier[bucket] = identifier[cls] . identifier[_get_gcs_bucket] ( identifier[writer_spec] ) identifier[account_id] = identifier[cls] . identifier[_get_account_id] ( identifier[writer_spec] ) identifier[filename] = literal[string] %( identifier[bucket] , identifier[filename_suffix] ) identifier[content_type] = identifier[writer_spec] . identifier[get] ( identifier[cls] . identifier[CONTENT_TYPE_PARAM] , keyword[None] ) identifier[options] ={} keyword[if] identifier[cls] . identifier[ACL_PARAM] keyword[in] identifier[writer_spec] : identifier[options] [ literal[string] ]= identifier[writer_spec] . identifier[get] ( identifier[cls] . identifier[ACL_PARAM] ) keyword[return] identifier[cloudstorage] . identifier[open] ( identifier[filename] , identifier[mode] = literal[string] , identifier[content_type] = identifier[content_type] , identifier[options] = identifier[options] , identifier[_account_id] = identifier[account_id] )
def _open_file(cls, writer_spec, filename_suffix, use_tmp_bucket=False): """Opens a new gcs file for writing.""" if use_tmp_bucket: bucket = cls._get_tmp_gcs_bucket(writer_spec) account_id = cls._get_tmp_account_id(writer_spec) # depends on [control=['if'], data=[]] else: bucket = cls._get_gcs_bucket(writer_spec) account_id = cls._get_account_id(writer_spec) # GoogleCloudStorage format for filenames, Initial slash is required filename = '/%s/%s' % (bucket, filename_suffix) content_type = writer_spec.get(cls.CONTENT_TYPE_PARAM, None) options = {} if cls.ACL_PARAM in writer_spec: options['x-goog-acl'] = writer_spec.get(cls.ACL_PARAM) # depends on [control=['if'], data=['writer_spec']] return cloudstorage.open(filename, mode='w', content_type=content_type, options=options, _account_id=account_id)
def bar(self,xdata,ydata,disp=True,**kwargs): '''Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format data = combineData(xdata,ydata,self.xlabel) #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data':str(data), 'title':self.title, 'functionName':slugify(self.title), 'height':self.height, 'width':self.width, 'logScaleFlag':'false', 'ylabel':self.ylabel, 'plotType':'BarChart', 'numFig':self.numFig, 'other':other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile()
def function[bar, parameter[self, xdata, ydata, disp]]: constant[Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ] variable[data] assign[=] call[name[combineData], parameter[name[xdata], name[ydata], name[self].xlabel]] variable[other] assign[=] constant[] for taget[name[option]] in starred[name[kwargs]] begin[:] <ast.AugAssign object at 0x7da18ede7b50> variable[argDict] assign[=] dictionary[[<ast.Constant object at 0x7da18ede6cb0>, <ast.Constant object at 0x7da18ede42b0>, <ast.Constant object at 0x7da18ede7e20>, <ast.Constant object at 0x7da18ede7b80>, <ast.Constant object at 0x7da18ede6200>, <ast.Constant object at 0x7da18ede6c50>, <ast.Constant object at 0x7da18ede7640>, <ast.Constant object at 0x7da18ede7460>, <ast.Constant object at 0x7da18ede40d0>, <ast.Constant object at 0x7da18ede6ef0>], [<ast.Call object at 0x7da18ede46d0>, <ast.Attribute object at 0x7da18ede5ff0>, <ast.Call object at 0x7da18ede4400>, <ast.Attribute object at 0x7da18ede50c0>, <ast.Attribute object at 0x7da1b06199c0>, <ast.Constant object at 0x7da1b061aa70>, <ast.Attribute object at 0x7da1b0619300>, <ast.Constant object at 0x7da1b0619db0>, <ast.Attribute object at 0x7da1b06189a0>, <ast.Name object at 0x7da1b06187f0>]] name[self].javascript assign[=] binary_operation[call[name[templateType], parameter[name[xdata]]] <ast.Mod object at 0x7da2590d6920> name[argDict]] if name[disp] begin[:] call[name[self].dispFile, parameter[]]
keyword[def] identifier[bar] ( identifier[self] , identifier[xdata] , identifier[ydata] , identifier[disp] = keyword[True] ,** identifier[kwargs] ): literal[string] identifier[data] = identifier[combineData] ( identifier[xdata] , identifier[ydata] , identifier[self] . identifier[xlabel] ) identifier[other] = literal[string] keyword[for] identifier[option] keyword[in] identifier[kwargs] : identifier[other] += identifier[option] + literal[string] + identifier[kwargs] [ identifier[option] ]+ literal[string] identifier[argDict] ={ literal[string] : identifier[str] ( identifier[data] ), literal[string] : identifier[self] . identifier[title] , literal[string] : identifier[slugify] ( identifier[self] . identifier[title] ), literal[string] : identifier[self] . identifier[height] , literal[string] : identifier[self] . identifier[width] , literal[string] : literal[string] , literal[string] : identifier[self] . identifier[ylabel] , literal[string] : literal[string] , literal[string] : identifier[self] . identifier[numFig] , literal[string] : identifier[other] } identifier[self] . identifier[javascript] = identifier[templateType] ( identifier[xdata] )% identifier[argDict] keyword[if] identifier[disp] : identifier[self] . identifier[dispFile] ()
def bar(self, xdata, ydata, disp=True, **kwargs): """Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. """ #combine data into proper format data = combineData(xdata, ydata, self.xlabel) #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' # depends on [control=['for'], data=['option']] #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = {'data': str(data), 'title': self.title, 'functionName': slugify(self.title), 'height': self.height, 'width': self.width, 'logScaleFlag': 'false', 'ylabel': self.ylabel, 'plotType': 'BarChart', 'numFig': self.numFig, 'other': other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile() # depends on [control=['if'], data=[]]
def GetPathFromLink(resource_link, resource_type=''):
    """Gets path from resource link with optional resource type

    :param str resource_link:
    :param str resource_type:

    :return:
        Path from resource link with resource type appended (if provided).
    :rtype: str

    """
    trimmed = TrimBeginningAndEndingSlashes(resource_link)

    # Name-based links may contain characters (e.g. a space becomes %20) that
    # must be %xx-escaped for the path section of the URL; '/' stays unescaped
    # since it is the default safe character for path quoting.
    if IsNameBased(trimmed):
        trimmed = urllib_quote(trimmed)

    # Pad with leading and trailing slashes for both name-based and resource
    # id based links; append the resource type segment only when supplied.
    if not resource_type:
        return '/' + trimmed + '/'
    return '/' + trimmed + '/' + resource_type + '/'
def function[GetPathFromLink, parameter[resource_link, resource_type]]: constant[Gets path from resource link with optional resource type :param str resource_link: :param str resource_type: :return: Path from resource link with resource type appended (if provided). :rtype: str ] variable[resource_link] assign[=] call[name[TrimBeginningAndEndingSlashes], parameter[name[resource_link]]] if call[name[IsNameBased], parameter[name[resource_link]]] begin[:] variable[resource_link] assign[=] call[name[urllib_quote], parameter[name[resource_link]]] if name[resource_type] begin[:] return[binary_operation[binary_operation[binary_operation[binary_operation[constant[/] + name[resource_link]] + constant[/]] + name[resource_type]] + constant[/]]]
keyword[def] identifier[GetPathFromLink] ( identifier[resource_link] , identifier[resource_type] = literal[string] ): literal[string] identifier[resource_link] = identifier[TrimBeginningAndEndingSlashes] ( identifier[resource_link] ) keyword[if] identifier[IsNameBased] ( identifier[resource_link] ): identifier[resource_link] = identifier[urllib_quote] ( identifier[resource_link] ) keyword[if] identifier[resource_type] : keyword[return] literal[string] + identifier[resource_link] + literal[string] + identifier[resource_type] + literal[string] keyword[else] : keyword[return] literal[string] + identifier[resource_link] + literal[string]
def GetPathFromLink(resource_link, resource_type=''): """Gets path from resource link with optional resource type :param str resource_link: :param str resource_type: :return: Path from resource link with resource type appended (if provided). :rtype: str """ resource_link = TrimBeginningAndEndingSlashes(resource_link) if IsNameBased(resource_link): # Replace special characters in string using the %xx escape. For example, space(' ') would be replaced by %20 # This function is intended for quoting the path section of the URL and excludes '/' to be quoted as that's the default safe char resource_link = urllib_quote(resource_link) # depends on [control=['if'], data=[]] # Padding leading and trailing slashes to the path returned both for name based and resource id based links if resource_type: return '/' + resource_link + '/' + resource_type + '/' # depends on [control=['if'], data=[]] else: return '/' + resource_link + '/'
def bytes_block_cast(block, include_text=True, include_link_tokens=True,
                     include_css=True, include_features=True, **kwargs):
    """
    Converts any string-like items in input Block object to bytes-like
    values, with respect to python version

    Parameters
    ----------
    block : blocks.Block
        any string-like objects contained in the block object will be
        converted to bytes
    include_text : bool, default=True
        if True, cast text to bytes, else ignore
    include_link_tokens : bool, default=True
        if True, cast link_tokens to bytes, else ignore
    include_css : bool, default=True
        if True, cast css to bytes, else ignore
    include_features : bool, default=True
        if True, cast features to bytes, else ignore
    kwargs:
        encoding: str, default: 'utf-8'
            encoding to be used when encoding string
    """
    # Drive the four optional conversions from one table: (enabled?, attribute
    # name, caster). Order matches the original text/link_tokens/css/features
    # sequence.
    conversions = (
        (include_text, 'text', bytes_cast),
        (include_link_tokens, 'link_tokens', bytes_list_cast),
        (include_css, 'css', bytes_dict_cast),
        (include_features, 'features', bytes_dict_cast),
    )
    for enabled, attr, caster in conversions:
        if enabled:
            setattr(block, attr, caster(getattr(block, attr), **kwargs))
    return block
def function[bytes_block_cast, parameter[block, include_text, include_link_tokens, include_css, include_features]]: constant[ Converts any string-like items in input Block object to bytes-like values, with respect to python version Parameters ---------- block : blocks.Block any string-like objects contained in the block object will be converted to bytes include_text : bool, default=True if True, cast text to bytes, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to bytes, else ignore include_css : bool, default=True if True, cast css to bytes, else ignore include_features : bool, default=True if True, cast features to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string ] if name[include_text] begin[:] name[block].text assign[=] call[name[bytes_cast], parameter[name[block].text]] if name[include_link_tokens] begin[:] name[block].link_tokens assign[=] call[name[bytes_list_cast], parameter[name[block].link_tokens]] if name[include_css] begin[:] name[block].css assign[=] call[name[bytes_dict_cast], parameter[name[block].css]] if name[include_features] begin[:] name[block].features assign[=] call[name[bytes_dict_cast], parameter[name[block].features]] return[name[block]]
keyword[def] identifier[bytes_block_cast] ( identifier[block] , identifier[include_text] = keyword[True] , identifier[include_link_tokens] = keyword[True] , identifier[include_css] = keyword[True] , identifier[include_features] = keyword[True] , ** identifier[kwargs] ): literal[string] keyword[if] identifier[include_text] : identifier[block] . identifier[text] = identifier[bytes_cast] ( identifier[block] . identifier[text] ,** identifier[kwargs] ) keyword[if] identifier[include_link_tokens] : identifier[block] . identifier[link_tokens] = identifier[bytes_list_cast] ( identifier[block] . identifier[link_tokens] ,** identifier[kwargs] ) keyword[if] identifier[include_css] : identifier[block] . identifier[css] = identifier[bytes_dict_cast] ( identifier[block] . identifier[css] ,** identifier[kwargs] ) keyword[if] identifier[include_features] : identifier[block] . identifier[features] = identifier[bytes_dict_cast] ( identifier[block] . identifier[features] ,** identifier[kwargs] ) keyword[return] identifier[block]
def bytes_block_cast(block, include_text=True, include_link_tokens=True, include_css=True, include_features=True, **kwargs): """ Converts any string-like items in input Block object to bytes-like values, with respect to python version Parameters ---------- block : blocks.Block any string-like objects contained in the block object will be converted to bytes include_text : bool, default=True if True, cast text to bytes, else ignore include_link_tokens : bool, default=True if True, cast link_tokens to bytes, else ignore include_css : bool, default=True if True, cast css to bytes, else ignore include_features : bool, default=True if True, cast features to bytes, else ignore kwargs: encoding: str, default: 'utf-8' encoding to be used when encoding string """ if include_text: block.text = bytes_cast(block.text, **kwargs) # depends on [control=['if'], data=[]] if include_link_tokens: block.link_tokens = bytes_list_cast(block.link_tokens, **kwargs) # depends on [control=['if'], data=[]] if include_css: block.css = bytes_dict_cast(block.css, **kwargs) # depends on [control=['if'], data=[]] if include_features: block.features = bytes_dict_cast(block.features, **kwargs) # depends on [control=['if'], data=[]] return block