code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def outcomes(self, outcomes):
    """ Setter for _outcomes field

    See property.

    :param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of
        type :class:`rafcon.core.state_elements.logical_port.Outcome`
    :raises exceptions.TypeError: if outcomes parameter has the wrong type
    :raises exceptions.AttributeError: if the key of the outcome dictionary and the id of the outcome
        do not match
    """
    if not isinstance(outcomes, dict):
        raise TypeError("outcomes must be of type dict")
    if any(not isinstance(outcome, Outcome) for outcome in outcomes.values()):
        raise TypeError("element of outcomes must be of type Outcome")
    if any(outcome_id != outcome.outcome_id for outcome_id, outcome in outcomes.items()):
        raise AttributeError("The key of the outcomes dictionary and the id of the outcome do not match")

    old_outcomes = self.outcomes
    self._outcomes = outcomes
    for outcome in outcomes.values():
        try:
            outcome.parent = self
        except ValueError:
            # Re-parenting was rejected: roll back to the previous outcomes before re-raising.
            self._outcomes = old_outcomes
            raise

    # aborted (-1) and preempted (-2) must always exist
    if -1 not in outcomes:
        self._outcomes[-1] = Outcome(outcome_id=-1, name="aborted", parent=self)
    if -2 not in outcomes:
        self._outcomes[-2] = Outcome(outcome_id=-2, name="preempted", parent=self)

    # check that all old_outcomes are no longer referencing self as their parent
    # (membership list hoisted out of the loop; it is not mutated by clearing parents)
    current_outcomes = list(self._outcomes.values())
    for old_outcome in old_outcomes.values():
        if old_outcome not in current_outcomes and old_outcome.parent is self:
            old_outcome.parent = None
def function[outcomes, parameter[self, outcomes]]: constant[ Setter for _outcomes field See property. :param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of type :class:`rafcon.core.state_elements.logical_port.Outcome` :raises exceptions.TypeError: if outcomes parameter has the wrong type :raises exceptions.AttributeError: if the key of the outcome dictionary and the id of the outcome do not match ] if <ast.UnaryOp object at 0x7da1b1a3fca0> begin[:] <ast.Raise object at 0x7da1b1a3f220> if <ast.ListComp object at 0x7da1b1a3d600> begin[:] <ast.Raise object at 0x7da1b1a3c730> if <ast.ListComp object at 0x7da1b1a3c640> begin[:] <ast.Raise object at 0x7da1b1a3f2b0> variable[old_outcomes] assign[=] name[self].outcomes name[self]._outcomes assign[=] name[outcomes] for taget[tuple[[<ast.Name object at 0x7da1b1a3c2b0>, <ast.Name object at 0x7da1b1a3da50>]]] in starred[call[name[outcomes].items, parameter[]]] begin[:] <ast.Try object at 0x7da1b1a3d4e0> if compare[<ast.UnaryOp object at 0x7da1b1a3fa90> <ast.NotIn object at 0x7da2590d7190> name[outcomes]] begin[:] call[name[self]._outcomes][<ast.UnaryOp object at 0x7da1b1a3c370>] assign[=] call[name[Outcome], parameter[]] if compare[<ast.UnaryOp object at 0x7da1b1a3cca0> <ast.NotIn object at 0x7da2590d7190> name[outcomes]] begin[:] call[name[self]._outcomes][<ast.UnaryOp object at 0x7da1b1a3c940>] assign[=] call[name[Outcome], parameter[]] for taget[name[old_outcome]] in starred[call[name[old_outcomes].values, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da1b1a28820> begin[:] name[old_outcome].parent assign[=] constant[None]
keyword[def] identifier[outcomes] ( identifier[self] , identifier[outcomes] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[outcomes] , identifier[dict] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] [ identifier[outcome_id] keyword[for] identifier[outcome_id] , identifier[outcome] keyword[in] identifier[outcomes] . identifier[items] () keyword[if] keyword[not] identifier[isinstance] ( identifier[outcome] , identifier[Outcome] )]: keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] [ identifier[outcome_id] keyword[for] identifier[outcome_id] , identifier[outcome] keyword[in] identifier[outcomes] . identifier[items] () keyword[if] keyword[not] identifier[outcome_id] == identifier[outcome] . identifier[outcome_id] ]: keyword[raise] identifier[AttributeError] ( literal[string] ) identifier[old_outcomes] = identifier[self] . identifier[outcomes] identifier[self] . identifier[_outcomes] = identifier[outcomes] keyword[for] identifier[outcome_id] , identifier[outcome] keyword[in] identifier[outcomes] . identifier[items] (): keyword[try] : identifier[outcome] . identifier[parent] = identifier[self] keyword[except] identifier[ValueError] : identifier[self] . identifier[_outcomes] = identifier[old_outcomes] keyword[raise] keyword[if] - literal[int] keyword[not] keyword[in] identifier[outcomes] : identifier[self] . identifier[_outcomes] [- literal[int] ]= identifier[Outcome] ( identifier[outcome_id] =- literal[int] , identifier[name] = literal[string] , identifier[parent] = identifier[self] ) keyword[if] - literal[int] keyword[not] keyword[in] identifier[outcomes] : identifier[self] . identifier[_outcomes] [- literal[int] ]= identifier[Outcome] ( identifier[outcome_id] =- literal[int] , identifier[name] = literal[string] , identifier[parent] = identifier[self] ) keyword[for] identifier[old_outcome] keyword[in] identifier[old_outcomes] . 
identifier[values] (): keyword[if] identifier[old_outcome] keyword[not] keyword[in] identifier[iter] ( identifier[list] ( identifier[self] . identifier[_outcomes] . identifier[values] ())) keyword[and] identifier[old_outcome] . identifier[parent] keyword[is] identifier[self] : identifier[old_outcome] . identifier[parent] = keyword[None]
def outcomes(self, outcomes): """ Setter for _outcomes field See property. :param dict outcomes: Dictionary outcomes[outcome_id] that maps :class:`int` outcome_ids onto values of type :class:`rafcon.core.state_elements.logical_port.Outcome` :raises exceptions.TypeError: if outcomes parameter has the wrong type :raises exceptions.AttributeError: if the key of the outcome dictionary and the id of the outcome do not match """ if not isinstance(outcomes, dict): raise TypeError('outcomes must be of type dict') # depends on [control=['if'], data=[]] if [outcome_id for (outcome_id, outcome) in outcomes.items() if not isinstance(outcome, Outcome)]: raise TypeError('element of outcomes must be of type Outcome') # depends on [control=['if'], data=[]] if [outcome_id for (outcome_id, outcome) in outcomes.items() if not outcome_id == outcome.outcome_id]: raise AttributeError('The key of the outcomes dictionary and the id of the outcome do not match') # depends on [control=['if'], data=[]] old_outcomes = self.outcomes self._outcomes = outcomes for (outcome_id, outcome) in outcomes.items(): try: outcome.parent = self # depends on [control=['try'], data=[]] except ValueError: self._outcomes = old_outcomes raise # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # aborted and preempted must always exist if -1 not in outcomes: self._outcomes[-1] = Outcome(outcome_id=-1, name='aborted', parent=self) # depends on [control=['if'], data=[]] if -2 not in outcomes: self._outcomes[-2] = Outcome(outcome_id=-2, name='preempted', parent=self) # depends on [control=['if'], data=[]] # check that all old_outcomes are no more referencing self as there parent for old_outcome in old_outcomes.values(): if old_outcome not in iter(list(self._outcomes.values())) and old_outcome.parent is self: old_outcome.parent = None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['old_outcome']]
def update(self, quality_score, issue=values.unset):
    """
    Update the FeedbackInstance

    :param unicode quality_score: The call quality expressed as an integer from 1 to 5
    :param FeedbackInstance.Issues issue: Issues experienced during the call

    :returns: Updated FeedbackInstance
    :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
    """
    # Delegate the actual API call to the underlying context proxy.
    updated_instance = self._proxy.update(quality_score, issue=issue)
    return updated_instance
def function[update, parameter[self, quality_score, issue]]: constant[ Update the FeedbackInstance :param unicode quality_score: The call quality expressed as an integer from 1 to 5 :param FeedbackInstance.Issues issue: Issues experienced during the call :returns: Updated FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance ] return[call[name[self]._proxy.update, parameter[name[quality_score]]]]
keyword[def] identifier[update] ( identifier[self] , identifier[quality_score] , identifier[issue] = identifier[values] . identifier[unset] ): literal[string] keyword[return] identifier[self] . identifier[_proxy] . identifier[update] ( identifier[quality_score] , identifier[issue] = identifier[issue] ,)
def update(self, quality_score, issue=values.unset): """ Update the FeedbackInstance :param unicode quality_score: The call quality expressed as an integer from 1 to 5 :param FeedbackInstance.Issues issue: Issues experienced during the call :returns: Updated FeedbackInstance :rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance """ return self._proxy.update(quality_score, issue=issue)
def from_unknown_text(text, strict=False):
    """
    Detect crs string format and parse into crs object with appropriate function.

    Arguments:

    - *text*: The crs text representation of unknown type.
    - *strict* (optional): When True, the parser is strict about names having to match
        exactly with upper and lowercases. Default is not strict (False).

    Returns:

    - CRS object.
    """
    # Full-text formats are recognized by their leading characters.
    if text.startswith("+"):
        return from_proj4(text, strict)
    if text.startswith(("PROJCS[", "GEOGCS[")):
        return from_unknown_wkt(text, strict)
    #elif text.startswith("urn:"):
    #    crs = from_ogc_urn(text, strict)

    # Authority-code formats share the "<AUTHORITY>:<code>" shape; dispatch by prefix.
    for prefix, parser in (("EPSG:", from_epsg_code),
                           ("ESRI:", from_esri_code),
                           ("SR-ORG:", from_sr_code)):
        if text.startswith(prefix):
            return parser(text.split(":")[1])

    raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats")
def function[from_unknown_text, parameter[text, strict]]: constant[ Detect crs string format and parse into crs object with appropriate function. Arguments: - *text*: The crs text representation of unknown type. - *strict* (optional): When True, the parser is strict about names having to match exactly with upper and lowercases. Default is not strict (False). Returns: - CRS object. ] if call[name[text].startswith, parameter[constant[+]]] begin[:] variable[crs] assign[=] call[name[from_proj4], parameter[name[text], name[strict]]] return[name[crs]]
keyword[def] identifier[from_unknown_text] ( identifier[text] , identifier[strict] = keyword[False] ): literal[string] keyword[if] identifier[text] . identifier[startswith] ( literal[string] ): identifier[crs] = identifier[from_proj4] ( identifier[text] , identifier[strict] ) keyword[elif] identifier[text] . identifier[startswith] (( literal[string] , literal[string] )): identifier[crs] = identifier[from_unknown_wkt] ( identifier[text] , identifier[strict] ) keyword[elif] identifier[text] . identifier[startswith] ( literal[string] ): identifier[crs] = identifier[from_epsg_code] ( identifier[text] . identifier[split] ( literal[string] )[ literal[int] ]) keyword[elif] identifier[text] . identifier[startswith] ( literal[string] ): identifier[crs] = identifier[from_esri_code] ( identifier[text] . identifier[split] ( literal[string] )[ literal[int] ]) keyword[elif] identifier[text] . identifier[startswith] ( literal[string] ): identifier[crs] = identifier[from_sr_code] ( identifier[text] . identifier[split] ( literal[string] )[ literal[int] ]) keyword[else] : keyword[raise] identifier[FormatError] ( literal[string] ) keyword[return] identifier[crs]
def from_unknown_text(text, strict=False): """ Detect crs string format and parse into crs object with appropriate function. Arguments: - *text*: The crs text representation of unknown type. - *strict* (optional): When True, the parser is strict about names having to match exactly with upper and lowercases. Default is not strict (False). Returns: - CRS object. """ if text.startswith('+'): crs = from_proj4(text, strict) # depends on [control=['if'], data=[]] elif text.startswith(('PROJCS[', 'GEOGCS[')): crs = from_unknown_wkt(text, strict) # depends on [control=['if'], data=[]] #elif text.startswith("urn:"): # crs = from_ogc_urn(text, strict) elif text.startswith('EPSG:'): crs = from_epsg_code(text.split(':')[1]) # depends on [control=['if'], data=[]] elif text.startswith('ESRI:'): crs = from_esri_code(text.split(':')[1]) # depends on [control=['if'], data=[]] elif text.startswith('SR-ORG:'): crs = from_sr_code(text.split(':')[1]) # depends on [control=['if'], data=[]] else: raise FormatError('Could not auto-detect the type of crs format, make sure it is one of the supported formats') return crs
def run(engine, parameters, components_paths=None, requisite_components=None, visible_components=None):
    """
    Starts the Application.

    :param engine: Engine.
    :type engine: QObject
    :param parameters: Command line parameters.
    :type parameters: tuple
    :param components_paths: Components components_paths.
    :type components_paths: tuple or list
    :param requisite_components: Requisite components names.
    :type requisite_components: tuple or list
    :param visible_components: Visible components names.
    :type visible_components: tuple or list
    :return: Definition success.
    :rtype: bool
    """

    # Command line parameters handling.
    RuntimeGlobals.parameters, RuntimeGlobals.arguments = parameters

    foundations.trace.evaluate_trace_request(RuntimeGlobals.parameters.trace_modules, foundations.verbose.tracer)

    # "--about" short-circuits startup: print the session header and exit.
    if RuntimeGlobals.parameters.about:
        for line in SESSION_HEADER_TEXT:
            sys.stdout.write("{0}\n".format(line))
        foundations.core.exit(1)

    # Redirecting standard output and error messages.
    sys.stdout = foundations.verbose.StandardOutputStreamer(LOGGER)
    sys.stderr = foundations.verbose.StandardOutputStreamer(LOGGER)

    # Setting application verbose level.
    foundations.verbose.set_verbosity_level(4)

    # Setting user application data directory.
    # The command-line value, when given, overrides the environment-derived default.
    if RuntimeGlobals.parameters.user_application_data_directory:
        user_application_data_directory = RuntimeGlobals.user_application_data_directory = \
            RuntimeGlobals.parameters.user_application_data_directory
    else:
        user_application_data_directory = RuntimeGlobals.user_application_data_directory = \
            foundations.environment.get_user_application_data_directory()

    if not set_user_application_data_directory(user_application_data_directory):
        raise umbra.exceptions.EngineConfigurationError(
            "{0} | '{1}' user Application data directory is not available, '{2}' will now close!".format(
                __name__, RuntimeGlobals.user_application_data_directory, Constants.application_name))

    # Falling back to a temporary directory means the preferred location was unusable; warn the user.
    if foundations.environment.get_temporary_directory() in user_application_data_directory:
        umbra.ui.widgets.message_box.message_box("Error", "Error",
                                                 "{0} failed to use the default user Application data directory to store its preferences \
and has defaulted to the following directory:\n\n\t'{1}'.\n\nReasons for this are various:\n\
\t- Undefined 'APPDATA' ( Windows ) or 'HOME' ( Mac Os X, Linux ) environment variables.\n\
\t- User name with non 'UTF-8' encoding compliant characters.\n\
\t- Non 'UTF-8' encoding compliant characters in the preferences directory path.\n\n\
You will have to define your own preferences directory by launching {0} with the \
'-u \"path\\to\\the\\custom\\preferences\\directory\"' command line parameter.".format(
                                                     Constants.application_name, user_application_data_directory))

    LOGGER.debug("> Application Python interpreter: '{0}'".format(sys.executable))
    LOGGER.debug("> Application PyQt version: '{0}'".format(PYQT_VERSION_STR))
    LOGGER.debug("> Application startup location: '{0}'".format(os.getcwd()))
    LOGGER.debug("> Session user Application data directory: '{0}'".format(
        RuntimeGlobals.user_application_data_directory))

    LOGGER.debug("> Initializing '{0}'!".format(Constants.application_name))

    # Getting the logging file path.
    RuntimeGlobals.logging_file = get_logging_file()
    RuntimeGlobals.logging_file_handler = foundations.verbose.get_logging_file_handler(
        file=RuntimeGlobals.logging_file)

    # Getting the patches file path.
    RuntimeGlobals.patches_file = os.path.join(RuntimeGlobals.user_application_data_directory,
                                               Constants.patches_directory,
                                               Constants.patches_file)
    # Initializing the patches manager.
    RuntimeGlobals.patches_manager = umbra.managers.patches_manager.PatchesManager(
        RuntimeGlobals.patches_file,
        [os.path.join(path, Constants.patches_directory) for path in RuntimeGlobals.resources_directories])
    # NOTE(review): "and" is used here for short-circuit control flow — apply_patches() only
    # runs when register_patches() returns a truthy value.
    RuntimeGlobals.patches_manager.register_patches() and RuntimeGlobals.patches_manager.apply_patches()

    # Retrieving settings file.
    RuntimeGlobals.settings_file = os.path.join(RuntimeGlobals.user_application_data_directory,
                                                Constants.settings_directory,
                                                Constants.settings_file)
    RuntimeGlobals.settings = Preferences(RuntimeGlobals.settings_file)

    LOGGER.debug("> Retrieving default layouts.")
    RuntimeGlobals.settings.set_default_layouts(("startup_centric",))

    # Short-circuit "or": default preferences are only written when the settings file is missing.
    foundations.common.path_exists(RuntimeGlobals.settings_file) or RuntimeGlobals.settings.set_default_preferences()

    LOGGER.debug("> Retrieving stored verbose level.")
    # Command-line verbosity wins over the stored setting.
    RuntimeGlobals.verbosity_level = RuntimeGlobals.parameters.verbosity_level \
        if RuntimeGlobals.parameters.verbosity_level is not None else \
        foundations.common.get_first_item(RuntimeGlobals.settings.get_key("Settings", "verbosity_level").toInt())
    LOGGER.debug("> Setting logger verbosity level to: '{0}'.".format(RuntimeGlobals.verbosity_level))
    foundations.verbose.set_verbosity_level(RuntimeGlobals.verbosity_level)
    RuntimeGlobals.settings.set_key("Settings", "verbosity_level", RuntimeGlobals.verbosity_level)

    LOGGER.debug("> Retrieving stored logging formatter.")
    logging_formatter = RuntimeGlobals.parameters.logging_formatter if RuntimeGlobals.parameters.logging_formatter is not None else \
        foundations.strings.to_string(RuntimeGlobals.settings.get_key("Settings",
                                                                      "logging_formatter").toString())
    # Unknown formatter names fall back to the application default.
    logging_formatter = logging_formatter if logging_formatter in RuntimeGlobals.logging_formatters else None
    RuntimeGlobals.logging_active_formatter = logging_formatter if logging_formatter is not None else Constants.logging_default_formatter
    LOGGER.debug("> Setting logging formatter: '{0}'.".format(RuntimeGlobals.logging_active_formatter))
    for handler in (RuntimeGlobals.logging_console_handler, RuntimeGlobals.logging_file_handler):
        # A handler may be None; the "and" guard skips it.
        handler and handler.setFormatter(RuntimeGlobals.logging_formatters[RuntimeGlobals.logging_active_formatter])

    # Starting the session handler.
    RuntimeGlobals.logging_session_handler = foundations.verbose.get_logging_stream_handler()
    RuntimeGlobals.logging_session_handler_stream = RuntimeGlobals.logging_session_handler.stream

    LOGGER.info(Constants.logging_separators)
    for line in SESSION_HEADER_TEXT:
        LOGGER.info(line)
    LOGGER.info("{0} | Session started at: {1}".format(Constants.application_name, time.strftime('%X - %x')))
    LOGGER.info(Constants.logging_separators)
    LOGGER.info("{0} | Starting Interface!".format(Constants.application_name))

    # Initializing splashscreen.
    if RuntimeGlobals.parameters.hide_splash_screen:
        LOGGER.debug("> SplashScreen skipped by 'hide_splash_screen' command line parameter.")
    else:
        LOGGER.debug("> Initializing splashscreen.")
        RuntimeGlobals.splashscreen_image = QPixmap(umbra.ui.common.get_resource_path(UiConstants.splash_screen_image))
        RuntimeGlobals.splashscreen = Delayed_QSplashScreen(RuntimeGlobals.splashscreen_image, text_color=Qt.white)
        RuntimeGlobals.splashscreen.show_message(
            "{0} - {1} | Initializing {0}.".format(Constants.application_name, Constants.version))
        RuntimeGlobals.splashscreen.show()

    # Initializing requests stack.
    RuntimeGlobals.requests_stack = collections.deque()

    # Initializing engine.
    RuntimeGlobals.engine = engine(parent=None,
                                   components_paths=components_paths,
                                   requisite_components=requisite_components,
                                   visible_components=visible_components,
                                   splashscreen=RuntimeGlobals.splashscreen,
                                   requests_stack=RuntimeGlobals.requests_stack,
                                   patches_manager=RuntimeGlobals.patches_manager,
                                   user_application_data_directory=RuntimeGlobals.user_application_data_directory,
                                   logging_session_handler=RuntimeGlobals.logging_session_handler,
                                   logging_file_handler=RuntimeGlobals.logging_file_handler,
                                   logging_console_handler=RuntimeGlobals.logging_console_handler,
                                   logging_session_handler_stream=RuntimeGlobals.logging_session_handler_stream,
                                   logging_active_formatter=RuntimeGlobals.logging_active_formatter,
                                   settings=RuntimeGlobals.settings,
                                   verbosity_level=RuntimeGlobals.verbosity_level,
                                   parameters=RuntimeGlobals.parameters,
                                   arguments=RuntimeGlobals.arguments)
    RuntimeGlobals.engine.show()
    RuntimeGlobals.engine.raise_()

    # NOTE(review): exits via RuntimeGlobals.application, which is not assigned in this
    # function — presumably set by the caller before the Qt event loop starts; confirm.
    return sys.exit(RuntimeGlobals.application.exec_())
def function[run, parameter[engine, parameters, components_paths, requisite_components, visible_components]]: constant[ Starts the Application. :param engine: Engine. :type engine: QObject :param parameters: Command line parameters. :type parameters: tuple :param components_paths: Components components_paths. :type components_paths: tuple or list :param requisite_components: Requisite components names. :type requisite_components: tuple or list :param visible_components: Visible components names. :type visible_components: tuple or list :return: Definition success. :rtype: bool ] <ast.Tuple object at 0x7da1b0945270> assign[=] name[parameters] call[name[foundations].trace.evaluate_trace_request, parameter[name[RuntimeGlobals].parameters.trace_modules, name[foundations].verbose.tracer]] if name[RuntimeGlobals].parameters.about begin[:] for taget[name[line]] in starred[name[SESSION_HEADER_TEXT]] begin[:] call[name[sys].stdout.write, parameter[call[constant[{0} ].format, parameter[name[line]]]]] call[name[foundations].core.exit, parameter[constant[1]]] name[sys].stdout assign[=] call[name[foundations].verbose.StandardOutputStreamer, parameter[name[LOGGER]]] name[sys].stderr assign[=] call[name[foundations].verbose.StandardOutputStreamer, parameter[name[LOGGER]]] call[name[foundations].verbose.set_verbosity_level, parameter[constant[4]]] if name[RuntimeGlobals].parameters.user_application_data_directory begin[:] variable[user_application_data_directory] assign[=] name[RuntimeGlobals].parameters.user_application_data_directory if <ast.UnaryOp object at 0x7da1b0946f80> begin[:] <ast.Raise object at 0x7da1b0945f30> if compare[call[name[foundations].environment.get_temporary_directory, parameter[]] in name[user_application_data_directory]] begin[:] call[name[umbra].ui.widgets.message_box.message_box, parameter[constant[Error], constant[Error], call[constant[{0} failed to use the default user Application data directory to store its preferences and has defaulted to the 
following directory: '{1}'. Reasons for this are various: - Undefined 'APPDATA' ( Windows ) or 'HOME' ( Mac Os X, Linux ) environment variables. - User name with non 'UTF-8' encoding compliant characters. - Non 'UTF-8' encoding compliant characters in the preferences directory path. You will have to define your own preferences directory by launching {0} with the '-u "path\to\the\custom\preferences\directory"' command line parameter.].format, parameter[name[Constants].application_name, name[user_application_data_directory]]]]] call[name[LOGGER].debug, parameter[call[constant[> Application Python interpreter: '{0}'].format, parameter[name[sys].executable]]]] call[name[LOGGER].debug, parameter[call[constant[> Application PyQt version: '{0}'].format, parameter[name[PYQT_VERSION_STR]]]]] call[name[LOGGER].debug, parameter[call[constant[> Application startup location: '{0}'].format, parameter[call[name[os].getcwd, parameter[]]]]]] call[name[LOGGER].debug, parameter[call[constant[> Session user Application data directory: '{0}'].format, parameter[name[RuntimeGlobals].user_application_data_directory]]]] call[name[LOGGER].debug, parameter[call[constant[> Initializing '{0}'!].format, parameter[name[Constants].application_name]]]] name[RuntimeGlobals].logging_file assign[=] call[name[get_logging_file], parameter[]] name[RuntimeGlobals].logging_file_handler assign[=] call[name[foundations].verbose.get_logging_file_handler, parameter[]] name[RuntimeGlobals].patches_file assign[=] call[name[os].path.join, parameter[name[RuntimeGlobals].user_application_data_directory, name[Constants].patches_directory, name[Constants].patches_file]] name[RuntimeGlobals].patches_manager assign[=] call[name[umbra].managers.patches_manager.PatchesManager, parameter[name[RuntimeGlobals].patches_file, <ast.ListComp object at 0x7da1b0946710>]] <ast.BoolOp object at 0x7da1b0944130> name[RuntimeGlobals].settings_file assign[=] call[name[os].path.join, 
parameter[name[RuntimeGlobals].user_application_data_directory, name[Constants].settings_directory, name[Constants].settings_file]] name[RuntimeGlobals].settings assign[=] call[name[Preferences], parameter[name[RuntimeGlobals].settings_file]] call[name[LOGGER].debug, parameter[constant[> Retrieving default layouts.]]] call[name[RuntimeGlobals].settings.set_default_layouts, parameter[tuple[[<ast.Constant object at 0x7da1b09e9030>]]]] <ast.BoolOp object at 0x7da1b09e9390> call[name[LOGGER].debug, parameter[constant[> Retrieving stored verbose level.]]] name[RuntimeGlobals].verbosity_level assign[=] <ast.IfExp object at 0x7da1b09eb220> call[name[LOGGER].debug, parameter[call[constant[> Setting logger verbosity level to: '{0}'.].format, parameter[name[RuntimeGlobals].verbosity_level]]]] call[name[foundations].verbose.set_verbosity_level, parameter[name[RuntimeGlobals].verbosity_level]] call[name[RuntimeGlobals].settings.set_key, parameter[constant[Settings], constant[verbosity_level], name[RuntimeGlobals].verbosity_level]] call[name[LOGGER].debug, parameter[constant[> Retrieving stored logging formatter.]]] variable[logging_formatter] assign[=] <ast.IfExp object at 0x7da1b09eb970> variable[logging_formatter] assign[=] <ast.IfExp object at 0x7da1b09eb5e0> name[RuntimeGlobals].logging_active_formatter assign[=] <ast.IfExp object at 0x7da1b09e8100> call[name[LOGGER].debug, parameter[call[constant[> Setting logging formatter: '{0}'.].format, parameter[name[RuntimeGlobals].logging_active_formatter]]]] for taget[name[handler]] in starred[tuple[[<ast.Attribute object at 0x7da1b09107c0>, <ast.Attribute object at 0x7da1b09111e0>]]] begin[:] <ast.BoolOp object at 0x7da1b0911cc0> name[RuntimeGlobals].logging_session_handler assign[=] call[name[foundations].verbose.get_logging_stream_handler, parameter[]] name[RuntimeGlobals].logging_session_handler_stream assign[=] name[RuntimeGlobals].logging_session_handler.stream call[name[LOGGER].info, 
parameter[name[Constants].logging_separators]] for taget[name[line]] in starred[name[SESSION_HEADER_TEXT]] begin[:] call[name[LOGGER].info, parameter[name[line]]] call[name[LOGGER].info, parameter[call[constant[{0} | Session started at: {1}].format, parameter[name[Constants].application_name, call[name[time].strftime, parameter[constant[%X - %x]]]]]]] call[name[LOGGER].info, parameter[name[Constants].logging_separators]] call[name[LOGGER].info, parameter[call[constant[{0} | Starting Interface!].format, parameter[name[Constants].application_name]]]] if name[RuntimeGlobals].parameters.hide_splash_screen begin[:] call[name[LOGGER].debug, parameter[constant[> SplashScreen skipped by 'hide_splash_screen' command line parameter.]]] name[RuntimeGlobals].requests_stack assign[=] call[name[collections].deque, parameter[]] name[RuntimeGlobals].engine assign[=] call[name[engine], parameter[]] call[name[RuntimeGlobals].engine.show, parameter[]] call[name[RuntimeGlobals].engine.raise_, parameter[]] return[call[name[sys].exit, parameter[call[name[RuntimeGlobals].application.exec_, parameter[]]]]]
keyword[def] identifier[run] ( identifier[engine] , identifier[parameters] , identifier[components_paths] = keyword[None] , identifier[requisite_components] = keyword[None] , identifier[visible_components] = keyword[None] ): literal[string] identifier[RuntimeGlobals] . identifier[parameters] , identifier[RuntimeGlobals] . identifier[arguments] = identifier[parameters] identifier[foundations] . identifier[trace] . identifier[evaluate_trace_request] ( identifier[RuntimeGlobals] . identifier[parameters] . identifier[trace_modules] , identifier[foundations] . identifier[verbose] . identifier[tracer] ) keyword[if] identifier[RuntimeGlobals] . identifier[parameters] . identifier[about] : keyword[for] identifier[line] keyword[in] identifier[SESSION_HEADER_TEXT] : identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] . identifier[format] ( identifier[line] )) identifier[foundations] . identifier[core] . identifier[exit] ( literal[int] ) identifier[sys] . identifier[stdout] = identifier[foundations] . identifier[verbose] . identifier[StandardOutputStreamer] ( identifier[LOGGER] ) identifier[sys] . identifier[stderr] = identifier[foundations] . identifier[verbose] . identifier[StandardOutputStreamer] ( identifier[LOGGER] ) identifier[foundations] . identifier[verbose] . identifier[set_verbosity_level] ( literal[int] ) keyword[if] identifier[RuntimeGlobals] . identifier[parameters] . identifier[user_application_data_directory] : identifier[user_application_data_directory] = identifier[RuntimeGlobals] . identifier[user_application_data_directory] = identifier[RuntimeGlobals] . identifier[parameters] . identifier[user_application_data_directory] keyword[else] : identifier[user_application_data_directory] = identifier[RuntimeGlobals] . identifier[user_application_data_directory] = identifier[foundations] . identifier[environment] . 
identifier[get_user_application_data_directory] () keyword[if] keyword[not] identifier[set_user_application_data_directory] ( identifier[user_application_data_directory] ): keyword[raise] identifier[umbra] . identifier[exceptions] . identifier[EngineConfigurationError] ( literal[string] . identifier[format] ( identifier[__name__] , identifier[RuntimeGlobals] . identifier[user_application_data_directory] , identifier[Constants] . identifier[application_name] )) keyword[if] identifier[foundations] . identifier[environment] . identifier[get_temporary_directory] () keyword[in] identifier[user_application_data_directory] : identifier[umbra] . identifier[ui] . identifier[widgets] . identifier[message_box] . identifier[message_box] ( literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[Constants] . identifier[application_name] , identifier[user_application_data_directory] )) identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[sys] . identifier[executable] )) identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[PYQT_VERSION_STR] )) identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[os] . identifier[getcwd] ())) identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[RuntimeGlobals] . identifier[user_application_data_directory] )) identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[Constants] . identifier[application_name] )) identifier[RuntimeGlobals] . identifier[logging_file] = identifier[get_logging_file] () identifier[RuntimeGlobals] . identifier[logging_file_handler] = identifier[foundations] . identifier[verbose] . identifier[get_logging_file_handler] ( identifier[file] = identifier[RuntimeGlobals] . identifier[logging_file] ) identifier[RuntimeGlobals] . identifier[patches_file] = identifier[os] . identifier[path] . 
identifier[join] ( identifier[RuntimeGlobals] . identifier[user_application_data_directory] , identifier[Constants] . identifier[patches_directory] , identifier[Constants] . identifier[patches_file] ) identifier[RuntimeGlobals] . identifier[patches_manager] = identifier[umbra] . identifier[managers] . identifier[patches_manager] . identifier[PatchesManager] ( identifier[RuntimeGlobals] . identifier[patches_file] , [ identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[Constants] . identifier[patches_directory] ) keyword[for] identifier[path] keyword[in] identifier[RuntimeGlobals] . identifier[resources_directories] ]) identifier[RuntimeGlobals] . identifier[patches_manager] . identifier[register_patches] () keyword[and] identifier[RuntimeGlobals] . identifier[patches_manager] . identifier[apply_patches] () identifier[RuntimeGlobals] . identifier[settings_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[RuntimeGlobals] . identifier[user_application_data_directory] , identifier[Constants] . identifier[settings_directory] , identifier[Constants] . identifier[settings_file] ) identifier[RuntimeGlobals] . identifier[settings] = identifier[Preferences] ( identifier[RuntimeGlobals] . identifier[settings_file] ) identifier[LOGGER] . identifier[debug] ( literal[string] ) identifier[RuntimeGlobals] . identifier[settings] . identifier[set_default_layouts] (( literal[string] ,)) identifier[foundations] . identifier[common] . identifier[path_exists] ( identifier[RuntimeGlobals] . identifier[settings_file] ) keyword[or] identifier[RuntimeGlobals] . identifier[settings] . identifier[set_default_preferences] () identifier[LOGGER] . identifier[debug] ( literal[string] ) identifier[RuntimeGlobals] . identifier[verbosity_level] = identifier[RuntimeGlobals] . identifier[parameters] . identifier[verbosity_level] keyword[if] identifier[RuntimeGlobals] . identifier[parameters] . 
identifier[verbosity_level] keyword[is] keyword[not] keyword[None] keyword[else] identifier[foundations] . identifier[common] . identifier[get_first_item] ( identifier[RuntimeGlobals] . identifier[settings] . identifier[get_key] ( literal[string] , literal[string] ). identifier[toInt] ()) identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[RuntimeGlobals] . identifier[verbosity_level] )) identifier[foundations] . identifier[verbose] . identifier[set_verbosity_level] ( identifier[RuntimeGlobals] . identifier[verbosity_level] ) identifier[RuntimeGlobals] . identifier[settings] . identifier[set_key] ( literal[string] , literal[string] , identifier[RuntimeGlobals] . identifier[verbosity_level] ) identifier[LOGGER] . identifier[debug] ( literal[string] ) identifier[logging_formatter] = identifier[RuntimeGlobals] . identifier[parameters] . identifier[logging_formatter] keyword[if] identifier[RuntimeGlobals] . identifier[parameters] . identifier[logging_formatter] keyword[is] keyword[not] keyword[None] keyword[else] identifier[foundations] . identifier[strings] . identifier[to_string] ( identifier[RuntimeGlobals] . identifier[settings] . identifier[get_key] ( literal[string] , literal[string] ). identifier[toString] ()) identifier[logging_formatter] = identifier[logging_formatter] keyword[if] identifier[logging_formatter] keyword[in] identifier[RuntimeGlobals] . identifier[logging_formatters] keyword[else] keyword[None] identifier[RuntimeGlobals] . identifier[logging_active_formatter] = identifier[logging_formatter] keyword[if] identifier[logging_formatter] keyword[is] keyword[not] keyword[None] keyword[else] identifier[Constants] . identifier[logging_default_formatter] identifier[LOGGER] . identifier[debug] ( literal[string] . identifier[format] ( identifier[RuntimeGlobals] . identifier[logging_active_formatter] )) keyword[for] identifier[handler] keyword[in] ( identifier[RuntimeGlobals] . 
identifier[logging_console_handler] , identifier[RuntimeGlobals] . identifier[logging_file_handler] ): identifier[handler] keyword[and] identifier[handler] . identifier[setFormatter] ( identifier[RuntimeGlobals] . identifier[logging_formatters] [ identifier[RuntimeGlobals] . identifier[logging_active_formatter] ]) identifier[RuntimeGlobals] . identifier[logging_session_handler] = identifier[foundations] . identifier[verbose] . identifier[get_logging_stream_handler] () identifier[RuntimeGlobals] . identifier[logging_session_handler_stream] = identifier[RuntimeGlobals] . identifier[logging_session_handler] . identifier[stream] identifier[LOGGER] . identifier[info] ( identifier[Constants] . identifier[logging_separators] ) keyword[for] identifier[line] keyword[in] identifier[SESSION_HEADER_TEXT] : identifier[LOGGER] . identifier[info] ( identifier[line] ) identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[Constants] . identifier[application_name] , identifier[time] . identifier[strftime] ( literal[string] ))) identifier[LOGGER] . identifier[info] ( identifier[Constants] . identifier[logging_separators] ) identifier[LOGGER] . identifier[info] ( literal[string] . identifier[format] ( identifier[Constants] . identifier[application_name] )) keyword[if] identifier[RuntimeGlobals] . identifier[parameters] . identifier[hide_splash_screen] : identifier[LOGGER] . identifier[debug] ( literal[string] ) keyword[else] : identifier[LOGGER] . identifier[debug] ( literal[string] ) identifier[RuntimeGlobals] . identifier[splashscreen_image] = identifier[QPixmap] ( identifier[umbra] . identifier[ui] . identifier[common] . identifier[get_resource_path] ( identifier[UiConstants] . identifier[splash_screen_image] )) identifier[RuntimeGlobals] . identifier[splashscreen] = identifier[Delayed_QSplashScreen] ( identifier[RuntimeGlobals] . identifier[splashscreen_image] , identifier[text_color] = identifier[Qt] . 
identifier[white] ) identifier[RuntimeGlobals] . identifier[splashscreen] . identifier[show_message] ( literal[string] . identifier[format] ( identifier[Constants] . identifier[application_name] , identifier[Constants] . identifier[version] )) identifier[RuntimeGlobals] . identifier[splashscreen] . identifier[show] () identifier[RuntimeGlobals] . identifier[requests_stack] = identifier[collections] . identifier[deque] () identifier[RuntimeGlobals] . identifier[engine] = identifier[engine] ( identifier[parent] = keyword[None] , identifier[components_paths] = identifier[components_paths] , identifier[requisite_components] = identifier[requisite_components] , identifier[visible_components] = identifier[visible_components] , identifier[splashscreen] = identifier[RuntimeGlobals] . identifier[splashscreen] , identifier[requests_stack] = identifier[RuntimeGlobals] . identifier[requests_stack] , identifier[patches_manager] = identifier[RuntimeGlobals] . identifier[patches_manager] , identifier[user_application_data_directory] = identifier[RuntimeGlobals] . identifier[user_application_data_directory] , identifier[logging_session_handler] = identifier[RuntimeGlobals] . identifier[logging_session_handler] , identifier[logging_file_handler] = identifier[RuntimeGlobals] . identifier[logging_file_handler] , identifier[logging_console_handler] = identifier[RuntimeGlobals] . identifier[logging_console_handler] , identifier[logging_session_handler_stream] = identifier[RuntimeGlobals] . identifier[logging_session_handler_stream] , identifier[logging_active_formatter] = identifier[RuntimeGlobals] . identifier[logging_active_formatter] , identifier[settings] = identifier[RuntimeGlobals] . identifier[settings] , identifier[verbosity_level] = identifier[RuntimeGlobals] . identifier[verbosity_level] , identifier[parameters] = identifier[RuntimeGlobals] . identifier[parameters] , identifier[arguments] = identifier[RuntimeGlobals] . identifier[arguments] ) identifier[RuntimeGlobals] . 
identifier[engine] . identifier[show] () identifier[RuntimeGlobals] . identifier[engine] . identifier[raise_] () keyword[return] identifier[sys] . identifier[exit] ( identifier[RuntimeGlobals] . identifier[application] . identifier[exec_] ())
def run(engine, parameters, components_paths=None, requisite_components=None, visible_components=None): """ Starts the Application. :param engine: Engine. :type engine: QObject :param parameters: Command line parameters. :type parameters: tuple :param components_paths: Components components_paths. :type components_paths: tuple or list :param requisite_components: Requisite components names. :type requisite_components: tuple or list :param visible_components: Visible components names. :type visible_components: tuple or list :return: Definition success. :rtype: bool """ # Command line parameters handling. (RuntimeGlobals.parameters, RuntimeGlobals.arguments) = parameters foundations.trace.evaluate_trace_request(RuntimeGlobals.parameters.trace_modules, foundations.verbose.tracer) if RuntimeGlobals.parameters.about: for line in SESSION_HEADER_TEXT: sys.stdout.write('{0}\n'.format(line)) # depends on [control=['for'], data=['line']] foundations.core.exit(1) # depends on [control=['if'], data=[]] # Redirecting standard output and error messages. sys.stdout = foundations.verbose.StandardOutputStreamer(LOGGER) sys.stderr = foundations.verbose.StandardOutputStreamer(LOGGER) # Setting application verbose level. foundations.verbose.set_verbosity_level(4) # Setting user application data directory. 
if RuntimeGlobals.parameters.user_application_data_directory: user_application_data_directory = RuntimeGlobals.user_application_data_directory = RuntimeGlobals.parameters.user_application_data_directory # depends on [control=['if'], data=[]] else: user_application_data_directory = RuntimeGlobals.user_application_data_directory = foundations.environment.get_user_application_data_directory() if not set_user_application_data_directory(user_application_data_directory): raise umbra.exceptions.EngineConfigurationError("{0} | '{1}' user Application data directory is not available, '{2}' will now close!".format(__name__, RuntimeGlobals.user_application_data_directory, Constants.application_name)) # depends on [control=['if'], data=[]] if foundations.environment.get_temporary_directory() in user_application_data_directory: umbra.ui.widgets.message_box.message_box('Error', 'Error', '{0} failed to use the default user Application data directory to store its preferences and has defaulted to the following directory:\n\n\t\'{1}\'.\n\nReasons for this are various:\n\t- Undefined \'APPDATA\' ( Windows ) or \'HOME\' ( Mac Os X, Linux ) environment variables.\n\t- User name with non \'UTF-8\' encoding compliant characters.\n\t- Non \'UTF-8\' encoding compliant characters in the preferences directory path.\n\nYou will have to define your own preferences directory by launching {0} with the \'-u "path\\to\\the\\custom\\preferences\\directory"\' command line parameter.'.format(Constants.application_name, user_application_data_directory)) # depends on [control=['if'], data=['user_application_data_directory']] LOGGER.debug("> Application Python interpreter: '{0}'".format(sys.executable)) LOGGER.debug("> Application PyQt version: '{0}'".format(PYQT_VERSION_STR)) LOGGER.debug("> Application startup location: '{0}'".format(os.getcwd())) LOGGER.debug("> Session user Application data directory: '{0}'".format(RuntimeGlobals.user_application_data_directory)) LOGGER.debug("> Initializing 
'{0}'!".format(Constants.application_name)) # Getting the logging file path. RuntimeGlobals.logging_file = get_logging_file() RuntimeGlobals.logging_file_handler = foundations.verbose.get_logging_file_handler(file=RuntimeGlobals.logging_file) # Getting the patches file path. RuntimeGlobals.patches_file = os.path.join(RuntimeGlobals.user_application_data_directory, Constants.patches_directory, Constants.patches_file) # Initializing the patches manager. RuntimeGlobals.patches_manager = umbra.managers.patches_manager.PatchesManager(RuntimeGlobals.patches_file, [os.path.join(path, Constants.patches_directory) for path in RuntimeGlobals.resources_directories]) RuntimeGlobals.patches_manager.register_patches() and RuntimeGlobals.patches_manager.apply_patches() # Retrieving settings file. RuntimeGlobals.settings_file = os.path.join(RuntimeGlobals.user_application_data_directory, Constants.settings_directory, Constants.settings_file) RuntimeGlobals.settings = Preferences(RuntimeGlobals.settings_file) LOGGER.debug('> Retrieving default layouts.') RuntimeGlobals.settings.set_default_layouts(('startup_centric',)) foundations.common.path_exists(RuntimeGlobals.settings_file) or RuntimeGlobals.settings.set_default_preferences() LOGGER.debug('> Retrieving stored verbose level.') RuntimeGlobals.verbosity_level = RuntimeGlobals.parameters.verbosity_level if RuntimeGlobals.parameters.verbosity_level is not None else foundations.common.get_first_item(RuntimeGlobals.settings.get_key('Settings', 'verbosity_level').toInt()) LOGGER.debug("> Setting logger verbosity level to: '{0}'.".format(RuntimeGlobals.verbosity_level)) foundations.verbose.set_verbosity_level(RuntimeGlobals.verbosity_level) RuntimeGlobals.settings.set_key('Settings', 'verbosity_level', RuntimeGlobals.verbosity_level) LOGGER.debug('> Retrieving stored logging formatter.') logging_formatter = RuntimeGlobals.parameters.logging_formatter if RuntimeGlobals.parameters.logging_formatter is not None else 
foundations.strings.to_string(RuntimeGlobals.settings.get_key('Settings', 'logging_formatter').toString()) logging_formatter = logging_formatter if logging_formatter in RuntimeGlobals.logging_formatters else None RuntimeGlobals.logging_active_formatter = logging_formatter if logging_formatter is not None else Constants.logging_default_formatter LOGGER.debug("> Setting logging formatter: '{0}'.".format(RuntimeGlobals.logging_active_formatter)) for handler in (RuntimeGlobals.logging_console_handler, RuntimeGlobals.logging_file_handler): handler and handler.setFormatter(RuntimeGlobals.logging_formatters[RuntimeGlobals.logging_active_formatter]) # depends on [control=['for'], data=['handler']] # Starting the session handler. RuntimeGlobals.logging_session_handler = foundations.verbose.get_logging_stream_handler() RuntimeGlobals.logging_session_handler_stream = RuntimeGlobals.logging_session_handler.stream LOGGER.info(Constants.logging_separators) for line in SESSION_HEADER_TEXT: LOGGER.info(line) # depends on [control=['for'], data=['line']] LOGGER.info('{0} | Session started at: {1}'.format(Constants.application_name, time.strftime('%X - %x'))) LOGGER.info(Constants.logging_separators) LOGGER.info('{0} | Starting Interface!'.format(Constants.application_name)) # Initializing splashscreen. if RuntimeGlobals.parameters.hide_splash_screen: LOGGER.debug("> SplashScreen skipped by 'hide_splash_screen' command line parameter.") # depends on [control=['if'], data=[]] else: LOGGER.debug('> Initializing splashscreen.') RuntimeGlobals.splashscreen_image = QPixmap(umbra.ui.common.get_resource_path(UiConstants.splash_screen_image)) RuntimeGlobals.splashscreen = Delayed_QSplashScreen(RuntimeGlobals.splashscreen_image, text_color=Qt.white) RuntimeGlobals.splashscreen.show_message('{0} - {1} | Initializing {0}.'.format(Constants.application_name, Constants.version)) RuntimeGlobals.splashscreen.show() # Initializing requests stack. 
RuntimeGlobals.requests_stack = collections.deque() # Initializing engine. RuntimeGlobals.engine = engine(parent=None, components_paths=components_paths, requisite_components=requisite_components, visible_components=visible_components, splashscreen=RuntimeGlobals.splashscreen, requests_stack=RuntimeGlobals.requests_stack, patches_manager=RuntimeGlobals.patches_manager, user_application_data_directory=RuntimeGlobals.user_application_data_directory, logging_session_handler=RuntimeGlobals.logging_session_handler, logging_file_handler=RuntimeGlobals.logging_file_handler, logging_console_handler=RuntimeGlobals.logging_console_handler, logging_session_handler_stream=RuntimeGlobals.logging_session_handler_stream, logging_active_formatter=RuntimeGlobals.logging_active_formatter, settings=RuntimeGlobals.settings, verbosity_level=RuntimeGlobals.verbosity_level, parameters=RuntimeGlobals.parameters, arguments=RuntimeGlobals.arguments) RuntimeGlobals.engine.show() RuntimeGlobals.engine.raise_() return sys.exit(RuntimeGlobals.application.exec_())
def get(self, key, index=None): """Retrieves a value associated with a key from the database Args: key (str): The key to retrieve """ records = self.get_multi([key], index=index) try: return records[0][1] # return the value from the key/value tuple except IndexError: return None
def function[get, parameter[self, key, index]]: constant[Retrieves a value associated with a key from the database Args: key (str): The key to retrieve ] variable[records] assign[=] call[name[self].get_multi, parameter[list[[<ast.Name object at 0x7da20c6c7d00>]]]] <ast.Try object at 0x7da20c6c7a30>
keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[index] = keyword[None] ): literal[string] identifier[records] = identifier[self] . identifier[get_multi] ([ identifier[key] ], identifier[index] = identifier[index] ) keyword[try] : keyword[return] identifier[records] [ literal[int] ][ literal[int] ] keyword[except] identifier[IndexError] : keyword[return] keyword[None]
def get(self, key, index=None): """Retrieves a value associated with a key from the database Args: key (str): The key to retrieve """ records = self.get_multi([key], index=index) try: return records[0][1] # return the value from the key/value tuple # depends on [control=['try'], data=[]] except IndexError: return None # depends on [control=['except'], data=[]]
def linear_least_squares(a, b, residuals=False): """ Return the least-squares solution to a linear matrix equation. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Parameters ---------- a : (M, N) array_like "Coefficient" matrix. b : (M,) array_like Ordinate or "dependent variable" values. residuals : bool Compute the residuals associated with the least-squares solution Returns ------- x : (M,) ndarray Least-squares solution. The shape of `x` depends on the shape of `b`. residuals : int (Optional) Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. """ # Copyright (c) 2013 Alexandre Drouin. All rights reserved. # From https://gist.github.com/aldro61/5889795 from warnings import warn # from scipy.linalg.fblas import dgemm from scipy.linalg.blas import dgemm # if type(a) != np.ndarray or not a.flags['C_CONTIGUOUS']: # warn('Matrix a is not a C-contiguous numpy array. The solver will create a copy, which will result' + \ # ' in increased memory usage.') a = np.asarray(a, order='c') i = dgemm(alpha=1.0, a=a.T, b=a.T, trans_b=True) x = np.linalg.solve(i, dgemm(alpha=1.0, a=a.T, b=b)).flatten() if residuals: return x, np.linalg.norm(np.dot(a, x) - b) else: return x
def function[linear_least_squares, parameter[a, b, residuals]]: constant[ Return the least-squares solution to a linear matrix equation. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Parameters ---------- a : (M, N) array_like "Coefficient" matrix. b : (M,) array_like Ordinate or "dependent variable" values. residuals : bool Compute the residuals associated with the least-squares solution Returns ------- x : (M,) ndarray Least-squares solution. The shape of `x` depends on the shape of `b`. residuals : int (Optional) Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. ] from relative_module[warnings] import module[warn] from relative_module[scipy.linalg.blas] import module[dgemm] variable[a] assign[=] call[name[np].asarray, parameter[name[a]]] variable[i] assign[=] call[name[dgemm], parameter[]] variable[x] assign[=] call[call[name[np].linalg.solve, parameter[name[i], call[name[dgemm], parameter[]]]].flatten, parameter[]] if name[residuals] begin[:] return[tuple[[<ast.Name object at 0x7da20c76da80>, <ast.Call object at 0x7da20c76f820>]]]
keyword[def] identifier[linear_least_squares] ( identifier[a] , identifier[b] , identifier[residuals] = keyword[False] ): literal[string] keyword[from] identifier[warnings] keyword[import] identifier[warn] keyword[from] identifier[scipy] . identifier[linalg] . identifier[blas] keyword[import] identifier[dgemm] identifier[a] = identifier[np] . identifier[asarray] ( identifier[a] , identifier[order] = literal[string] ) identifier[i] = identifier[dgemm] ( identifier[alpha] = literal[int] , identifier[a] = identifier[a] . identifier[T] , identifier[b] = identifier[a] . identifier[T] , identifier[trans_b] = keyword[True] ) identifier[x] = identifier[np] . identifier[linalg] . identifier[solve] ( identifier[i] , identifier[dgemm] ( identifier[alpha] = literal[int] , identifier[a] = identifier[a] . identifier[T] , identifier[b] = identifier[b] )). identifier[flatten] () keyword[if] identifier[residuals] : keyword[return] identifier[x] , identifier[np] . identifier[linalg] . identifier[norm] ( identifier[np] . identifier[dot] ( identifier[a] , identifier[x] )- identifier[b] ) keyword[else] : keyword[return] identifier[x]
def linear_least_squares(a, b, residuals=False): """ Return the least-squares solution to a linear matrix equation. Solves the equation `a x = b` by computing a vector `x` that minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may be under-, well-, or over- determined (i.e., the number of linearly independent rows of `a` can be less than, equal to, or greater than its number of linearly independent columns). If `a` is square and of full rank, then `x` (but for round-off error) is the "exact" solution of the equation. Parameters ---------- a : (M, N) array_like "Coefficient" matrix. b : (M,) array_like Ordinate or "dependent variable" values. residuals : bool Compute the residuals associated with the least-squares solution Returns ------- x : (M,) ndarray Least-squares solution. The shape of `x` depends on the shape of `b`. residuals : int (Optional) Sums of residuals; squared Euclidean 2-norm for each column in ``b - a*x``. """ # Copyright (c) 2013 Alexandre Drouin. All rights reserved. # From https://gist.github.com/aldro61/5889795 from warnings import warn # from scipy.linalg.fblas import dgemm from scipy.linalg.blas import dgemm # if type(a) != np.ndarray or not a.flags['C_CONTIGUOUS']: # warn('Matrix a is not a C-contiguous numpy array. The solver will create a copy, which will result' + \ # ' in increased memory usage.') a = np.asarray(a, order='c') i = dgemm(alpha=1.0, a=a.T, b=a.T, trans_b=True) x = np.linalg.solve(i, dgemm(alpha=1.0, a=a.T, b=b)).flatten() if residuals: return (x, np.linalg.norm(np.dot(a, x) - b)) # depends on [control=['if'], data=[]] else: return x
def display_dp_matrix_attr(dp_matrix, attr_name): """ show a value assocciated with an attribute for each DataProperty instance in the dp_matrix """ print() print("---------- {:s} ----------".format(attr_name)) for dp_list in dp_matrix: print([getattr(dp, attr_name) for dp in dp_list])
def function[display_dp_matrix_attr, parameter[dp_matrix, attr_name]]: constant[ show a value assocciated with an attribute for each DataProperty instance in the dp_matrix ] call[name[print], parameter[]] call[name[print], parameter[call[constant[---------- {:s} ----------].format, parameter[name[attr_name]]]]] for taget[name[dp_list]] in starred[name[dp_matrix]] begin[:] call[name[print], parameter[<ast.ListComp object at 0x7da1b23ea7d0>]]
keyword[def] identifier[display_dp_matrix_attr] ( identifier[dp_matrix] , identifier[attr_name] ): literal[string] identifier[print] () identifier[print] ( literal[string] . identifier[format] ( identifier[attr_name] )) keyword[for] identifier[dp_list] keyword[in] identifier[dp_matrix] : identifier[print] ([ identifier[getattr] ( identifier[dp] , identifier[attr_name] ) keyword[for] identifier[dp] keyword[in] identifier[dp_list] ])
def display_dp_matrix_attr(dp_matrix, attr_name): """ show a value assocciated with an attribute for each DataProperty instance in the dp_matrix """ print() print('---------- {:s} ----------'.format(attr_name)) for dp_list in dp_matrix: print([getattr(dp, attr_name) for dp in dp_list]) # depends on [control=['for'], data=['dp_list']]
def sort_bam_by_reference(job, job_vars): """ Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() # I/O sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') output = os.path.join(work_dir, 'sort_by_ref.bam') # Call: Samtools ref_seqs = [] handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout for line in handle: if line.startswith("@SQ"): tmp = line.split("\t") chrom = tmp[1].split(":")[1] ref_seqs.append(chrom) handle.close() # Iterate through chromosomes to create mini-bams for chrom in ref_seqs: # job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam) cmd_view = ["samtools", "view", "-b", sorted_bam, chrom] cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)] p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE) subprocess.check_call(cmd_sort, stdin=p1.stdout) sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs] cmd = ["samtools", "cat", "-o", output] + sorted_files subprocess.check_call(cmd) # Write to FileStore ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output) rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv() exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv() return exon_id, rsem_id
def function[sort_bam_by_reference, parameter[job, job_vars]]: constant[ Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids ] <ast.Tuple object at 0x7da20e9b3f10> assign[=] name[job_vars] variable[work_dir] assign[=] call[name[job].fileStore.getLocalTempDir, parameter[]] <ast.Tuple object at 0x7da20e9b0880> assign[=] call[name[return_input_paths], parameter[name[job], name[work_dir], name[ids], constant[sorted.bam], constant[sorted.bam.bai]]] variable[output] assign[=] call[name[os].path.join, parameter[name[work_dir], constant[sort_by_ref.bam]]] variable[ref_seqs] assign[=] list[[]] variable[handle] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Constant object at 0x7da18f58fb50>, <ast.Constant object at 0x7da18f58c8e0>, <ast.Constant object at 0x7da18f58d810>, <ast.Name object at 0x7da18f58f4f0>]]]].stdout for taget[name[line]] in starred[name[handle]] begin[:] if call[name[line].startswith, parameter[constant[@SQ]]] begin[:] variable[tmp] assign[=] call[name[line].split, parameter[constant[ ]]] variable[chrom] assign[=] call[call[call[name[tmp]][constant[1]].split, parameter[constant[:]]]][constant[1]] call[name[ref_seqs].append, parameter[name[chrom]]] call[name[handle].close, parameter[]] for taget[name[chrom]] in starred[name[ref_seqs]] begin[:] variable[cmd_view] assign[=] list[[<ast.Constant object at 0x7da20e9b1060>, <ast.Constant object at 0x7da20e9b1810>, <ast.Constant object at 0x7da20e9b0670>, <ast.Name object at 0x7da20e9b3a60>, <ast.Name object at 0x7da20e9b2f50>]] variable[cmd_sort] assign[=] list[[<ast.Constant object at 0x7da20e9b16c0>, <ast.Constant object at 0x7da20e9b17e0>, <ast.Constant object at 0x7da20e9b1f90>, <ast.Constant object at 0x7da20e9b01f0>, <ast.Constant object at 0x7da20e9b2740>, <ast.Constant object at 0x7da20e9b0580>, <ast.Call object at 0x7da20e9b1450>]] variable[p1] assign[=] call[name[subprocess].Popen, parameter[name[cmd_view]]] call[name[subprocess].check_call, 
parameter[name[cmd_sort]]] variable[sorted_files] assign[=] <ast.ListComp object at 0x7da18c4cceb0> variable[cmd] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18c4cc4f0>, <ast.Constant object at 0x7da18c4cf970>, <ast.Constant object at 0x7da18c4ceda0>, <ast.Name object at 0x7da18c4cc5e0>]] + name[sorted_files]] call[name[subprocess].check_call, parameter[name[cmd]]] call[name[ids]][constant[sort_by_ref.bam]] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[name[output]]] variable[rsem_id] assign[=] call[call[name[job].addChildJobFn, parameter[name[transcriptome], name[job_vars]]].rv, parameter[]] variable[exon_id] assign[=] call[call[name[job].addChildJobFn, parameter[name[exon_count], name[job_vars]]].rv, parameter[]] return[tuple[[<ast.Name object at 0x7da18c4cd510>, <ast.Name object at 0x7da18c4cd450>]]]
keyword[def] identifier[sort_bam_by_reference] ( identifier[job] , identifier[job_vars] ): literal[string] identifier[input_args] , identifier[ids] = identifier[job_vars] identifier[work_dir] = identifier[job] . identifier[fileStore] . identifier[getLocalTempDir] () identifier[sorted_bam] , identifier[sorted_bai] = identifier[return_input_paths] ( identifier[job] , identifier[work_dir] , identifier[ids] , literal[string] , literal[string] ) identifier[output] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] ) identifier[ref_seqs] =[] identifier[handle] = identifier[subprocess] . identifier[Popen] ([ literal[string] , literal[string] , literal[string] , identifier[sorted_bam] ], identifier[stdout] = identifier[subprocess] . identifier[PIPE] ). identifier[stdout] keyword[for] identifier[line] keyword[in] identifier[handle] : keyword[if] identifier[line] . identifier[startswith] ( literal[string] ): identifier[tmp] = identifier[line] . identifier[split] ( literal[string] ) identifier[chrom] = identifier[tmp] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ] identifier[ref_seqs] . identifier[append] ( identifier[chrom] ) identifier[handle] . identifier[close] () keyword[for] identifier[chrom] keyword[in] identifier[ref_seqs] : identifier[cmd_view] =[ literal[string] , literal[string] , literal[string] , identifier[sorted_bam] , identifier[chrom] ] identifier[cmd_sort] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , identifier[chrom] )] identifier[p1] = identifier[subprocess] . identifier[Popen] ( identifier[cmd_view] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ) identifier[subprocess] . identifier[check_call] ( identifier[cmd_sort] , identifier[stdin] = identifier[p1] . identifier[stdout] ) identifier[sorted_files] =[ identifier[os] . 
identifier[path] . identifier[join] ( identifier[work_dir] , identifier[chrom] )+ literal[string] keyword[for] identifier[chrom] keyword[in] identifier[ref_seqs] ] identifier[cmd] =[ literal[string] , literal[string] , literal[string] , identifier[output] ]+ identifier[sorted_files] identifier[subprocess] . identifier[check_call] ( identifier[cmd] ) identifier[ids] [ literal[string] ]= identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[output] ) identifier[rsem_id] = identifier[job] . identifier[addChildJobFn] ( identifier[transcriptome] , identifier[job_vars] , identifier[disk] = literal[string] , identifier[memory] = literal[string] ). identifier[rv] () identifier[exon_id] = identifier[job] . identifier[addChildJobFn] ( identifier[exon_count] , identifier[job_vars] , identifier[disk] = literal[string] ). identifier[rv] () keyword[return] identifier[exon_id] , identifier[rsem_id]
def sort_bam_by_reference(job, job_vars): """ Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables (input_args, ids) = job_vars work_dir = job.fileStore.getLocalTempDir() # I/O (sorted_bam, sorted_bai) = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') output = os.path.join(work_dir, 'sort_by_ref.bam') # Call: Samtools ref_seqs = [] handle = subprocess.Popen(['samtools', 'view', '-H', sorted_bam], stdout=subprocess.PIPE).stdout for line in handle: if line.startswith('@SQ'): tmp = line.split('\t') chrom = tmp[1].split(':')[1] ref_seqs.append(chrom) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] handle.close() # Iterate through chromosomes to create mini-bams for chrom in ref_seqs: # job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam) cmd_view = ['samtools', 'view', '-b', sorted_bam, chrom] cmd_sort = ['samtools', 'sort', '-m', '3000000000', '-n', '-', os.path.join(work_dir, chrom)] p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE) subprocess.check_call(cmd_sort, stdin=p1.stdout) # depends on [control=['for'], data=['chrom']] sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs] cmd = ['samtools', 'cat', '-o', output] + sorted_files subprocess.check_call(cmd) # Write to FileStore ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output) rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv() exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv() return (exon_id, rsem_id)
def init_group(self, group, chunk_size, compression=None, compression_opts=None): """Initializes a HDF5 group compliant with the stored data. This method creates the datasets 'items', 'labels', 'features' and 'index' and leaves them empty. :param h5py.Group group: The group to initializes. :param float chunk_size: The size of a chunk in the file (in MB). :param str compression: Optional compression, see :class:`h5features.writer` for details :param str compression: Optional compression options, see :class:`h5features.writer` for details """ create_index(group, chunk_size) self._entries['items'].create_dataset( group, chunk_size, compression=compression, compression_opts=compression_opts) self._entries['features'].create_dataset( group, chunk_size, compression=compression, compression_opts=compression_opts) # chunking the labels depends on features chunks self._entries['labels'].create_dataset( group, self._entries['features'].nb_per_chunk, compression=compression, compression_opts=compression_opts) if self.has_properties(): self._entries['properties'].create_dataset( group, compression=compression, compression_opts=compression_opts)
def function[init_group, parameter[self, group, chunk_size, compression, compression_opts]]: constant[Initializes a HDF5 group compliant with the stored data. This method creates the datasets 'items', 'labels', 'features' and 'index' and leaves them empty. :param h5py.Group group: The group to initializes. :param float chunk_size: The size of a chunk in the file (in MB). :param str compression: Optional compression, see :class:`h5features.writer` for details :param str compression: Optional compression options, see :class:`h5features.writer` for details ] call[name[create_index], parameter[name[group], name[chunk_size]]] call[call[name[self]._entries][constant[items]].create_dataset, parameter[name[group], name[chunk_size]]] call[call[name[self]._entries][constant[features]].create_dataset, parameter[name[group], name[chunk_size]]] call[call[name[self]._entries][constant[labels]].create_dataset, parameter[name[group], call[name[self]._entries][constant[features]].nb_per_chunk]] if call[name[self].has_properties, parameter[]] begin[:] call[call[name[self]._entries][constant[properties]].create_dataset, parameter[name[group]]]
keyword[def] identifier[init_group] ( identifier[self] , identifier[group] , identifier[chunk_size] , identifier[compression] = keyword[None] , identifier[compression_opts] = keyword[None] ): literal[string] identifier[create_index] ( identifier[group] , identifier[chunk_size] ) identifier[self] . identifier[_entries] [ literal[string] ]. identifier[create_dataset] ( identifier[group] , identifier[chunk_size] , identifier[compression] = identifier[compression] , identifier[compression_opts] = identifier[compression_opts] ) identifier[self] . identifier[_entries] [ literal[string] ]. identifier[create_dataset] ( identifier[group] , identifier[chunk_size] , identifier[compression] = identifier[compression] , identifier[compression_opts] = identifier[compression_opts] ) identifier[self] . identifier[_entries] [ literal[string] ]. identifier[create_dataset] ( identifier[group] , identifier[self] . identifier[_entries] [ literal[string] ]. identifier[nb_per_chunk] , identifier[compression] = identifier[compression] , identifier[compression_opts] = identifier[compression_opts] ) keyword[if] identifier[self] . identifier[has_properties] (): identifier[self] . identifier[_entries] [ literal[string] ]. identifier[create_dataset] ( identifier[group] , identifier[compression] = identifier[compression] , identifier[compression_opts] = identifier[compression_opts] )
def init_group(self, group, chunk_size, compression=None, compression_opts=None): """Initializes a HDF5 group compliant with the stored data. This method creates the datasets 'items', 'labels', 'features' and 'index' and leaves them empty. :param h5py.Group group: The group to initializes. :param float chunk_size: The size of a chunk in the file (in MB). :param str compression: Optional compression, see :class:`h5features.writer` for details :param str compression: Optional compression options, see :class:`h5features.writer` for details """ create_index(group, chunk_size) self._entries['items'].create_dataset(group, chunk_size, compression=compression, compression_opts=compression_opts) self._entries['features'].create_dataset(group, chunk_size, compression=compression, compression_opts=compression_opts) # chunking the labels depends on features chunks self._entries['labels'].create_dataset(group, self._entries['features'].nb_per_chunk, compression=compression, compression_opts=compression_opts) if self.has_properties(): self._entries['properties'].create_dataset(group, compression=compression, compression_opts=compression_opts) # depends on [control=['if'], data=[]]
def unhook(self, debug, pid): """ Removes the API hook from the given process and module. @warning: Do not call from an API hook callback. @type debug: L{Debug} @param debug: Debug object. @type pid: int @param pid: Process ID. """ try: hook = self.__hook[pid] except KeyError: return label = "%s!%s" % (self.__modName, self.__procName) hook.unhook(debug, pid, label) del self.__hook[pid]
def function[unhook, parameter[self, debug, pid]]: constant[ Removes the API hook from the given process and module. @warning: Do not call from an API hook callback. @type debug: L{Debug} @param debug: Debug object. @type pid: int @param pid: Process ID. ] <ast.Try object at 0x7da1b06fad10> variable[label] assign[=] binary_operation[constant[%s!%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b06fbe50>, <ast.Attribute object at 0x7da1b06fb520>]]] call[name[hook].unhook, parameter[name[debug], name[pid], name[label]]] <ast.Delete object at 0x7da1b06fb5b0>
keyword[def] identifier[unhook] ( identifier[self] , identifier[debug] , identifier[pid] ): literal[string] keyword[try] : identifier[hook] = identifier[self] . identifier[__hook] [ identifier[pid] ] keyword[except] identifier[KeyError] : keyword[return] identifier[label] = literal[string] %( identifier[self] . identifier[__modName] , identifier[self] . identifier[__procName] ) identifier[hook] . identifier[unhook] ( identifier[debug] , identifier[pid] , identifier[label] ) keyword[del] identifier[self] . identifier[__hook] [ identifier[pid] ]
def unhook(self, debug, pid): """ Removes the API hook from the given process and module. @warning: Do not call from an API hook callback. @type debug: L{Debug} @param debug: Debug object. @type pid: int @param pid: Process ID. """ try: hook = self.__hook[pid] # depends on [control=['try'], data=[]] except KeyError: return # depends on [control=['except'], data=[]] label = '%s!%s' % (self.__modName, self.__procName) hook.unhook(debug, pid, label) del self.__hook[pid]
def getParameterArrayCount(self, name, index): """Default implementation that return the length of the attribute. This default implementation goes hand in hand with :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`. If you override one of them in your subclass, you should probably override both of them. The implementation prevents accessing parameters names that start with ``_``. It may be better to enforce this convention at the node spec level. :param name: (string) name of requested parameter :param index: (int) index of node inside the region (if relevant) :raises: Exception if parameter starts with ``_``. """ if name.startswith('_'): raise Exception('Parameter name must not start with an underscore') return len(self.parameters[name])
def function[getParameterArrayCount, parameter[self, name, index]]: constant[Default implementation that return the length of the attribute. This default implementation goes hand in hand with :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`. If you override one of them in your subclass, you should probably override both of them. The implementation prevents accessing parameters names that start with ``_``. It may be better to enforce this convention at the node spec level. :param name: (string) name of requested parameter :param index: (int) index of node inside the region (if relevant) :raises: Exception if parameter starts with ``_``. ] if call[name[name].startswith, parameter[constant[_]]] begin[:] <ast.Raise object at 0x7da1b2316dd0> return[call[name[len], parameter[call[name[self].parameters][name[name]]]]]
keyword[def] identifier[getParameterArrayCount] ( identifier[self] , identifier[name] , identifier[index] ): literal[string] keyword[if] identifier[name] . identifier[startswith] ( literal[string] ): keyword[raise] identifier[Exception] ( literal[string] ) keyword[return] identifier[len] ( identifier[self] . identifier[parameters] [ identifier[name] ])
def getParameterArrayCount(self, name, index): """Default implementation that return the length of the attribute. This default implementation goes hand in hand with :meth:`~nupic.bindings.regions.PyRegion.PyRegion.getParameterArray`. If you override one of them in your subclass, you should probably override both of them. The implementation prevents accessing parameters names that start with ``_``. It may be better to enforce this convention at the node spec level. :param name: (string) name of requested parameter :param index: (int) index of node inside the region (if relevant) :raises: Exception if parameter starts with ``_``. """ if name.startswith('_'): raise Exception('Parameter name must not start with an underscore') # depends on [control=['if'], data=[]] return len(self.parameters[name])
def _from_json_list(cls, response_raw, wrapper=None): """ :type response_raw: client.BunqResponseRaw :type wrapper: str|None :rtype: client.BunqResponse[list[cls]] """ json = response_raw.body_bytes.decode() obj = converter.json_to_class(dict, json) array = obj[cls._FIELD_RESPONSE] array_deserialized = [] for item in array: item_unwrapped = item if wrapper is None else item[wrapper] item_deserialized = converter.deserialize(cls, item_unwrapped) array_deserialized.append(item_deserialized) pagination = converter.deserialize(client.Pagination, obj[cls._FIELD_PAGINATION]) return client.BunqResponse(array_deserialized, response_raw.headers, pagination)
def function[_from_json_list, parameter[cls, response_raw, wrapper]]: constant[ :type response_raw: client.BunqResponseRaw :type wrapper: str|None :rtype: client.BunqResponse[list[cls]] ] variable[json] assign[=] call[name[response_raw].body_bytes.decode, parameter[]] variable[obj] assign[=] call[name[converter].json_to_class, parameter[name[dict], name[json]]] variable[array] assign[=] call[name[obj]][name[cls]._FIELD_RESPONSE] variable[array_deserialized] assign[=] list[[]] for taget[name[item]] in starred[name[array]] begin[:] variable[item_unwrapped] assign[=] <ast.IfExp object at 0x7da1b080b9d0> variable[item_deserialized] assign[=] call[name[converter].deserialize, parameter[name[cls], name[item_unwrapped]]] call[name[array_deserialized].append, parameter[name[item_deserialized]]] variable[pagination] assign[=] call[name[converter].deserialize, parameter[name[client].Pagination, call[name[obj]][name[cls]._FIELD_PAGINATION]]] return[call[name[client].BunqResponse, parameter[name[array_deserialized], name[response_raw].headers, name[pagination]]]]
keyword[def] identifier[_from_json_list] ( identifier[cls] , identifier[response_raw] , identifier[wrapper] = keyword[None] ): literal[string] identifier[json] = identifier[response_raw] . identifier[body_bytes] . identifier[decode] () identifier[obj] = identifier[converter] . identifier[json_to_class] ( identifier[dict] , identifier[json] ) identifier[array] = identifier[obj] [ identifier[cls] . identifier[_FIELD_RESPONSE] ] identifier[array_deserialized] =[] keyword[for] identifier[item] keyword[in] identifier[array] : identifier[item_unwrapped] = identifier[item] keyword[if] identifier[wrapper] keyword[is] keyword[None] keyword[else] identifier[item] [ identifier[wrapper] ] identifier[item_deserialized] = identifier[converter] . identifier[deserialize] ( identifier[cls] , identifier[item_unwrapped] ) identifier[array_deserialized] . identifier[append] ( identifier[item_deserialized] ) identifier[pagination] = identifier[converter] . identifier[deserialize] ( identifier[client] . identifier[Pagination] , identifier[obj] [ identifier[cls] . identifier[_FIELD_PAGINATION] ]) keyword[return] identifier[client] . identifier[BunqResponse] ( identifier[array_deserialized] , identifier[response_raw] . identifier[headers] , identifier[pagination] )
def _from_json_list(cls, response_raw, wrapper=None): """ :type response_raw: client.BunqResponseRaw :type wrapper: str|None :rtype: client.BunqResponse[list[cls]] """ json = response_raw.body_bytes.decode() obj = converter.json_to_class(dict, json) array = obj[cls._FIELD_RESPONSE] array_deserialized = [] for item in array: item_unwrapped = item if wrapper is None else item[wrapper] item_deserialized = converter.deserialize(cls, item_unwrapped) array_deserialized.append(item_deserialized) # depends on [control=['for'], data=['item']] pagination = converter.deserialize(client.Pagination, obj[cls._FIELD_PAGINATION]) return client.BunqResponse(array_deserialized, response_raw.headers, pagination)
def _unpack(struct, bc, offset=0): """ returns the unpacked data tuple, and the next offset past the unpacked data """ return struct.unpack_from(bc, offset), offset + struct.size
def function[_unpack, parameter[struct, bc, offset]]: constant[ returns the unpacked data tuple, and the next offset past the unpacked data ] return[tuple[[<ast.Call object at 0x7da1b0e2e110>, <ast.BinOp object at 0x7da1b0b194e0>]]]
keyword[def] identifier[_unpack] ( identifier[struct] , identifier[bc] , identifier[offset] = literal[int] ): literal[string] keyword[return] identifier[struct] . identifier[unpack_from] ( identifier[bc] , identifier[offset] ), identifier[offset] + identifier[struct] . identifier[size]
def _unpack(struct, bc, offset=0): """ returns the unpacked data tuple, and the next offset past the unpacked data """ return (struct.unpack_from(bc, offset), offset + struct.size)
def clean_times(feed: "Feed") -> "Feed": """ In the given "Feed", convert H:MM:SS time strings to HH:MM:SS time strings to make sorting by time work as expected. Return the resulting "Feed". """ def reformat(t): if pd.isnull(t): return t t = t.strip() if len(t) == 7: t = "0" + t return t feed = feed.copy() tables_and_columns = [ ("stop_times", ["arrival_time", "departure_time"]), ("frequencies", ["start_time", "end_time"]), ] for table, columns in tables_and_columns: f = getattr(feed, table) if f is not None: f[columns] = f[columns].applymap(reformat) setattr(feed, table, f) return feed
def function[clean_times, parameter[feed]]: constant[ In the given "Feed", convert H:MM:SS time strings to HH:MM:SS time strings to make sorting by time work as expected. Return the resulting "Feed". ] def function[reformat, parameter[t]]: if call[name[pd].isnull, parameter[name[t]]] begin[:] return[name[t]] variable[t] assign[=] call[name[t].strip, parameter[]] if compare[call[name[len], parameter[name[t]]] equal[==] constant[7]] begin[:] variable[t] assign[=] binary_operation[constant[0] + name[t]] return[name[t]] variable[feed] assign[=] call[name[feed].copy, parameter[]] variable[tables_and_columns] assign[=] list[[<ast.Tuple object at 0x7da1b0b98160>, <ast.Tuple object at 0x7da1b0b98070>]] for taget[tuple[[<ast.Name object at 0x7da1b0b99d50>, <ast.Name object at 0x7da1b0b99d80>]]] in starred[name[tables_and_columns]] begin[:] variable[f] assign[=] call[name[getattr], parameter[name[feed], name[table]]] if compare[name[f] is_not constant[None]] begin[:] call[name[f]][name[columns]] assign[=] call[call[name[f]][name[columns]].applymap, parameter[name[reformat]]] call[name[setattr], parameter[name[feed], name[table], name[f]]] return[name[feed]]
keyword[def] identifier[clean_times] ( identifier[feed] : literal[string] )-> literal[string] : literal[string] keyword[def] identifier[reformat] ( identifier[t] ): keyword[if] identifier[pd] . identifier[isnull] ( identifier[t] ): keyword[return] identifier[t] identifier[t] = identifier[t] . identifier[strip] () keyword[if] identifier[len] ( identifier[t] )== literal[int] : identifier[t] = literal[string] + identifier[t] keyword[return] identifier[t] identifier[feed] = identifier[feed] . identifier[copy] () identifier[tables_and_columns] =[ ( literal[string] ,[ literal[string] , literal[string] ]), ( literal[string] ,[ literal[string] , literal[string] ]), ] keyword[for] identifier[table] , identifier[columns] keyword[in] identifier[tables_and_columns] : identifier[f] = identifier[getattr] ( identifier[feed] , identifier[table] ) keyword[if] identifier[f] keyword[is] keyword[not] keyword[None] : identifier[f] [ identifier[columns] ]= identifier[f] [ identifier[columns] ]. identifier[applymap] ( identifier[reformat] ) identifier[setattr] ( identifier[feed] , identifier[table] , identifier[f] ) keyword[return] identifier[feed]
def clean_times(feed: 'Feed') -> 'Feed': """ In the given "Feed", convert H:MM:SS time strings to HH:MM:SS time strings to make sorting by time work as expected. Return the resulting "Feed". """ def reformat(t): if pd.isnull(t): return t # depends on [control=['if'], data=[]] t = t.strip() if len(t) == 7: t = '0' + t # depends on [control=['if'], data=[]] return t feed = feed.copy() tables_and_columns = [('stop_times', ['arrival_time', 'departure_time']), ('frequencies', ['start_time', 'end_time'])] for (table, columns) in tables_and_columns: f = getattr(feed, table) if f is not None: f[columns] = f[columns].applymap(reformat) # depends on [control=['if'], data=['f']] setattr(feed, table, f) # depends on [control=['for'], data=[]] return feed
def establish_rabbitmq_connection(rabbitmq_uri): """ What it says on the tin. Input: - rabbitmq_uri: A RabbitMQ URI. Output: - connection: A RabbitMQ connection. """ userid, password, host, port, virtual_host, ssl = translate_rabbitmq_url(rabbitmq_uri) connection = Connection(userid=userid, password=password, host=host, port=port, virtual_host=virtual_host, ssl=False) return connection
def function[establish_rabbitmq_connection, parameter[rabbitmq_uri]]: constant[ What it says on the tin. Input: - rabbitmq_uri: A RabbitMQ URI. Output: - connection: A RabbitMQ connection. ] <ast.Tuple object at 0x7da2041d9b70> assign[=] call[name[translate_rabbitmq_url], parameter[name[rabbitmq_uri]]] variable[connection] assign[=] call[name[Connection], parameter[]] return[name[connection]]
keyword[def] identifier[establish_rabbitmq_connection] ( identifier[rabbitmq_uri] ): literal[string] identifier[userid] , identifier[password] , identifier[host] , identifier[port] , identifier[virtual_host] , identifier[ssl] = identifier[translate_rabbitmq_url] ( identifier[rabbitmq_uri] ) identifier[connection] = identifier[Connection] ( identifier[userid] = identifier[userid] , identifier[password] = identifier[password] , identifier[host] = identifier[host] , identifier[port] = identifier[port] , identifier[virtual_host] = identifier[virtual_host] , identifier[ssl] = keyword[False] ) keyword[return] identifier[connection]
def establish_rabbitmq_connection(rabbitmq_uri): """ What it says on the tin. Input: - rabbitmq_uri: A RabbitMQ URI. Output: - connection: A RabbitMQ connection. """ (userid, password, host, port, virtual_host, ssl) = translate_rabbitmq_url(rabbitmq_uri) connection = Connection(userid=userid, password=password, host=host, port=port, virtual_host=virtual_host, ssl=False) return connection
def get_command_templates(command_tokens, file_tokens=[], path_tokens=[], job_options=[]): """ Given a list of tokens from the grammar, return a list of commands. """ files = get_files(file_tokens) paths = get_paths(path_tokens) job_options = get_options(job_options) templates = _get_command_templates(command_tokens, files, paths, job_options) for command_template in templates: command_template._dependencies = _get_prelim_dependencies( command_template, templates) return templates
def function[get_command_templates, parameter[command_tokens, file_tokens, path_tokens, job_options]]: constant[ Given a list of tokens from the grammar, return a list of commands. ] variable[files] assign[=] call[name[get_files], parameter[name[file_tokens]]] variable[paths] assign[=] call[name[get_paths], parameter[name[path_tokens]]] variable[job_options] assign[=] call[name[get_options], parameter[name[job_options]]] variable[templates] assign[=] call[name[_get_command_templates], parameter[name[command_tokens], name[files], name[paths], name[job_options]]] for taget[name[command_template]] in starred[name[templates]] begin[:] name[command_template]._dependencies assign[=] call[name[_get_prelim_dependencies], parameter[name[command_template], name[templates]]] return[name[templates]]
keyword[def] identifier[get_command_templates] ( identifier[command_tokens] , identifier[file_tokens] =[], identifier[path_tokens] =[], identifier[job_options] =[]): literal[string] identifier[files] = identifier[get_files] ( identifier[file_tokens] ) identifier[paths] = identifier[get_paths] ( identifier[path_tokens] ) identifier[job_options] = identifier[get_options] ( identifier[job_options] ) identifier[templates] = identifier[_get_command_templates] ( identifier[command_tokens] , identifier[files] , identifier[paths] , identifier[job_options] ) keyword[for] identifier[command_template] keyword[in] identifier[templates] : identifier[command_template] . identifier[_dependencies] = identifier[_get_prelim_dependencies] ( identifier[command_template] , identifier[templates] ) keyword[return] identifier[templates]
def get_command_templates(command_tokens, file_tokens=[], path_tokens=[], job_options=[]): """ Given a list of tokens from the grammar, return a list of commands. """ files = get_files(file_tokens) paths = get_paths(path_tokens) job_options = get_options(job_options) templates = _get_command_templates(command_tokens, files, paths, job_options) for command_template in templates: command_template._dependencies = _get_prelim_dependencies(command_template, templates) # depends on [control=['for'], data=['command_template']] return templates
def post_parse(self): """Called after parsing is done""" if self.cache and not self.cache_loaded: self.cache_writer_cls(self.file_name, self.wavefront).write()
def function[post_parse, parameter[self]]: constant[Called after parsing is done] if <ast.BoolOp object at 0x7da204961e40> begin[:] call[call[name[self].cache_writer_cls, parameter[name[self].file_name, name[self].wavefront]].write, parameter[]]
keyword[def] identifier[post_parse] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[cache] keyword[and] keyword[not] identifier[self] . identifier[cache_loaded] : identifier[self] . identifier[cache_writer_cls] ( identifier[self] . identifier[file_name] , identifier[self] . identifier[wavefront] ). identifier[write] ()
def post_parse(self): """Called after parsing is done""" if self.cache and (not self.cache_loaded): self.cache_writer_cls(self.file_name, self.wavefront).write() # depends on [control=['if'], data=[]]
def pole(conic, plane): """ Calculates the pole of a polar plane for a given conic section. """ v = dot(N.linalg.inv(conic),plane) return v[:-1]/v[-1]
def function[pole, parameter[conic, plane]]: constant[ Calculates the pole of a polar plane for a given conic section. ] variable[v] assign[=] call[name[dot], parameter[call[name[N].linalg.inv, parameter[name[conic]]], name[plane]]] return[binary_operation[call[name[v]][<ast.Slice object at 0x7da1b190d090>] / call[name[v]][<ast.UnaryOp object at 0x7da1b190c8b0>]]]
keyword[def] identifier[pole] ( identifier[conic] , identifier[plane] ): literal[string] identifier[v] = identifier[dot] ( identifier[N] . identifier[linalg] . identifier[inv] ( identifier[conic] ), identifier[plane] ) keyword[return] identifier[v] [:- literal[int] ]/ identifier[v] [- literal[int] ]
def pole(conic, plane): """ Calculates the pole of a polar plane for a given conic section. """ v = dot(N.linalg.inv(conic), plane) return v[:-1] / v[-1]
def insert(self, data): """Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = {key:self._default_entry for key in self._headers} row['_uid'] = self._get_new_uid() for key, val in data.items(): if key in ('_uid', '_default'): logging.warn("Cannot manually set columns _uid or _default of a row! Given data: {0}".format(data)) continue if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) row[key] = val self._table.append(row) self._save() return row['_uid']
def function[insert, parameter[self, data]]: constant[Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. ] variable[row] assign[=] <ast.DictComp object at 0x7da1b1253a30> call[name[row]][constant[_uid]] assign[=] call[name[self]._get_new_uid, parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b1250c70>, <ast.Name object at 0x7da1b1252e30>]]] in starred[call[name[data].items, parameter[]]] begin[:] if compare[name[key] in tuple[[<ast.Constant object at 0x7da1b1250490>, <ast.Constant object at 0x7da1b1253c40>]]] begin[:] call[name[logging].warn, parameter[call[constant[Cannot manually set columns _uid or _default of a row! Given data: {0}].format, parameter[name[data]]]]] continue if <ast.UnaryOp object at 0x7da1b1252080> begin[:] <ast.Raise object at 0x7da1b1253310> call[name[row]][name[key]] assign[=] name[val] call[name[self]._table.append, parameter[name[row]]] call[name[self]._save, parameter[]] return[call[name[row]][constant[_uid]]]
keyword[def] identifier[insert] ( identifier[self] , identifier[data] ): literal[string] identifier[row] ={ identifier[key] : identifier[self] . identifier[_default_entry] keyword[for] identifier[key] keyword[in] identifier[self] . identifier[_headers] } identifier[row] [ literal[string] ]= identifier[self] . identifier[_get_new_uid] () keyword[for] identifier[key] , identifier[val] keyword[in] identifier[data] . identifier[items] (): keyword[if] identifier[key] keyword[in] ( literal[string] , literal[string] ): identifier[logging] . identifier[warn] ( literal[string] . identifier[format] ( identifier[data] )) keyword[continue] keyword[if] keyword[not] identifier[isinstance] ( identifier[val] , identifier[CSVModel] . identifier[_KNOWN_TYPES_MAP] [ identifier[self] . identifier[_headers_types] [ identifier[key] ]]): keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[key] , identifier[CSVModel] . identifier[_KNOWN_TYPES_MAP] [ identifier[self] . identifier[_headers_types] [ identifier[key] ]], identifier[type] ( identifier[val] ))) identifier[row] [ identifier[key] ]= identifier[val] identifier[self] . identifier[_table] . identifier[append] ( identifier[row] ) identifier[self] . identifier[_save] () keyword[return] identifier[row] [ literal[string] ]
def insert(self, data): """Insert a row into the .csv file. Parameters ---------- data : :obj:`dict` A dictionary mapping keys (header strings) to values. Returns ------- int The UID for the new row. Raises ------ Exception If the value for a given header is not of the appropriate type. """ row = {key: self._default_entry for key in self._headers} row['_uid'] = self._get_new_uid() for (key, val) in data.items(): if key in ('_uid', '_default'): logging.warn('Cannot manually set columns _uid or _default of a row! Given data: {0}'.format(data)) continue # depends on [control=['if'], data=[]] if not isinstance(val, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]]): raise Exception('Data type mismatch for column {0}. Expected: {1}, got: {2}'.format(key, CSVModel._KNOWN_TYPES_MAP[self._headers_types[key]], type(val))) # depends on [control=['if'], data=[]] row[key] = val # depends on [control=['for'], data=[]] self._table.append(row) self._save() return row['_uid']
def encode(self): """Encode the DAT packet based on instance variables, populating self.buffer, returning self.""" fmt = b"!HH%dsx" % len(self.errmsgs[self.errorcode]) log.debug("encoding ERR packet with fmt %s", fmt) self.buffer = struct.pack(fmt, self.opcode, self.errorcode, self.errmsgs[self.errorcode]) return self
def function[encode, parameter[self]]: constant[Encode the DAT packet based on instance variables, populating self.buffer, returning self.] variable[fmt] assign[=] binary_operation[constant[b'!HH%dsx'] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[call[name[self].errmsgs][name[self].errorcode]]]] call[name[log].debug, parameter[constant[encoding ERR packet with fmt %s], name[fmt]]] name[self].buffer assign[=] call[name[struct].pack, parameter[name[fmt], name[self].opcode, name[self].errorcode, call[name[self].errmsgs][name[self].errorcode]]] return[name[self]]
keyword[def] identifier[encode] ( identifier[self] ): literal[string] identifier[fmt] = literal[string] % identifier[len] ( identifier[self] . identifier[errmsgs] [ identifier[self] . identifier[errorcode] ]) identifier[log] . identifier[debug] ( literal[string] , identifier[fmt] ) identifier[self] . identifier[buffer] = identifier[struct] . identifier[pack] ( identifier[fmt] , identifier[self] . identifier[opcode] , identifier[self] . identifier[errorcode] , identifier[self] . identifier[errmsgs] [ identifier[self] . identifier[errorcode] ]) keyword[return] identifier[self]
def encode(self): """Encode the DAT packet based on instance variables, populating self.buffer, returning self.""" fmt = b'!HH%dsx' % len(self.errmsgs[self.errorcode]) log.debug('encoding ERR packet with fmt %s', fmt) self.buffer = struct.pack(fmt, self.opcode, self.errorcode, self.errmsgs[self.errorcode]) return self
def _zsh_comp_command(self, zcf, cmd, grouping, add_help=True): """Write zsh _arguments compdef for a given command. Args: zcf (file): zsh compdef file. cmd (str): command name, set to None or '' for bare command. grouping (bool): group options (zsh>=5.4). add_help (bool): add an help option. """ if add_help: if grouping: print("+ '(help)'", end=BLK, file=zcf) print("'--help[show help message]'", end=BLK, file=zcf) print("'-h[show help message]'", end=BLK, file=zcf) # could deal with duplicate by iterating in reverse and keep set of # already defined opts. no_comp = ('store_true', 'store_false') cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare for opt, sct in cmd_dict.items(): meta = self._conf[sct].def_[opt] if meta.cmd_kwargs.get('action') == 'append': grpfmt, optfmt = "+ '{}'", "'*{}[{}]{}'" if meta.comprule is None: meta.comprule = '' else: grpfmt, optfmt = "+ '({})'", "'{}[{}]{}'" if meta.cmd_kwargs.get('action') in no_comp \ or meta.cmd_kwargs.get('nargs') == 0: meta.comprule = None if meta.comprule is None: compstr = '' elif meta.comprule == '': optfmt = optfmt.split('[') optfmt = optfmt[0] + '=[' + optfmt[1] compstr = ': :( )' else: optfmt = optfmt.split('[') optfmt = optfmt[0] + '=[' + optfmt[1] compstr = ': :{}'.format(meta.comprule) if grouping: print(grpfmt.format(opt), end=BLK, file=zcf) for name in _names(self._conf[sct], opt): print(optfmt.format(name, meta.help.replace("'", "'\"'\"'"), compstr), end=BLK, file=zcf)
def function[_zsh_comp_command, parameter[self, zcf, cmd, grouping, add_help]]: constant[Write zsh _arguments compdef for a given command. Args: zcf (file): zsh compdef file. cmd (str): command name, set to None or '' for bare command. grouping (bool): group options (zsh>=5.4). add_help (bool): add an help option. ] if name[add_help] begin[:] if name[grouping] begin[:] call[name[print], parameter[constant[+ '(help)']]] call[name[print], parameter[constant['--help[show help message]']]] call[name[print], parameter[constant['-h[show help message]']]] variable[no_comp] assign[=] tuple[[<ast.Constant object at 0x7da20cabe140>, <ast.Constant object at 0x7da20cabc700>]] variable[cmd_dict] assign[=] <ast.IfExp object at 0x7da20cabd6f0> for taget[tuple[[<ast.Name object at 0x7da20cabc2b0>, <ast.Name object at 0x7da20cabd6c0>]]] in starred[call[name[cmd_dict].items, parameter[]]] begin[:] variable[meta] assign[=] call[call[name[self]._conf][name[sct]].def_][name[opt]] if compare[call[name[meta].cmd_kwargs.get, parameter[constant[action]]] equal[==] constant[append]] begin[:] <ast.Tuple object at 0x7da20cabf130> assign[=] tuple[[<ast.Constant object at 0x7da20cabd750>, <ast.Constant object at 0x7da20cabfd90>]] if compare[name[meta].comprule is constant[None]] begin[:] name[meta].comprule assign[=] constant[] if <ast.BoolOp object at 0x7da2054a7100> begin[:] name[meta].comprule assign[=] constant[None] if compare[name[meta].comprule is constant[None]] begin[:] variable[compstr] assign[=] constant[] if name[grouping] begin[:] call[name[print], parameter[call[name[grpfmt].format, parameter[name[opt]]]]] for taget[name[name]] in starred[call[name[_names], parameter[call[name[self]._conf][name[sct]], name[opt]]]] begin[:] call[name[print], parameter[call[name[optfmt].format, parameter[name[name], call[name[meta].help.replace, parameter[constant['], constant['"'"']]], name[compstr]]]]]
keyword[def] identifier[_zsh_comp_command] ( identifier[self] , identifier[zcf] , identifier[cmd] , identifier[grouping] , identifier[add_help] = keyword[True] ): literal[string] keyword[if] identifier[add_help] : keyword[if] identifier[grouping] : identifier[print] ( literal[string] , identifier[end] = identifier[BLK] , identifier[file] = identifier[zcf] ) identifier[print] ( literal[string] , identifier[end] = identifier[BLK] , identifier[file] = identifier[zcf] ) identifier[print] ( literal[string] , identifier[end] = identifier[BLK] , identifier[file] = identifier[zcf] ) identifier[no_comp] =( literal[string] , literal[string] ) identifier[cmd_dict] = identifier[self] . identifier[_opt_cmds] [ identifier[cmd] ] keyword[if] identifier[cmd] keyword[else] identifier[self] . identifier[_opt_bare] keyword[for] identifier[opt] , identifier[sct] keyword[in] identifier[cmd_dict] . identifier[items] (): identifier[meta] = identifier[self] . identifier[_conf] [ identifier[sct] ]. identifier[def_] [ identifier[opt] ] keyword[if] identifier[meta] . identifier[cmd_kwargs] . identifier[get] ( literal[string] )== literal[string] : identifier[grpfmt] , identifier[optfmt] = literal[string] , literal[string] keyword[if] identifier[meta] . identifier[comprule] keyword[is] keyword[None] : identifier[meta] . identifier[comprule] = literal[string] keyword[else] : identifier[grpfmt] , identifier[optfmt] = literal[string] , literal[string] keyword[if] identifier[meta] . identifier[cmd_kwargs] . identifier[get] ( literal[string] ) keyword[in] identifier[no_comp] keyword[or] identifier[meta] . identifier[cmd_kwargs] . identifier[get] ( literal[string] )== literal[int] : identifier[meta] . identifier[comprule] = keyword[None] keyword[if] identifier[meta] . identifier[comprule] keyword[is] keyword[None] : identifier[compstr] = literal[string] keyword[elif] identifier[meta] . identifier[comprule] == literal[string] : identifier[optfmt] = identifier[optfmt] . 
identifier[split] ( literal[string] ) identifier[optfmt] = identifier[optfmt] [ literal[int] ]+ literal[string] + identifier[optfmt] [ literal[int] ] identifier[compstr] = literal[string] keyword[else] : identifier[optfmt] = identifier[optfmt] . identifier[split] ( literal[string] ) identifier[optfmt] = identifier[optfmt] [ literal[int] ]+ literal[string] + identifier[optfmt] [ literal[int] ] identifier[compstr] = literal[string] . identifier[format] ( identifier[meta] . identifier[comprule] ) keyword[if] identifier[grouping] : identifier[print] ( identifier[grpfmt] . identifier[format] ( identifier[opt] ), identifier[end] = identifier[BLK] , identifier[file] = identifier[zcf] ) keyword[for] identifier[name] keyword[in] identifier[_names] ( identifier[self] . identifier[_conf] [ identifier[sct] ], identifier[opt] ): identifier[print] ( identifier[optfmt] . identifier[format] ( identifier[name] , identifier[meta] . identifier[help] . identifier[replace] ( literal[string] , literal[string] ), identifier[compstr] ), identifier[end] = identifier[BLK] , identifier[file] = identifier[zcf] )
def _zsh_comp_command(self, zcf, cmd, grouping, add_help=True): """Write zsh _arguments compdef for a given command. Args: zcf (file): zsh compdef file. cmd (str): command name, set to None or '' for bare command. grouping (bool): group options (zsh>=5.4). add_help (bool): add an help option. """ if add_help: if grouping: print("+ '(help)'", end=BLK, file=zcf) # depends on [control=['if'], data=[]] print("'--help[show help message]'", end=BLK, file=zcf) print("'-h[show help message]'", end=BLK, file=zcf) # depends on [control=['if'], data=[]] # could deal with duplicate by iterating in reverse and keep set of # already defined opts. no_comp = ('store_true', 'store_false') cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_bare for (opt, sct) in cmd_dict.items(): meta = self._conf[sct].def_[opt] if meta.cmd_kwargs.get('action') == 'append': (grpfmt, optfmt) = ("+ '{}'", "'*{}[{}]{}'") if meta.comprule is None: meta.comprule = '' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: (grpfmt, optfmt) = ("+ '({})'", "'{}[{}]{}'") if meta.cmd_kwargs.get('action') in no_comp or meta.cmd_kwargs.get('nargs') == 0: meta.comprule = None # depends on [control=['if'], data=[]] if meta.comprule is None: compstr = '' # depends on [control=['if'], data=[]] elif meta.comprule == '': optfmt = optfmt.split('[') optfmt = optfmt[0] + '=[' + optfmt[1] compstr = ': :( )' # depends on [control=['if'], data=[]] else: optfmt = optfmt.split('[') optfmt = optfmt[0] + '=[' + optfmt[1] compstr = ': :{}'.format(meta.comprule) if grouping: print(grpfmt.format(opt), end=BLK, file=zcf) # depends on [control=['if'], data=[]] for name in _names(self._conf[sct], opt): print(optfmt.format(name, meta.help.replace("'", '\'"\'"\''), compstr), end=BLK, file=zcf) # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=[]]
def check_ttl_max_tries(tries, enqueued_at, max_tries, ttl): '''Check that the ttl for an item has not expired, and that the item has not exceeded it's maximum allotted tries''' if max_tries > 0 and tries >= max_tries: raise FSQMaxTriesError(errno.EINTR, u'Max tries exceded:'\ u' {0} ({1})'.format(max_tries, tries)) if ttl > 0 and datetime.datetime.now() < enqueued_at + datetime.timedelta( seconds=ttl): raise FSQTTLExpiredError(errno.EINTR, u'TTL Expired:'\ u' {0}'.format(ttl))
def function[check_ttl_max_tries, parameter[tries, enqueued_at, max_tries, ttl]]: constant[Check that the ttl for an item has not expired, and that the item has not exceeded it's maximum allotted tries] if <ast.BoolOp object at 0x7da18dc9a8c0> begin[:] <ast.Raise object at 0x7da18dc9ae90> if <ast.BoolOp object at 0x7da18dc980d0> begin[:] <ast.Raise object at 0x7da18dc9b340>
keyword[def] identifier[check_ttl_max_tries] ( identifier[tries] , identifier[enqueued_at] , identifier[max_tries] , identifier[ttl] ): literal[string] keyword[if] identifier[max_tries] > literal[int] keyword[and] identifier[tries] >= identifier[max_tries] : keyword[raise] identifier[FSQMaxTriesError] ( identifier[errno] . identifier[EINTR] , literal[string] literal[string] . identifier[format] ( identifier[max_tries] , identifier[tries] )) keyword[if] identifier[ttl] > literal[int] keyword[and] identifier[datetime] . identifier[datetime] . identifier[now] ()< identifier[enqueued_at] + identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[ttl] ): keyword[raise] identifier[FSQTTLExpiredError] ( identifier[errno] . identifier[EINTR] , literal[string] literal[string] . identifier[format] ( identifier[ttl] ))
def check_ttl_max_tries(tries, enqueued_at, max_tries, ttl): """Check that the ttl for an item has not expired, and that the item has not exceeded it's maximum allotted tries""" if max_tries > 0 and tries >= max_tries: raise FSQMaxTriesError(errno.EINTR, u'Max tries exceded: {0} ({1})'.format(max_tries, tries)) # depends on [control=['if'], data=[]] if ttl > 0 and datetime.datetime.now() < enqueued_at + datetime.timedelta(seconds=ttl): raise FSQTTLExpiredError(errno.EINTR, u'TTL Expired: {0}'.format(ttl)) # depends on [control=['if'], data=[]]
def parse_xml_node(self, node): '''Parse an xml.dom Node object representing a configuration data into this object. ''' self.name = node.getAttributeNS(RTS_NS, 'name') if node.hasAttributeNS(RTS_NS, 'data'): self.data = node.getAttributeNS(RTS_NS, 'data') else: self.data = '' return self
def function[parse_xml_node, parameter[self, node]]: constant[Parse an xml.dom Node object representing a configuration data into this object. ] name[self].name assign[=] call[name[node].getAttributeNS, parameter[name[RTS_NS], constant[name]]] if call[name[node].hasAttributeNS, parameter[name[RTS_NS], constant[data]]] begin[:] name[self].data assign[=] call[name[node].getAttributeNS, parameter[name[RTS_NS], constant[data]]] return[name[self]]
keyword[def] identifier[parse_xml_node] ( identifier[self] , identifier[node] ): literal[string] identifier[self] . identifier[name] = identifier[node] . identifier[getAttributeNS] ( identifier[RTS_NS] , literal[string] ) keyword[if] identifier[node] . identifier[hasAttributeNS] ( identifier[RTS_NS] , literal[string] ): identifier[self] . identifier[data] = identifier[node] . identifier[getAttributeNS] ( identifier[RTS_NS] , literal[string] ) keyword[else] : identifier[self] . identifier[data] = literal[string] keyword[return] identifier[self]
def parse_xml_node(self, node): """Parse an xml.dom Node object representing a configuration data into this object. """ self.name = node.getAttributeNS(RTS_NS, 'name') if node.hasAttributeNS(RTS_NS, 'data'): self.data = node.getAttributeNS(RTS_NS, 'data') # depends on [control=['if'], data=[]] else: self.data = '' return self
def contains(bank, key): ''' Checks if the specified bank contains the specified key. ''' _init_client() query = "SELECT COUNT(data) FROM {0} WHERE bank='{1}' " \ "AND etcd_key='{2}'".format(_table_name, bank, key) cur, _ = run_query(client, query) r = cur.fetchone() cur.close() return r[0] == 1
def function[contains, parameter[bank, key]]: constant[ Checks if the specified bank contains the specified key. ] call[name[_init_client], parameter[]] variable[query] assign[=] call[constant[SELECT COUNT(data) FROM {0} WHERE bank='{1}' AND etcd_key='{2}'].format, parameter[name[_table_name], name[bank], name[key]]] <ast.Tuple object at 0x7da18dc040a0> assign[=] call[name[run_query], parameter[name[client], name[query]]] variable[r] assign[=] call[name[cur].fetchone, parameter[]] call[name[cur].close, parameter[]] return[compare[call[name[r]][constant[0]] equal[==] constant[1]]]
keyword[def] identifier[contains] ( identifier[bank] , identifier[key] ): literal[string] identifier[_init_client] () identifier[query] = literal[string] literal[string] . identifier[format] ( identifier[_table_name] , identifier[bank] , identifier[key] ) identifier[cur] , identifier[_] = identifier[run_query] ( identifier[client] , identifier[query] ) identifier[r] = identifier[cur] . identifier[fetchone] () identifier[cur] . identifier[close] () keyword[return] identifier[r] [ literal[int] ]== literal[int]
def contains(bank, key): """ Checks if the specified bank contains the specified key. """ _init_client() query = "SELECT COUNT(data) FROM {0} WHERE bank='{1}' AND etcd_key='{2}'".format(_table_name, bank, key) (cur, _) = run_query(client, query) r = cur.fetchone() cur.close() return r[0] == 1
def pipe_strtransform(context=None, _INPUT=None, conf=None, **kwargs): """A string module that splits a string into tokens delimited by separators. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : iterable of items or strings conf : {'transformation': {value': <'swapcase'>}} Returns ------- _OUTPUT : generator of tokenized strings """ splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs)) parsed = utils.dispatch(splits, *get_dispatch_funcs()) _OUTPUT = starmap(parse_result, parsed) return _OUTPUT
def function[pipe_strtransform, parameter[context, _INPUT, conf]]: constant[A string module that splits a string into tokens delimited by separators. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : iterable of items or strings conf : {'transformation': {value': <'swapcase'>}} Returns ------- _OUTPUT : generator of tokenized strings ] variable[splits] assign[=] call[name[get_splits], parameter[name[_INPUT], name[conf]]] variable[parsed] assign[=] call[name[utils].dispatch, parameter[name[splits], <ast.Starred object at 0x7da1b045c520>]] variable[_OUTPUT] assign[=] call[name[starmap], parameter[name[parse_result], name[parsed]]] return[name[_OUTPUT]]
keyword[def] identifier[pipe_strtransform] ( identifier[context] = keyword[None] , identifier[_INPUT] = keyword[None] , identifier[conf] = keyword[None] ,** identifier[kwargs] ): literal[string] identifier[splits] = identifier[get_splits] ( identifier[_INPUT] , identifier[conf] ,** identifier[cdicts] ( identifier[opts] , identifier[kwargs] )) identifier[parsed] = identifier[utils] . identifier[dispatch] ( identifier[splits] ,* identifier[get_dispatch_funcs] ()) identifier[_OUTPUT] = identifier[starmap] ( identifier[parse_result] , identifier[parsed] ) keyword[return] identifier[_OUTPUT]
def pipe_strtransform(context=None, _INPUT=None, conf=None, **kwargs): """A string module that splits a string into tokens delimited by separators. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : iterable of items or strings conf : {'transformation': {value': <'swapcase'>}} Returns ------- _OUTPUT : generator of tokenized strings """ splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs)) parsed = utils.dispatch(splits, *get_dispatch_funcs()) _OUTPUT = starmap(parse_result, parsed) return _OUTPUT
def save_data_files(bs, prefix=None, directory=None): """Write the phonon band structure data files to disk. Args: bs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`): The phonon band structure. prefix (:obj:`str`, optional): Prefix for data file. directory (:obj:`str`, optional): Directory in which to save the data. Returns: str: The filename of the written data file. """ filename = 'phonon_band.dat' filename = '{}_phonon_band.dat'.format(prefix) if prefix else filename directory = directory if directory else '.' filename = os.path.join(directory, filename) with open(filename, 'w') as f: header = '#k-distance frequency[THz]\n' f.write(header) for band in bs.bands: for d, e in zip(bs.distance, band): f.write('{:.8f} {:.8f}\n'.format(d, e)) f.write('\n') return filename
def function[save_data_files, parameter[bs, prefix, directory]]: constant[Write the phonon band structure data files to disk. Args: bs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`): The phonon band structure. prefix (:obj:`str`, optional): Prefix for data file. directory (:obj:`str`, optional): Directory in which to save the data. Returns: str: The filename of the written data file. ] variable[filename] assign[=] constant[phonon_band.dat] variable[filename] assign[=] <ast.IfExp object at 0x7da18eb55180> variable[directory] assign[=] <ast.IfExp object at 0x7da18eb56aa0> variable[filename] assign[=] call[name[os].path.join, parameter[name[directory], name[filename]]] with call[name[open], parameter[name[filename], constant[w]]] begin[:] variable[header] assign[=] constant[#k-distance frequency[THz] ] call[name[f].write, parameter[name[header]]] for taget[name[band]] in starred[name[bs].bands] begin[:] for taget[tuple[[<ast.Name object at 0x7da18eb54ca0>, <ast.Name object at 0x7da18eb56830>]]] in starred[call[name[zip], parameter[name[bs].distance, name[band]]]] begin[:] call[name[f].write, parameter[call[constant[{:.8f} {:.8f} ].format, parameter[name[d], name[e]]]]] call[name[f].write, parameter[constant[ ]]] return[name[filename]]
keyword[def] identifier[save_data_files] ( identifier[bs] , identifier[prefix] = keyword[None] , identifier[directory] = keyword[None] ): literal[string] identifier[filename] = literal[string] identifier[filename] = literal[string] . identifier[format] ( identifier[prefix] ) keyword[if] identifier[prefix] keyword[else] identifier[filename] identifier[directory] = identifier[directory] keyword[if] identifier[directory] keyword[else] literal[string] identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[filename] ) keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : identifier[header] = literal[string] identifier[f] . identifier[write] ( identifier[header] ) keyword[for] identifier[band] keyword[in] identifier[bs] . identifier[bands] : keyword[for] identifier[d] , identifier[e] keyword[in] identifier[zip] ( identifier[bs] . identifier[distance] , identifier[band] ): identifier[f] . identifier[write] ( literal[string] . identifier[format] ( identifier[d] , identifier[e] )) identifier[f] . identifier[write] ( literal[string] ) keyword[return] identifier[filename]
def save_data_files(bs, prefix=None, directory=None): """Write the phonon band structure data files to disk. Args: bs (:obj:`~pymatgen.phonon.bandstructure.PhononBandStructureSymmLine`): The phonon band structure. prefix (:obj:`str`, optional): Prefix for data file. directory (:obj:`str`, optional): Directory in which to save the data. Returns: str: The filename of the written data file. """ filename = 'phonon_band.dat' filename = '{}_phonon_band.dat'.format(prefix) if prefix else filename directory = directory if directory else '.' filename = os.path.join(directory, filename) with open(filename, 'w') as f: header = '#k-distance frequency[THz]\n' f.write(header) for band in bs.bands: for (d, e) in zip(bs.distance, band): f.write('{:.8f} {:.8f}\n'.format(d, e)) # depends on [control=['for'], data=[]] f.write('\n') # depends on [control=['for'], data=['band']] # depends on [control=['with'], data=['f']] return filename
def calculate_P(self, T, P, method): r'''Method to calculate pressure-dependent gas molar volume at temperature `T` and pressure `P` with a given method. This method has no exception handling; see `TP_dependent_property` for that. Parameters ---------- T : float Temperature at which to calculate molar volume, [K] P : float Pressure at which to calculate molar volume, [K] method : str Name of the method to use Returns ------- Vm : float Molar volume of the gas at T and P, [m^3/mol] ''' if method == EOS: self.eos[0] = self.eos[0].to_TP(T=T, P=P) Vm = self.eos[0].V_g elif method == TSONOPOULOS_EXTENDED: B = BVirial_Tsonopoulos_extended(T, self.Tc, self.Pc, self.omega, dipole=self.dipole) Vm = ideal_gas(T, P) + B elif method == TSONOPOULOS: B = BVirial_Tsonopoulos(T, self.Tc, self.Pc, self.omega) Vm = ideal_gas(T, P) + B elif method == ABBOTT: B = BVirial_Abbott(T, self.Tc, self.Pc, self.omega) Vm = ideal_gas(T, P) + B elif method == PITZER_CURL: B = BVirial_Pitzer_Curl(T, self.Tc, self.Pc, self.omega) Vm = ideal_gas(T, P) + B elif method == CRC_VIRIAL: a1, a2, a3, a4, a5 = self.CRC_VIRIAL_coeffs t = 298.15/T - 1. B = (a1 + a2*t + a3*t**2 + a4*t**3 + a5*t**4)/1E6 Vm = ideal_gas(T, P) + B elif method == IDEAL: Vm = ideal_gas(T, P) elif method == COOLPROP: Vm = 1./PropsSI('DMOLAR', 'T', T, 'P', P, self.CASRN) elif method in self.tabular_data: Vm = self.interpolate_P(T, P, method) return Vm
def function[calculate_P, parameter[self, T, P, method]]: constant[Method to calculate pressure-dependent gas molar volume at temperature `T` and pressure `P` with a given method. This method has no exception handling; see `TP_dependent_property` for that. Parameters ---------- T : float Temperature at which to calculate molar volume, [K] P : float Pressure at which to calculate molar volume, [K] method : str Name of the method to use Returns ------- Vm : float Molar volume of the gas at T and P, [m^3/mol] ] if compare[name[method] equal[==] name[EOS]] begin[:] call[name[self].eos][constant[0]] assign[=] call[call[name[self].eos][constant[0]].to_TP, parameter[]] variable[Vm] assign[=] call[name[self].eos][constant[0]].V_g return[name[Vm]]
keyword[def] identifier[calculate_P] ( identifier[self] , identifier[T] , identifier[P] , identifier[method] ): literal[string] keyword[if] identifier[method] == identifier[EOS] : identifier[self] . identifier[eos] [ literal[int] ]= identifier[self] . identifier[eos] [ literal[int] ]. identifier[to_TP] ( identifier[T] = identifier[T] , identifier[P] = identifier[P] ) identifier[Vm] = identifier[self] . identifier[eos] [ literal[int] ]. identifier[V_g] keyword[elif] identifier[method] == identifier[TSONOPOULOS_EXTENDED] : identifier[B] = identifier[BVirial_Tsonopoulos_extended] ( identifier[T] , identifier[self] . identifier[Tc] , identifier[self] . identifier[Pc] , identifier[self] . identifier[omega] , identifier[dipole] = identifier[self] . identifier[dipole] ) identifier[Vm] = identifier[ideal_gas] ( identifier[T] , identifier[P] )+ identifier[B] keyword[elif] identifier[method] == identifier[TSONOPOULOS] : identifier[B] = identifier[BVirial_Tsonopoulos] ( identifier[T] , identifier[self] . identifier[Tc] , identifier[self] . identifier[Pc] , identifier[self] . identifier[omega] ) identifier[Vm] = identifier[ideal_gas] ( identifier[T] , identifier[P] )+ identifier[B] keyword[elif] identifier[method] == identifier[ABBOTT] : identifier[B] = identifier[BVirial_Abbott] ( identifier[T] , identifier[self] . identifier[Tc] , identifier[self] . identifier[Pc] , identifier[self] . identifier[omega] ) identifier[Vm] = identifier[ideal_gas] ( identifier[T] , identifier[P] )+ identifier[B] keyword[elif] identifier[method] == identifier[PITZER_CURL] : identifier[B] = identifier[BVirial_Pitzer_Curl] ( identifier[T] , identifier[self] . identifier[Tc] , identifier[self] . identifier[Pc] , identifier[self] . identifier[omega] ) identifier[Vm] = identifier[ideal_gas] ( identifier[T] , identifier[P] )+ identifier[B] keyword[elif] identifier[method] == identifier[CRC_VIRIAL] : identifier[a1] , identifier[a2] , identifier[a3] , identifier[a4] , identifier[a5] = identifier[self] . 
identifier[CRC_VIRIAL_coeffs] identifier[t] = literal[int] / identifier[T] - literal[int] identifier[B] =( identifier[a1] + identifier[a2] * identifier[t] + identifier[a3] * identifier[t] ** literal[int] + identifier[a4] * identifier[t] ** literal[int] + identifier[a5] * identifier[t] ** literal[int] )/ literal[int] identifier[Vm] = identifier[ideal_gas] ( identifier[T] , identifier[P] )+ identifier[B] keyword[elif] identifier[method] == identifier[IDEAL] : identifier[Vm] = identifier[ideal_gas] ( identifier[T] , identifier[P] ) keyword[elif] identifier[method] == identifier[COOLPROP] : identifier[Vm] = literal[int] / identifier[PropsSI] ( literal[string] , literal[string] , identifier[T] , literal[string] , identifier[P] , identifier[self] . identifier[CASRN] ) keyword[elif] identifier[method] keyword[in] identifier[self] . identifier[tabular_data] : identifier[Vm] = identifier[self] . identifier[interpolate_P] ( identifier[T] , identifier[P] , identifier[method] ) keyword[return] identifier[Vm]
def calculate_P(self, T, P, method): """Method to calculate pressure-dependent gas molar volume at temperature `T` and pressure `P` with a given method. This method has no exception handling; see `TP_dependent_property` for that. Parameters ---------- T : float Temperature at which to calculate molar volume, [K] P : float Pressure at which to calculate molar volume, [K] method : str Name of the method to use Returns ------- Vm : float Molar volume of the gas at T and P, [m^3/mol] """ if method == EOS: self.eos[0] = self.eos[0].to_TP(T=T, P=P) Vm = self.eos[0].V_g # depends on [control=['if'], data=[]] elif method == TSONOPOULOS_EXTENDED: B = BVirial_Tsonopoulos_extended(T, self.Tc, self.Pc, self.omega, dipole=self.dipole) Vm = ideal_gas(T, P) + B # depends on [control=['if'], data=[]] elif method == TSONOPOULOS: B = BVirial_Tsonopoulos(T, self.Tc, self.Pc, self.omega) Vm = ideal_gas(T, P) + B # depends on [control=['if'], data=[]] elif method == ABBOTT: B = BVirial_Abbott(T, self.Tc, self.Pc, self.omega) Vm = ideal_gas(T, P) + B # depends on [control=['if'], data=[]] elif method == PITZER_CURL: B = BVirial_Pitzer_Curl(T, self.Tc, self.Pc, self.omega) Vm = ideal_gas(T, P) + B # depends on [control=['if'], data=[]] elif method == CRC_VIRIAL: (a1, a2, a3, a4, a5) = self.CRC_VIRIAL_coeffs t = 298.15 / T - 1.0 B = (a1 + a2 * t + a3 * t ** 2 + a4 * t ** 3 + a5 * t ** 4) / 1000000.0 Vm = ideal_gas(T, P) + B # depends on [control=['if'], data=[]] elif method == IDEAL: Vm = ideal_gas(T, P) # depends on [control=['if'], data=[]] elif method == COOLPROP: Vm = 1.0 / PropsSI('DMOLAR', 'T', T, 'P', P, self.CASRN) # depends on [control=['if'], data=[]] elif method in self.tabular_data: Vm = self.interpolate_P(T, P, method) # depends on [control=['if'], data=['method']] return Vm
def get_branches(self, repo_slug=None): """ Get a single repository on Bitbucket and return its branches.""" repo_slug = repo_slug or self.repo_slug or '' url = self.url('GET_BRANCHES', username=self.username, repo_slug=repo_slug) return self.dispatch('GET', url, auth=self.auth)
def function[get_branches, parameter[self, repo_slug]]: constant[ Get a single repository on Bitbucket and return its branches.] variable[repo_slug] assign[=] <ast.BoolOp object at 0x7da1b1a47400> variable[url] assign[=] call[name[self].url, parameter[constant[GET_BRANCHES]]] return[call[name[self].dispatch, parameter[constant[GET], name[url]]]]
keyword[def] identifier[get_branches] ( identifier[self] , identifier[repo_slug] = keyword[None] ): literal[string] identifier[repo_slug] = identifier[repo_slug] keyword[or] identifier[self] . identifier[repo_slug] keyword[or] literal[string] identifier[url] = identifier[self] . identifier[url] ( literal[string] , identifier[username] = identifier[self] . identifier[username] , identifier[repo_slug] = identifier[repo_slug] ) keyword[return] identifier[self] . identifier[dispatch] ( literal[string] , identifier[url] , identifier[auth] = identifier[self] . identifier[auth] )
def get_branches(self, repo_slug=None): """ Get a single repository on Bitbucket and return its branches.""" repo_slug = repo_slug or self.repo_slug or '' url = self.url('GET_BRANCHES', username=self.username, repo_slug=repo_slug) return self.dispatch('GET', url, auth=self.auth)
def set_location(request): """ Redirect to a given url while setting the chosen location in the cookie. The url and the location_id need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """ next = request.GET.get('next', None) or request.POST.get('next', None) if not next: next = request.META.get('HTTP_REFERER', None) if not next: next = '/' response = http.HttpResponseRedirect(next) if request.method == 'POST': location_id = request.POST.get('location_id', None) or request.POST.get('location', None) if location_id: try: location = get_class(settings.GEOIP_LOCATION_MODEL).objects.get(pk=location_id) storage_class(request=request, response=response).set(location=location, force=True) except (ValueError, ObjectDoesNotExist): pass return response
def function[set_location, parameter[request]]: constant[ Redirect to a given url while setting the chosen location in the cookie. The url and the location_id need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. ] variable[next] assign[=] <ast.BoolOp object at 0x7da18f58ebf0> if <ast.UnaryOp object at 0x7da18f58c580> begin[:] variable[next] assign[=] call[name[request].META.get, parameter[constant[HTTP_REFERER], constant[None]]] if <ast.UnaryOp object at 0x7da18f58e050> begin[:] variable[next] assign[=] constant[/] variable[response] assign[=] call[name[http].HttpResponseRedirect, parameter[name[next]]] if compare[name[request].method equal[==] constant[POST]] begin[:] variable[location_id] assign[=] <ast.BoolOp object at 0x7da18f58eec0> if name[location_id] begin[:] <ast.Try object at 0x7da18f58e590> return[name[response]]
keyword[def] identifier[set_location] ( identifier[request] ): literal[string] identifier[next] = identifier[request] . identifier[GET] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[request] . identifier[POST] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] keyword[not] identifier[next] : identifier[next] = identifier[request] . identifier[META] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] keyword[not] identifier[next] : identifier[next] = literal[string] identifier[response] = identifier[http] . identifier[HttpResponseRedirect] ( identifier[next] ) keyword[if] identifier[request] . identifier[method] == literal[string] : identifier[location_id] = identifier[request] . identifier[POST] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[request] . identifier[POST] . identifier[get] ( literal[string] , keyword[None] ) keyword[if] identifier[location_id] : keyword[try] : identifier[location] = identifier[get_class] ( identifier[settings] . identifier[GEOIP_LOCATION_MODEL] ). identifier[objects] . identifier[get] ( identifier[pk] = identifier[location_id] ) identifier[storage_class] ( identifier[request] = identifier[request] , identifier[response] = identifier[response] ). identifier[set] ( identifier[location] = identifier[location] , identifier[force] = keyword[True] ) keyword[except] ( identifier[ValueError] , identifier[ObjectDoesNotExist] ): keyword[pass] keyword[return] identifier[response]
def set_location(request): """ Redirect to a given url while setting the chosen location in the cookie. The url and the location_id need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. """ next = request.GET.get('next', None) or request.POST.get('next', None) if not next: next = request.META.get('HTTP_REFERER', None) # depends on [control=['if'], data=[]] if not next: next = '/' # depends on [control=['if'], data=[]] response = http.HttpResponseRedirect(next) if request.method == 'POST': location_id = request.POST.get('location_id', None) or request.POST.get('location', None) if location_id: try: location = get_class(settings.GEOIP_LOCATION_MODEL).objects.get(pk=location_id) storage_class(request=request, response=response).set(location=location, force=True) # depends on [control=['try'], data=[]] except (ValueError, ObjectDoesNotExist): pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return response
def translate(compound, pos): """Translate a compound by a vector. Parameters ---------- compound : mb.Compound The compound being translated. pos : np.ndarray, shape=(3,), dtype=float The vector to translate the compound by. """ atom_positions = compound.xyz_with_ports atom_positions = Translation(pos).apply_to(atom_positions) compound.xyz_with_ports = atom_positions
def function[translate, parameter[compound, pos]]: constant[Translate a compound by a vector. Parameters ---------- compound : mb.Compound The compound being translated. pos : np.ndarray, shape=(3,), dtype=float The vector to translate the compound by. ] variable[atom_positions] assign[=] name[compound].xyz_with_ports variable[atom_positions] assign[=] call[call[name[Translation], parameter[name[pos]]].apply_to, parameter[name[atom_positions]]] name[compound].xyz_with_ports assign[=] name[atom_positions]
keyword[def] identifier[translate] ( identifier[compound] , identifier[pos] ): literal[string] identifier[atom_positions] = identifier[compound] . identifier[xyz_with_ports] identifier[atom_positions] = identifier[Translation] ( identifier[pos] ). identifier[apply_to] ( identifier[atom_positions] ) identifier[compound] . identifier[xyz_with_ports] = identifier[atom_positions]
def translate(compound, pos): """Translate a compound by a vector. Parameters ---------- compound : mb.Compound The compound being translated. pos : np.ndarray, shape=(3,), dtype=float The vector to translate the compound by. """ atom_positions = compound.xyz_with_ports atom_positions = Translation(pos).apply_to(atom_positions) compound.xyz_with_ports = atom_positions
def plot_summary_axes(graph: BELGraph, lax, rax, logx=True): """Plots your graph summary statistics on the given axes. After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view. Shows: 1. Count of nodes, grouped by function type 2. Count of edges, grouped by relation type :param pybel.BELGraph graph: A BEL graph :param lax: An axis object from matplotlib :param rax: An axis object from matplotlib Example usage: >>> import matplotlib.pyplot as plt >>> from pybel import from_pickle >>> from pybel_tools.summary import plot_summary_axes >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle') >>> fig, axes = plt.subplots(1, 2, figsize=(10, 4)) >>> plot_summary_axes(graph, axes[0], axes[1]) >>> plt.tight_layout() >>> plt.show() """ ntc = count_functions(graph) etc = count_relations(graph) df = pd.DataFrame.from_dict(dict(ntc), orient='index') df_ec = pd.DataFrame.from_dict(dict(etc), orient='index') df.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=lax) lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes())) df_ec.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=rax) rax.set_title('Number of edges: {}'.format(graph.number_of_edges()))
def function[plot_summary_axes, parameter[graph, lax, rax, logx]]: constant[Plots your graph summary statistics on the given axes. After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view. Shows: 1. Count of nodes, grouped by function type 2. Count of edges, grouped by relation type :param pybel.BELGraph graph: A BEL graph :param lax: An axis object from matplotlib :param rax: An axis object from matplotlib Example usage: >>> import matplotlib.pyplot as plt >>> from pybel import from_pickle >>> from pybel_tools.summary import plot_summary_axes >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle') >>> fig, axes = plt.subplots(1, 2, figsize=(10, 4)) >>> plot_summary_axes(graph, axes[0], axes[1]) >>> plt.tight_layout() >>> plt.show() ] variable[ntc] assign[=] call[name[count_functions], parameter[name[graph]]] variable[etc] assign[=] call[name[count_relations], parameter[name[graph]]] variable[df] assign[=] call[name[pd].DataFrame.from_dict, parameter[call[name[dict], parameter[name[ntc]]]]] variable[df_ec] assign[=] call[name[pd].DataFrame.from_dict, parameter[call[name[dict], parameter[name[etc]]]]] call[call[name[df].sort_values, parameter[constant[0]]].plot, parameter[]] call[name[lax].set_title, parameter[call[constant[Number of nodes: {}].format, parameter[call[name[graph].number_of_nodes, parameter[]]]]]] call[call[name[df_ec].sort_values, parameter[constant[0]]].plot, parameter[]] call[name[rax].set_title, parameter[call[constant[Number of edges: {}].format, parameter[call[name[graph].number_of_edges, parameter[]]]]]]
keyword[def] identifier[plot_summary_axes] ( identifier[graph] : identifier[BELGraph] , identifier[lax] , identifier[rax] , identifier[logx] = keyword[True] ): literal[string] identifier[ntc] = identifier[count_functions] ( identifier[graph] ) identifier[etc] = identifier[count_relations] ( identifier[graph] ) identifier[df] = identifier[pd] . identifier[DataFrame] . identifier[from_dict] ( identifier[dict] ( identifier[ntc] ), identifier[orient] = literal[string] ) identifier[df_ec] = identifier[pd] . identifier[DataFrame] . identifier[from_dict] ( identifier[dict] ( identifier[etc] ), identifier[orient] = literal[string] ) identifier[df] . identifier[sort_values] ( literal[int] , identifier[ascending] = keyword[True] ). identifier[plot] ( identifier[kind] = literal[string] , identifier[logx] = identifier[logx] , identifier[ax] = identifier[lax] ) identifier[lax] . identifier[set_title] ( literal[string] . identifier[format] ( identifier[graph] . identifier[number_of_nodes] ())) identifier[df_ec] . identifier[sort_values] ( literal[int] , identifier[ascending] = keyword[True] ). identifier[plot] ( identifier[kind] = literal[string] , identifier[logx] = identifier[logx] , identifier[ax] = identifier[rax] ) identifier[rax] . identifier[set_title] ( literal[string] . identifier[format] ( identifier[graph] . identifier[number_of_edges] ()))
def plot_summary_axes(graph: BELGraph, lax, rax, logx=True): """Plots your graph summary statistics on the given axes. After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view. Shows: 1. Count of nodes, grouped by function type 2. Count of edges, grouped by relation type :param pybel.BELGraph graph: A BEL graph :param lax: An axis object from matplotlib :param rax: An axis object from matplotlib Example usage: >>> import matplotlib.pyplot as plt >>> from pybel import from_pickle >>> from pybel_tools.summary import plot_summary_axes >>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle') >>> fig, axes = plt.subplots(1, 2, figsize=(10, 4)) >>> plot_summary_axes(graph, axes[0], axes[1]) >>> plt.tight_layout() >>> plt.show() """ ntc = count_functions(graph) etc = count_relations(graph) df = pd.DataFrame.from_dict(dict(ntc), orient='index') df_ec = pd.DataFrame.from_dict(dict(etc), orient='index') df.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=lax) lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes())) df_ec.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=rax) rax.set_title('Number of edges: {}'.format(graph.number_of_edges()))
def get_urls(self): """Returns the additional urls used by the Reversion admin.""" urls = super(SimpleHistoryAdmin, self).get_urls() admin_site = self.admin_site opts = self.model._meta info = opts.app_label, opts.model_name history_urls = [ url( "^([^/]+)/history/([^/]+)/$", admin_site.admin_view(self.history_form_view), name="%s_%s_simple_history" % info, ) ] return history_urls + urls
def function[get_urls, parameter[self]]: constant[Returns the additional urls used by the Reversion admin.] variable[urls] assign[=] call[call[name[super], parameter[name[SimpleHistoryAdmin], name[self]]].get_urls, parameter[]] variable[admin_site] assign[=] name[self].admin_site variable[opts] assign[=] name[self].model._meta variable[info] assign[=] tuple[[<ast.Attribute object at 0x7da20c6e6410>, <ast.Attribute object at 0x7da20c6e6140>]] variable[history_urls] assign[=] list[[<ast.Call object at 0x7da20c6e6c50>]] return[binary_operation[name[history_urls] + name[urls]]]
keyword[def] identifier[get_urls] ( identifier[self] ): literal[string] identifier[urls] = identifier[super] ( identifier[SimpleHistoryAdmin] , identifier[self] ). identifier[get_urls] () identifier[admin_site] = identifier[self] . identifier[admin_site] identifier[opts] = identifier[self] . identifier[model] . identifier[_meta] identifier[info] = identifier[opts] . identifier[app_label] , identifier[opts] . identifier[model_name] identifier[history_urls] =[ identifier[url] ( literal[string] , identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[history_form_view] ), identifier[name] = literal[string] % identifier[info] , ) ] keyword[return] identifier[history_urls] + identifier[urls]
def get_urls(self): """Returns the additional urls used by the Reversion admin.""" urls = super(SimpleHistoryAdmin, self).get_urls() admin_site = self.admin_site opts = self.model._meta info = (opts.app_label, opts.model_name) history_urls = [url('^([^/]+)/history/([^/]+)/$', admin_site.admin_view(self.history_form_view), name='%s_%s_simple_history' % info)] return history_urls + urls
def set_org_processor(request): """ Simple context processor that automatically sets 'org' on the context if it is present in the request. """ if getattr(request, "org", None): org = request.org pattern_bg = org.backgrounds.filter(is_active=True, background_type="P") pattern_bg = pattern_bg.order_by("-pk").first() banner_bg = org.backgrounds.filter(is_active=True, background_type="B") banner_bg = banner_bg.order_by("-pk").first() return dict(org=org, pattern_bg=pattern_bg, banner_bg=banner_bg) else: return dict()
def function[set_org_processor, parameter[request]]: constant[ Simple context processor that automatically sets 'org' on the context if it is present in the request. ] if call[name[getattr], parameter[name[request], constant[org], constant[None]]] begin[:] variable[org] assign[=] name[request].org variable[pattern_bg] assign[=] call[name[org].backgrounds.filter, parameter[]] variable[pattern_bg] assign[=] call[call[name[pattern_bg].order_by, parameter[constant[-pk]]].first, parameter[]] variable[banner_bg] assign[=] call[name[org].backgrounds.filter, parameter[]] variable[banner_bg] assign[=] call[call[name[banner_bg].order_by, parameter[constant[-pk]]].first, parameter[]] return[call[name[dict], parameter[]]]
keyword[def] identifier[set_org_processor] ( identifier[request] ): literal[string] keyword[if] identifier[getattr] ( identifier[request] , literal[string] , keyword[None] ): identifier[org] = identifier[request] . identifier[org] identifier[pattern_bg] = identifier[org] . identifier[backgrounds] . identifier[filter] ( identifier[is_active] = keyword[True] , identifier[background_type] = literal[string] ) identifier[pattern_bg] = identifier[pattern_bg] . identifier[order_by] ( literal[string] ). identifier[first] () identifier[banner_bg] = identifier[org] . identifier[backgrounds] . identifier[filter] ( identifier[is_active] = keyword[True] , identifier[background_type] = literal[string] ) identifier[banner_bg] = identifier[banner_bg] . identifier[order_by] ( literal[string] ). identifier[first] () keyword[return] identifier[dict] ( identifier[org] = identifier[org] , identifier[pattern_bg] = identifier[pattern_bg] , identifier[banner_bg] = identifier[banner_bg] ) keyword[else] : keyword[return] identifier[dict] ()
def set_org_processor(request): """ Simple context processor that automatically sets 'org' on the context if it is present in the request. """ if getattr(request, 'org', None): org = request.org pattern_bg = org.backgrounds.filter(is_active=True, background_type='P') pattern_bg = pattern_bg.order_by('-pk').first() banner_bg = org.backgrounds.filter(is_active=True, background_type='B') banner_bg = banner_bg.order_by('-pk').first() return dict(org=org, pattern_bg=pattern_bg, banner_bg=banner_bg) # depends on [control=['if'], data=[]] else: return dict()
def itoEuler(f, G, y0, tspan, dW=None): """Use the Euler-Maruyama algorithm to integrate the Ito equation dy = f(y,t)dt + G(y,t) dW(t) where y is the d-dimensional state vector, f is a vector-valued function, G is an d x m matrix-valued function giving the noise coefficients and dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments Args: f: callable(y, t) returning (d,) array Vector-valued function to define the deterministic part of the system G: callable(y, t) returning (d,m) array Matrix-valued function to define the noise coefficients of the system y0: array of shape (d,) giving the initial state vector y(t==0) tspan (array): The sequence of time points for which to solve for y. These must be equally spaced, e.g. np.arange(0,10,0.005) tspan[0] is the intial time corresponding to the initial state y0. dW: optional array of shape (len(tspan)-1, d). This is for advanced use, if you want to use a specific realization of the d independent Wiener processes. If not provided Wiener increments will be generated randomly Returns: y: array, with shape (len(tspan), len(y0)) With the initial value y0 in the first row Raises: SDEValueError See also: G. Maruyama (1955) Continuous Markov processes and stochastic equations Kloeden and Platen (1999) Numerical Solution of Differential Equations """ (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None) N = len(tspan) h = (tspan[N-1] - tspan[0])/(N - 1) # allocate space for result y = np.zeros((N, d), dtype=type(y0[0])) if dW is None: # pre-generate Wiener increments (for m independent Wiener processes): dW = deltaW(N - 1, m, h) y[0] = y0; for n in range(0, N-1): tn = tspan[n] yn = y[n] dWn = dW[n,:] y[n+1] = yn + f(yn, tn)*h + G(yn, tn).dot(dWn) return y
def function[itoEuler, parameter[f, G, y0, tspan, dW]]: constant[Use the Euler-Maruyama algorithm to integrate the Ito equation dy = f(y,t)dt + G(y,t) dW(t) where y is the d-dimensional state vector, f is a vector-valued function, G is an d x m matrix-valued function giving the noise coefficients and dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments Args: f: callable(y, t) returning (d,) array Vector-valued function to define the deterministic part of the system G: callable(y, t) returning (d,m) array Matrix-valued function to define the noise coefficients of the system y0: array of shape (d,) giving the initial state vector y(t==0) tspan (array): The sequence of time points for which to solve for y. These must be equally spaced, e.g. np.arange(0,10,0.005) tspan[0] is the intial time corresponding to the initial state y0. dW: optional array of shape (len(tspan)-1, d). This is for advanced use, if you want to use a specific realization of the d independent Wiener processes. If not provided Wiener increments will be generated randomly Returns: y: array, with shape (len(tspan), len(y0)) With the initial value y0 in the first row Raises: SDEValueError See also: G. 
Maruyama (1955) Continuous Markov processes and stochastic equations Kloeden and Platen (1999) Numerical Solution of Differential Equations ] <ast.Tuple object at 0x7da1b1188220> assign[=] call[name[_check_args], parameter[name[f], name[G], name[y0], name[tspan], name[dW], constant[None]]] variable[N] assign[=] call[name[len], parameter[name[tspan]]] variable[h] assign[=] binary_operation[binary_operation[call[name[tspan]][binary_operation[name[N] - constant[1]]] - call[name[tspan]][constant[0]]] / binary_operation[name[N] - constant[1]]] variable[y] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b12c1ea0>, <ast.Name object at 0x7da1b12c17b0>]]]] if compare[name[dW] is constant[None]] begin[:] variable[dW] assign[=] call[name[deltaW], parameter[binary_operation[name[N] - constant[1]], name[m], name[h]]] call[name[y]][constant[0]] assign[=] name[y0] for taget[name[n]] in starred[call[name[range], parameter[constant[0], binary_operation[name[N] - constant[1]]]]] begin[:] variable[tn] assign[=] call[name[tspan]][name[n]] variable[yn] assign[=] call[name[y]][name[n]] variable[dWn] assign[=] call[name[dW]][tuple[[<ast.Name object at 0x7da1b11a4940>, <ast.Slice object at 0x7da1b11a7e50>]]] call[name[y]][binary_operation[name[n] + constant[1]]] assign[=] binary_operation[binary_operation[name[yn] + binary_operation[call[name[f], parameter[name[yn], name[tn]]] * name[h]]] + call[call[name[G], parameter[name[yn], name[tn]]].dot, parameter[name[dWn]]]] return[name[y]]
keyword[def] identifier[itoEuler] ( identifier[f] , identifier[G] , identifier[y0] , identifier[tspan] , identifier[dW] = keyword[None] ): literal[string] ( identifier[d] , identifier[m] , identifier[f] , identifier[G] , identifier[y0] , identifier[tspan] , identifier[dW] , identifier[__] )= identifier[_check_args] ( identifier[f] , identifier[G] , identifier[y0] , identifier[tspan] , identifier[dW] , keyword[None] ) identifier[N] = identifier[len] ( identifier[tspan] ) identifier[h] =( identifier[tspan] [ identifier[N] - literal[int] ]- identifier[tspan] [ literal[int] ])/( identifier[N] - literal[int] ) identifier[y] = identifier[np] . identifier[zeros] (( identifier[N] , identifier[d] ), identifier[dtype] = identifier[type] ( identifier[y0] [ literal[int] ])) keyword[if] identifier[dW] keyword[is] keyword[None] : identifier[dW] = identifier[deltaW] ( identifier[N] - literal[int] , identifier[m] , identifier[h] ) identifier[y] [ literal[int] ]= identifier[y0] ; keyword[for] identifier[n] keyword[in] identifier[range] ( literal[int] , identifier[N] - literal[int] ): identifier[tn] = identifier[tspan] [ identifier[n] ] identifier[yn] = identifier[y] [ identifier[n] ] identifier[dWn] = identifier[dW] [ identifier[n] ,:] identifier[y] [ identifier[n] + literal[int] ]= identifier[yn] + identifier[f] ( identifier[yn] , identifier[tn] )* identifier[h] + identifier[G] ( identifier[yn] , identifier[tn] ). identifier[dot] ( identifier[dWn] ) keyword[return] identifier[y]
def itoEuler(f, G, y0, tspan, dW=None): """Use the Euler-Maruyama algorithm to integrate the Ito equation dy = f(y,t)dt + G(y,t) dW(t) where y is the d-dimensional state vector, f is a vector-valued function, G is an d x m matrix-valued function giving the noise coefficients and dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments Args: f: callable(y, t) returning (d,) array Vector-valued function to define the deterministic part of the system G: callable(y, t) returning (d,m) array Matrix-valued function to define the noise coefficients of the system y0: array of shape (d,) giving the initial state vector y(t==0) tspan (array): The sequence of time points for which to solve for y. These must be equally spaced, e.g. np.arange(0,10,0.005) tspan[0] is the intial time corresponding to the initial state y0. dW: optional array of shape (len(tspan)-1, d). This is for advanced use, if you want to use a specific realization of the d independent Wiener processes. If not provided Wiener increments will be generated randomly Returns: y: array, with shape (len(tspan), len(y0)) With the initial value y0 in the first row Raises: SDEValueError See also: G. Maruyama (1955) Continuous Markov processes and stochastic equations Kloeden and Platen (1999) Numerical Solution of Differential Equations """ (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None) N = len(tspan) h = (tspan[N - 1] - tspan[0]) / (N - 1) # allocate space for result y = np.zeros((N, d), dtype=type(y0[0])) if dW is None: # pre-generate Wiener increments (for m independent Wiener processes): dW = deltaW(N - 1, m, h) # depends on [control=['if'], data=['dW']] y[0] = y0 for n in range(0, N - 1): tn = tspan[n] yn = y[n] dWn = dW[n, :] y[n + 1] = yn + f(yn, tn) * h + G(yn, tn).dot(dWn) # depends on [control=['for'], data=['n']] return y
def role_exists(role, **kwargs): ''' Checks if a role exists. CLI Example: .. code-block:: bash salt minion mssql.role_exists db_owner ''' # We should get one, and only one row return len(tsql_query(query='sp_helprole "{0}"'.format(role), as_dict=True, **kwargs)) == 1
def function[role_exists, parameter[role]]: constant[ Checks if a role exists. CLI Example: .. code-block:: bash salt minion mssql.role_exists db_owner ] return[compare[call[name[len], parameter[call[name[tsql_query], parameter[]]]] equal[==] constant[1]]]
keyword[def] identifier[role_exists] ( identifier[role] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[len] ( identifier[tsql_query] ( identifier[query] = literal[string] . identifier[format] ( identifier[role] ), identifier[as_dict] = keyword[True] ,** identifier[kwargs] ))== literal[int]
def role_exists(role, **kwargs): """ Checks if a role exists. CLI Example: .. code-block:: bash salt minion mssql.role_exists db_owner """ # We should get one, and only one row return len(tsql_query(query='sp_helprole "{0}"'.format(role), as_dict=True, **kwargs)) == 1
def post(self, url, body=None): """ Executes an HTTP POST request for the given URL. """ response = self.http.post(url, headers=self.headers, data=body, **self.requests_params) return self.process(response)
def function[post, parameter[self, url, body]]: constant[ Executes an HTTP POST request for the given URL. ] variable[response] assign[=] call[name[self].http.post, parameter[name[url]]] return[call[name[self].process, parameter[name[response]]]]
keyword[def] identifier[post] ( identifier[self] , identifier[url] , identifier[body] = keyword[None] ): literal[string] identifier[response] = identifier[self] . identifier[http] . identifier[post] ( identifier[url] , identifier[headers] = identifier[self] . identifier[headers] , identifier[data] = identifier[body] , ** identifier[self] . identifier[requests_params] ) keyword[return] identifier[self] . identifier[process] ( identifier[response] )
def post(self, url, body=None): """ Executes an HTTP POST request for the given URL. """ response = self.http.post(url, headers=self.headers, data=body, **self.requests_params) return self.process(response)
def raw_search(self, query='', params=None): """Performs a search query and returns the parsed JSON.""" if params is None: params = {} try: return self.__index.search(query, params) except AlgoliaException as e: if DEBUG: raise e else: logger.warning('ERROR DURING SEARCH ON %s: %s', self.index_name, e)
def function[raw_search, parameter[self, query, params]]: constant[Performs a search query and returns the parsed JSON.] if compare[name[params] is constant[None]] begin[:] variable[params] assign[=] dictionary[[], []] <ast.Try object at 0x7da204961f30>
keyword[def] identifier[raw_search] ( identifier[self] , identifier[query] = literal[string] , identifier[params] = keyword[None] ): literal[string] keyword[if] identifier[params] keyword[is] keyword[None] : identifier[params] ={} keyword[try] : keyword[return] identifier[self] . identifier[__index] . identifier[search] ( identifier[query] , identifier[params] ) keyword[except] identifier[AlgoliaException] keyword[as] identifier[e] : keyword[if] identifier[DEBUG] : keyword[raise] identifier[e] keyword[else] : identifier[logger] . identifier[warning] ( literal[string] , identifier[self] . identifier[index_name] , identifier[e] )
def raw_search(self, query='', params=None): """Performs a search query and returns the parsed JSON.""" if params is None: params = {} # depends on [control=['if'], data=['params']] try: return self.__index.search(query, params) # depends on [control=['try'], data=[]] except AlgoliaException as e: if DEBUG: raise e # depends on [control=['if'], data=[]] else: logger.warning('ERROR DURING SEARCH ON %s: %s', self.index_name, e) # depends on [control=['except'], data=['e']]
def forms(self): """ POST form values parsed into an instance of :class:`MultiDict`. This property contains form values parsed from an `url-encoded` or `multipart/form-data` encoded POST request bidy. The values are native strings. """ forms = MultiDict() for name, item in self.POST.iterallitems(): if not hasattr(item, 'filename'): forms[name] = item return forms
def function[forms, parameter[self]]: constant[ POST form values parsed into an instance of :class:`MultiDict`. This property contains form values parsed from an `url-encoded` or `multipart/form-data` encoded POST request bidy. The values are native strings. ] variable[forms] assign[=] call[name[MultiDict], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b18ae380>, <ast.Name object at 0x7da1b18afbe0>]]] in starred[call[name[self].POST.iterallitems, parameter[]]] begin[:] if <ast.UnaryOp object at 0x7da1b18aca00> begin[:] call[name[forms]][name[name]] assign[=] name[item] return[name[forms]]
keyword[def] identifier[forms] ( identifier[self] ): literal[string] identifier[forms] = identifier[MultiDict] () keyword[for] identifier[name] , identifier[item] keyword[in] identifier[self] . identifier[POST] . identifier[iterallitems] (): keyword[if] keyword[not] identifier[hasattr] ( identifier[item] , literal[string] ): identifier[forms] [ identifier[name] ]= identifier[item] keyword[return] identifier[forms]
def forms(self): """ POST form values parsed into an instance of :class:`MultiDict`. This property contains form values parsed from an `url-encoded` or `multipart/form-data` encoded POST request bidy. The values are native strings. """ forms = MultiDict() for (name, item) in self.POST.iterallitems(): if not hasattr(item, 'filename'): forms[name] = item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return forms
def broadcast(self, clients, msg): """ Optimized C{broadcast} implementation. Depending on type of the session, will json-encode message once and will call either C{send_message} or C{send_jsonifed}. @param clients: Clients iterable @param msg: Message to send """ json_msg = None count = 0 for c in clients: sess = c.session if not sess.is_closed: if sess.send_expects_json: if json_msg is None: json_msg = proto.json_encode(msg) sess.send_jsonified(json_msg, stats=False) else: sess.send_message(msg, stats=False) count += 1 self.stats.packSent(count)
def function[broadcast, parameter[self, clients, msg]]: constant[ Optimized C{broadcast} implementation. Depending on type of the session, will json-encode message once and will call either C{send_message} or C{send_jsonifed}. @param clients: Clients iterable @param msg: Message to send ] variable[json_msg] assign[=] constant[None] variable[count] assign[=] constant[0] for taget[name[c]] in starred[name[clients]] begin[:] variable[sess] assign[=] name[c].session if <ast.UnaryOp object at 0x7da20e9568c0> begin[:] if name[sess].send_expects_json begin[:] if compare[name[json_msg] is constant[None]] begin[:] variable[json_msg] assign[=] call[name[proto].json_encode, parameter[name[msg]]] call[name[sess].send_jsonified, parameter[name[json_msg]]] <ast.AugAssign object at 0x7da20e956a40> call[name[self].stats.packSent, parameter[name[count]]]
keyword[def] identifier[broadcast] ( identifier[self] , identifier[clients] , identifier[msg] ): literal[string] identifier[json_msg] = keyword[None] identifier[count] = literal[int] keyword[for] identifier[c] keyword[in] identifier[clients] : identifier[sess] = identifier[c] . identifier[session] keyword[if] keyword[not] identifier[sess] . identifier[is_closed] : keyword[if] identifier[sess] . identifier[send_expects_json] : keyword[if] identifier[json_msg] keyword[is] keyword[None] : identifier[json_msg] = identifier[proto] . identifier[json_encode] ( identifier[msg] ) identifier[sess] . identifier[send_jsonified] ( identifier[json_msg] , identifier[stats] = keyword[False] ) keyword[else] : identifier[sess] . identifier[send_message] ( identifier[msg] , identifier[stats] = keyword[False] ) identifier[count] += literal[int] identifier[self] . identifier[stats] . identifier[packSent] ( identifier[count] )
def broadcast(self, clients, msg): """ Optimized C{broadcast} implementation. Depending on type of the session, will json-encode message once and will call either C{send_message} or C{send_jsonifed}. @param clients: Clients iterable @param msg: Message to send """ json_msg = None count = 0 for c in clients: sess = c.session if not sess.is_closed: if sess.send_expects_json: if json_msg is None: json_msg = proto.json_encode(msg) # depends on [control=['if'], data=['json_msg']] sess.send_jsonified(json_msg, stats=False) # depends on [control=['if'], data=[]] else: sess.send_message(msg, stats=False) count += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']] self.stats.packSent(count)
def top(self): """ Property. """ # For backwards compatibility. with warnings.catch_warnings(): warnings.simplefilter("always") w = "Striplog.top is deprecated; please use Striplog.unique" warnings.warn(w, DeprecationWarning, stacklevel=2) return self.unique
def function[top, parameter[self]]: constant[ Property. ] with call[name[warnings].catch_warnings, parameter[]] begin[:] call[name[warnings].simplefilter, parameter[constant[always]]] variable[w] assign[=] constant[Striplog.top is deprecated; please use Striplog.unique] call[name[warnings].warn, parameter[name[w], name[DeprecationWarning]]] return[name[self].unique]
keyword[def] identifier[top] ( identifier[self] ): literal[string] keyword[with] identifier[warnings] . identifier[catch_warnings] (): identifier[warnings] . identifier[simplefilter] ( literal[string] ) identifier[w] = literal[string] identifier[warnings] . identifier[warn] ( identifier[w] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] ) keyword[return] identifier[self] . identifier[unique]
def top(self): """ Property. """ # For backwards compatibility. with warnings.catch_warnings(): warnings.simplefilter('always') w = 'Striplog.top is deprecated; please use Striplog.unique' warnings.warn(w, DeprecationWarning, stacklevel=2) # depends on [control=['with'], data=[]] return self.unique
def _calculate_sv_bins_cnvkit(data, cnv_group, size_calc_fn): """Calculate structural variant bins using target/anti-target approach from CNVkit. """ from bcbio.structural import cnvkit if dd.get_background_cnv_reference(data, "cnvkit"): target_bed, anti_bed = cnvkit.targets_from_background(dd.get_background_cnv_reference(data, "cnvkit"), cnv_group.work_dir, data) else: target_bed, anti_bed = cnvkit.targets_w_bins(cnv_group.region_file, cnv_group.access_file, size_calc_fn, cnv_group.work_dir, data) return target_bed, anti_bed, None
def function[_calculate_sv_bins_cnvkit, parameter[data, cnv_group, size_calc_fn]]: constant[Calculate structural variant bins using target/anti-target approach from CNVkit. ] from relative_module[bcbio.structural] import module[cnvkit] if call[name[dd].get_background_cnv_reference, parameter[name[data], constant[cnvkit]]] begin[:] <ast.Tuple object at 0x7da1b1984100> assign[=] call[name[cnvkit].targets_from_background, parameter[call[name[dd].get_background_cnv_reference, parameter[name[data], constant[cnvkit]]], name[cnv_group].work_dir, name[data]]] return[tuple[[<ast.Name object at 0x7da1b19873a0>, <ast.Name object at 0x7da1b1984dc0>, <ast.Constant object at 0x7da1b1985b70>]]]
keyword[def] identifier[_calculate_sv_bins_cnvkit] ( identifier[data] , identifier[cnv_group] , identifier[size_calc_fn] ): literal[string] keyword[from] identifier[bcbio] . identifier[structural] keyword[import] identifier[cnvkit] keyword[if] identifier[dd] . identifier[get_background_cnv_reference] ( identifier[data] , literal[string] ): identifier[target_bed] , identifier[anti_bed] = identifier[cnvkit] . identifier[targets_from_background] ( identifier[dd] . identifier[get_background_cnv_reference] ( identifier[data] , literal[string] ), identifier[cnv_group] . identifier[work_dir] , identifier[data] ) keyword[else] : identifier[target_bed] , identifier[anti_bed] = identifier[cnvkit] . identifier[targets_w_bins] ( identifier[cnv_group] . identifier[region_file] , identifier[cnv_group] . identifier[access_file] , identifier[size_calc_fn] , identifier[cnv_group] . identifier[work_dir] , identifier[data] ) keyword[return] identifier[target_bed] , identifier[anti_bed] , keyword[None]
def _calculate_sv_bins_cnvkit(data, cnv_group, size_calc_fn): """Calculate structural variant bins using target/anti-target approach from CNVkit. """ from bcbio.structural import cnvkit if dd.get_background_cnv_reference(data, 'cnvkit'): (target_bed, anti_bed) = cnvkit.targets_from_background(dd.get_background_cnv_reference(data, 'cnvkit'), cnv_group.work_dir, data) # depends on [control=['if'], data=[]] else: (target_bed, anti_bed) = cnvkit.targets_w_bins(cnv_group.region_file, cnv_group.access_file, size_calc_fn, cnv_group.work_dir, data) return (target_bed, anti_bed, None)
def raise_errors_on_nested_writes(method_name, serializer, validated_data): """ Give explicit errors when users attempt to pass writable nested data. If we don't do this explicitly they'd get a less helpful error when calling `.save()` on the serializer. We don't *automatically* support these sorts of nested writes because there are too many ambiguities to define a default behavior. Eg. Suppose we have a `UserSerializer` with a nested profile. How should we handle the case of an update, where the `profile` relationship does not exist? Any of the following might be valid: * Raise an application error. * Silently ignore the nested part of the update. * Automatically create a profile instance. """ # Ensure we don't have a writable nested field. For example: # # class UserSerializer(ModelSerializer): # ... # profile = ProfileSerializer() assert not any( isinstance(field, BaseSerializer) and (key in validated_data) and isinstance(validated_data[key], (list, dict)) for key, field in serializer.fields.items() ), ( 'The `.{method_name}()` method does not support writable nested' 'fields by default.\nWrite an explicit `.{method_name}()` method for ' 'serializer `{module}.{class_name}`, or set `read_only=True` on ' 'nested serializer fields.'.format( method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__ ) ) # Ensure we don't have a writable dotted-source field. For example: # # class UserSerializer(ModelSerializer): # ... # address = serializer.CharField('profile.address') assert not any( '.' 
in field.source and (key in validated_data) and isinstance(validated_data[key], (list, dict)) for key, field in serializer.fields.items() ), ( 'The `.{method_name}()` method does not support writable dotted-source ' 'fields by default.\nWrite an explicit `.{method_name}()` method for ' 'serializer `{module}.{class_name}`, or set `read_only=True` on ' 'dotted-source serializer fields.'.format( method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__ ) )
def function[raise_errors_on_nested_writes, parameter[method_name, serializer, validated_data]]: constant[ Give explicit errors when users attempt to pass writable nested data. If we don't do this explicitly they'd get a less helpful error when calling `.save()` on the serializer. We don't *automatically* support these sorts of nested writes because there are too many ambiguities to define a default behavior. Eg. Suppose we have a `UserSerializer` with a nested profile. How should we handle the case of an update, where the `profile` relationship does not exist? Any of the following might be valid: * Raise an application error. * Silently ignore the nested part of the update. * Automatically create a profile instance. ] assert[<ast.UnaryOp object at 0x7da1b1594d30>] assert[<ast.UnaryOp object at 0x7da1b1594f10>]
keyword[def] identifier[raise_errors_on_nested_writes] ( identifier[method_name] , identifier[serializer] , identifier[validated_data] ): literal[string] keyword[assert] keyword[not] identifier[any] ( identifier[isinstance] ( identifier[field] , identifier[BaseSerializer] ) keyword[and] ( identifier[key] keyword[in] identifier[validated_data] ) keyword[and] identifier[isinstance] ( identifier[validated_data] [ identifier[key] ],( identifier[list] , identifier[dict] )) keyword[for] identifier[key] , identifier[field] keyword[in] identifier[serializer] . identifier[fields] . identifier[items] () ),( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[method_name] = identifier[method_name] , identifier[module] = identifier[serializer] . identifier[__class__] . identifier[__module__] , identifier[class_name] = identifier[serializer] . identifier[__class__] . identifier[__name__] ) ) keyword[assert] keyword[not] identifier[any] ( literal[string] keyword[in] identifier[field] . identifier[source] keyword[and] ( identifier[key] keyword[in] identifier[validated_data] ) keyword[and] identifier[isinstance] ( identifier[validated_data] [ identifier[key] ],( identifier[list] , identifier[dict] )) keyword[for] identifier[key] , identifier[field] keyword[in] identifier[serializer] . identifier[fields] . identifier[items] () ),( literal[string] literal[string] literal[string] literal[string] . identifier[format] ( identifier[method_name] = identifier[method_name] , identifier[module] = identifier[serializer] . identifier[__class__] . identifier[__module__] , identifier[class_name] = identifier[serializer] . identifier[__class__] . identifier[__name__] ) )
def raise_errors_on_nested_writes(method_name, serializer, validated_data): """ Give explicit errors when users attempt to pass writable nested data. If we don't do this explicitly they'd get a less helpful error when calling `.save()` on the serializer. We don't *automatically* support these sorts of nested writes because there are too many ambiguities to define a default behavior. Eg. Suppose we have a `UserSerializer` with a nested profile. How should we handle the case of an update, where the `profile` relationship does not exist? Any of the following might be valid: * Raise an application error. * Silently ignore the nested part of the update. * Automatically create a profile instance. """ # Ensure we don't have a writable nested field. For example: # # class UserSerializer(ModelSerializer): # ... # profile = ProfileSerializer() assert not any((isinstance(field, BaseSerializer) and key in validated_data and isinstance(validated_data[key], (list, dict)) for (key, field) in serializer.fields.items())), 'The `.{method_name}()` method does not support writable nestedfields by default.\nWrite an explicit `.{method_name}()` method for serializer `{module}.{class_name}`, or set `read_only=True` on nested serializer fields.'.format(method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__) # Ensure we don't have a writable dotted-source field. For example: # # class UserSerializer(ModelSerializer): # ... # address = serializer.CharField('profile.address') assert not any(('.' 
in field.source and key in validated_data and isinstance(validated_data[key], (list, dict)) for (key, field) in serializer.fields.items())), 'The `.{method_name}()` method does not support writable dotted-source fields by default.\nWrite an explicit `.{method_name}()` method for serializer `{module}.{class_name}`, or set `read_only=True` on dotted-source serializer fields.'.format(method_name=method_name, module=serializer.__class__.__module__, class_name=serializer.__class__.__name__)
def modver(self, *args): """ Switches colour of verify button """ g = get_root(self).globals if self.ok(): tname = self.val.get() if tname in self.successes: # known to be in simbad self.verify.config(bg=g.COL['start']) elif tname in self.failures: # known not to be in simbad self.verify.config(bg=g.COL['stop']) else: # not known whether in simbad self.verify.config(bg=g.COL['main']) self.verify.config(state='normal') else: self.verify.config(bg=g.COL['main']) self.verify.config(state='disable') if self.callback is not None: self.callback()
def function[modver, parameter[self]]: constant[ Switches colour of verify button ] variable[g] assign[=] call[name[get_root], parameter[name[self]]].globals if call[name[self].ok, parameter[]] begin[:] variable[tname] assign[=] call[name[self].val.get, parameter[]] if compare[name[tname] in name[self].successes] begin[:] call[name[self].verify.config, parameter[]] call[name[self].verify.config, parameter[]] if compare[name[self].callback is_not constant[None]] begin[:] call[name[self].callback, parameter[]]
keyword[def] identifier[modver] ( identifier[self] ,* identifier[args] ): literal[string] identifier[g] = identifier[get_root] ( identifier[self] ). identifier[globals] keyword[if] identifier[self] . identifier[ok] (): identifier[tname] = identifier[self] . identifier[val] . identifier[get] () keyword[if] identifier[tname] keyword[in] identifier[self] . identifier[successes] : identifier[self] . identifier[verify] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) keyword[elif] identifier[tname] keyword[in] identifier[self] . identifier[failures] : identifier[self] . identifier[verify] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) keyword[else] : identifier[self] . identifier[verify] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) identifier[self] . identifier[verify] . identifier[config] ( identifier[state] = literal[string] ) keyword[else] : identifier[self] . identifier[verify] . identifier[config] ( identifier[bg] = identifier[g] . identifier[COL] [ literal[string] ]) identifier[self] . identifier[verify] . identifier[config] ( identifier[state] = literal[string] ) keyword[if] identifier[self] . identifier[callback] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[callback] ()
def modver(self, *args): """ Switches colour of verify button """ g = get_root(self).globals if self.ok(): tname = self.val.get() if tname in self.successes: # known to be in simbad self.verify.config(bg=g.COL['start']) # depends on [control=['if'], data=[]] elif tname in self.failures: # known not to be in simbad self.verify.config(bg=g.COL['stop']) # depends on [control=['if'], data=[]] else: # not known whether in simbad self.verify.config(bg=g.COL['main']) self.verify.config(state='normal') # depends on [control=['if'], data=[]] else: self.verify.config(bg=g.COL['main']) self.verify.config(state='disable') if self.callback is not None: self.callback() # depends on [control=['if'], data=[]]
def create_from_label_monetary_account(cls, label_monetary_account): """ :type label_monetary_account: LabelMonetaryAccount """ instance = cls.__new__(cls) instance.label_monetary_account = label_monetary_account instance.pointer = Pointer() instance.pointer._name = label_monetary_account.display_name instance.pointer._type_ = cls._POINTER_TYPE_IBAN instance.pointer._value = label_monetary_account.iban return instance
def function[create_from_label_monetary_account, parameter[cls, label_monetary_account]]: constant[ :type label_monetary_account: LabelMonetaryAccount ] variable[instance] assign[=] call[name[cls].__new__, parameter[name[cls]]] name[instance].label_monetary_account assign[=] name[label_monetary_account] name[instance].pointer assign[=] call[name[Pointer], parameter[]] name[instance].pointer._name assign[=] name[label_monetary_account].display_name name[instance].pointer._type_ assign[=] name[cls]._POINTER_TYPE_IBAN name[instance].pointer._value assign[=] name[label_monetary_account].iban return[name[instance]]
keyword[def] identifier[create_from_label_monetary_account] ( identifier[cls] , identifier[label_monetary_account] ): literal[string] identifier[instance] = identifier[cls] . identifier[__new__] ( identifier[cls] ) identifier[instance] . identifier[label_monetary_account] = identifier[label_monetary_account] identifier[instance] . identifier[pointer] = identifier[Pointer] () identifier[instance] . identifier[pointer] . identifier[_name] = identifier[label_monetary_account] . identifier[display_name] identifier[instance] . identifier[pointer] . identifier[_type_] = identifier[cls] . identifier[_POINTER_TYPE_IBAN] identifier[instance] . identifier[pointer] . identifier[_value] = identifier[label_monetary_account] . identifier[iban] keyword[return] identifier[instance]
def create_from_label_monetary_account(cls, label_monetary_account): """ :type label_monetary_account: LabelMonetaryAccount """ instance = cls.__new__(cls) instance.label_monetary_account = label_monetary_account instance.pointer = Pointer() instance.pointer._name = label_monetary_account.display_name instance.pointer._type_ = cls._POINTER_TYPE_IBAN instance.pointer._value = label_monetary_account.iban return instance
def set_mode(self, mode): """ Add modes via bitmask. Modes set before are not cleared! This method should be used with the :const:`MODE_*` constants. :param mode: The mode to add. :return: The new mode bitmask. """ if not isinstance(mode, integer_types): raise TypeError("mode must be an integer") return _lib.SSL_CTX_set_mode(self._context, mode)
def function[set_mode, parameter[self, mode]]: constant[ Add modes via bitmask. Modes set before are not cleared! This method should be used with the :const:`MODE_*` constants. :param mode: The mode to add. :return: The new mode bitmask. ] if <ast.UnaryOp object at 0x7da1b0259960> begin[:] <ast.Raise object at 0x7da1b0258f40> return[call[name[_lib].SSL_CTX_set_mode, parameter[name[self]._context, name[mode]]]]
keyword[def] identifier[set_mode] ( identifier[self] , identifier[mode] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[mode] , identifier[integer_types] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[return] identifier[_lib] . identifier[SSL_CTX_set_mode] ( identifier[self] . identifier[_context] , identifier[mode] )
def set_mode(self, mode): """ Add modes via bitmask. Modes set before are not cleared! This method should be used with the :const:`MODE_*` constants. :param mode: The mode to add. :return: The new mode bitmask. """ if not isinstance(mode, integer_types): raise TypeError('mode must be an integer') # depends on [control=['if'], data=[]] return _lib.SSL_CTX_set_mode(self._context, mode)
def get_public_ip(self): """ Gets the public IP for a host. """ r = self.local_renderer ret = r.run(r.env.get_public_ip_command) or '' ret = ret.strip() print('ip:', ret) return ret
def function[get_public_ip, parameter[self]]: constant[ Gets the public IP for a host. ] variable[r] assign[=] name[self].local_renderer variable[ret] assign[=] <ast.BoolOp object at 0x7da1b00eb4c0> variable[ret] assign[=] call[name[ret].strip, parameter[]] call[name[print], parameter[constant[ip:], name[ret]]] return[name[ret]]
keyword[def] identifier[get_public_ip] ( identifier[self] ): literal[string] identifier[r] = identifier[self] . identifier[local_renderer] identifier[ret] = identifier[r] . identifier[run] ( identifier[r] . identifier[env] . identifier[get_public_ip_command] ) keyword[or] literal[string] identifier[ret] = identifier[ret] . identifier[strip] () identifier[print] ( literal[string] , identifier[ret] ) keyword[return] identifier[ret]
def get_public_ip(self): """ Gets the public IP for a host. """ r = self.local_renderer ret = r.run(r.env.get_public_ip_command) or '' ret = ret.strip() print('ip:', ret) return ret
def _do_east_asian(self): """Fetch and update east-asian tables.""" self._do_retrieve(self.EAW_URL, self.EAW_IN) (version, date, values) = self._parse_east_asian( fname=self.EAW_IN, properties=(u'W', u'F',) ) table = self._make_table(values) self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', version, date, table)
def function[_do_east_asian, parameter[self]]: constant[Fetch and update east-asian tables.] call[name[self]._do_retrieve, parameter[name[self].EAW_URL, name[self].EAW_IN]] <ast.Tuple object at 0x7da18f58ce80> assign[=] call[name[self]._parse_east_asian, parameter[]] variable[table] assign[=] call[name[self]._make_table, parameter[name[values]]] call[name[self]._do_write, parameter[name[self].EAW_OUT, constant[WIDE_EASTASIAN], name[version], name[date], name[table]]]
keyword[def] identifier[_do_east_asian] ( identifier[self] ): literal[string] identifier[self] . identifier[_do_retrieve] ( identifier[self] . identifier[EAW_URL] , identifier[self] . identifier[EAW_IN] ) ( identifier[version] , identifier[date] , identifier[values] )= identifier[self] . identifier[_parse_east_asian] ( identifier[fname] = identifier[self] . identifier[EAW_IN] , identifier[properties] =( literal[string] , literal[string] ,) ) identifier[table] = identifier[self] . identifier[_make_table] ( identifier[values] ) identifier[self] . identifier[_do_write] ( identifier[self] . identifier[EAW_OUT] , literal[string] , identifier[version] , identifier[date] , identifier[table] )
def _do_east_asian(self): """Fetch and update east-asian tables.""" self._do_retrieve(self.EAW_URL, self.EAW_IN) (version, date, values) = self._parse_east_asian(fname=self.EAW_IN, properties=(u'W', u'F')) table = self._make_table(values) self._do_write(self.EAW_OUT, 'WIDE_EASTASIAN', version, date, table)
def open_files(filenames): """ Call the system 'open' command on a file. """ command = ['open'] + filenames process = Popen(command, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate()
def function[open_files, parameter[filenames]]: constant[ Call the system 'open' command on a file. ] variable[command] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1e00970>]] + name[filenames]] variable[process] assign[=] call[name[Popen], parameter[name[command]]] <ast.Tuple object at 0x7da1b1e02c20> assign[=] call[name[process].communicate, parameter[]]
keyword[def] identifier[open_files] ( identifier[filenames] ): literal[string] identifier[command] =[ literal[string] ]+ identifier[filenames] identifier[process] = identifier[Popen] ( identifier[command] , identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[PIPE] ) identifier[stdout] , identifier[stderr] = identifier[process] . identifier[communicate] ()
def open_files(filenames): """ Call the system 'open' command on a file. """ command = ['open'] + filenames process = Popen(command, stdout=PIPE, stderr=PIPE) (stdout, stderr) = process.communicate()
def magic(dataset, method='coactivation', roi_mask=None, coactivation_mask=None, features=None, feature_threshold=0.05, min_voxels_per_study=None, min_studies_per_voxel=None, reduce_reference='pca', n_components=100, distance_metric='correlation', clustering_algorithm='kmeans', n_clusters=5, clustering_kwargs={}, output_dir=None, filename=None, coactivation_images=False, coactivation_threshold=0.1): ''' Execute a full clustering analysis pipeline. Args: dataset: a Dataset instance to extract all data from. method (str): the overall clustering approach to use. Valid options: 'coactivation' (default): Clusters voxel within the ROI mask based on shared pattern of coactivation with the rest of the brain. 'studies': Treat each study as a feature in an n-dimensional space. I.e., voxels will be assigned to the same cluster if they tend to be co-reported in similar studies. 'features': Voxels will be assigned to the same cluster if they tend to have similar feature vectors (i.e., the studies that activate those voxels tend to use similar terms). roi_mask: A string, nibabel image, or numpy array providing an inclusion mask of voxels to cluster. If None, the default mask in the Dataset instance is used (typically, all in-brain voxels). coactivation_mask: If method='coactivation', this mask defines the voxels to use when generating the pairwise distance matrix. For example, if a PFC mask is passed, all voxels in the roi_mask will be clustered based on how similar their patterns of coactivation with PFC voxels are. Can be a str, nibabel image, or numpy array. features (str or list): Optional string or list of strings specifying any feature names to use for study selection. E.g., passing ['emotion', 'reward'] would retain for analysis only those studies associated with the features emotion or reward at a frequency greater than feature_threshold. feature_threshold (float): The threshold to use when selecting studies on the basis of features. 
min_voxels_per_study (int): Minimum number of active voxels a study must report in order to be retained in the dataset. By default, all studies are used. min_studies_per_voxel (int): Minimum number of studies a voxel must be active in in order to be retained in analysis. By default, all voxels are used. reduce_reference (str, scikit-learn object or None): The dimensionality reduction algorithm to apply to the feature space prior to the computation of pairwise distances. If a string is passed (either 'pca' or 'ica'), n_components must be specified. If None, no dimensionality reduction will be applied. Otherwise, must be a scikit-learn-style object that exposes a transform() method. n_components (int): Number of components to extract during the dimensionality reduction step. Only used if reduce_reference is a string. distance_metric (str): The distance metric to use when computing pairwise distances on the to-be-clustered voxels. Can be any of the metrics supported by sklearn.metrics.pairwise_distances. clustering_algorithm (str or scikit-learn object): the clustering algorithm to use. If a string, must be one of 'kmeans' or 'minik'. Otherwise, any sklearn class that exposes a fit_predict() method. n_clusters (int): If clustering_algorithm is a string, the number of clusters to extract. clustering_kwargs (dict): Additional keywords to pass to the clustering object. output_dir (str): The directory to write results to. If None (default), returns the cluster label image rather than saving to disk. filename (str): Name of cluster label image file. Defaults to cluster_labels_k{k}.nii.gz, where k is the number of clusters. coactivation_images (bool): If True, saves a meta-analytic coactivation map for every ROI in the resulting cluster map. coactivation_threshold (float or int): If coactivation_images is True, this is the threshold used to define whether or not a study is considered to activation within a cluster ROI. 
Integer values are interpreted as minimum number of voxels within the ROI; floats are interpreted as the proportion of voxels. Defaults to 0.1 (i.e., 10% of all voxels within ROI must be active). ''' roi = Clusterable(dataset, roi_mask, min_voxels=min_voxels_per_study, min_studies=min_studies_per_voxel, features=features, feature_threshold=feature_threshold) if method == 'coactivation': reference = Clusterable(dataset, coactivation_mask, min_voxels=min_voxels_per_study, min_studies=min_studies_per_voxel, features=features, feature_threshold=feature_threshold) elif method == 'features': reference = deepcopy(roi) feature_data = dataset.feature_table.data n_studies = len(feature_data) reference.data = reference.data.dot(feature_data.values) / n_studies elif method == 'studies': reference = roi if reduce_reference is not None: if isinstance(reduce_reference, string_types): # Number of components can't exceed feature count or cluster count n_feat = reference.data.shape[1] n_components = min(n_components, n_feat) reduce_reference = { 'pca': sk_decomp.PCA, 'ica': sk_decomp.FastICA }[reduce_reference](n_components) # For non-coactivation-based approaches, transpose the data matrix transpose = (method == 'coactivation') reference = reference.transform(reduce_reference, transpose=transpose) if method == 'coactivation': distances = pairwise_distances(roi.data, reference.data, metric=distance_metric) else: distances = reference.data # TODO: add additional clustering methods if isinstance(clustering_algorithm, string_types): clustering_algorithm = { 'kmeans': sk_cluster.KMeans, 'minik': sk_cluster.MiniBatchKMeans }[clustering_algorithm](n_clusters, **clustering_kwargs) labels = clustering_algorithm.fit_predict(distances) + 1. 
header = roi.masker.get_header() header['cal_max'] = labels.max() header['cal_min'] = labels.min() voxel_labels = roi.masker.unmask(labels) img = nifti1.Nifti1Image(voxel_labels, None, header) if output_dir is not None: if not exists(output_dir): makedirs(output_dir) if filename is None: filename = 'cluster_labels_k%d.nii.gz' % n_clusters outfile = join(output_dir, filename) img.to_filename(outfile) # Write coactivation images if coactivation_images: for l in np.unique(voxel_labels): roi_mask = np.copy(voxel_labels) roi_mask[roi_mask != l] = 0 ids = dataset.get_studies( mask=roi_mask, activation_threshold=coactivation_threshold) ma = meta.MetaAnalysis(dataset, ids) ma.save_results(output_dir=join(output_dir, 'coactivation'), prefix='cluster_%d_coactivation' % l) else: return img
def function[magic, parameter[dataset, method, roi_mask, coactivation_mask, features, feature_threshold, min_voxels_per_study, min_studies_per_voxel, reduce_reference, n_components, distance_metric, clustering_algorithm, n_clusters, clustering_kwargs, output_dir, filename, coactivation_images, coactivation_threshold]]: constant[ Execute a full clustering analysis pipeline. Args: dataset: a Dataset instance to extract all data from. method (str): the overall clustering approach to use. Valid options: 'coactivation' (default): Clusters voxel within the ROI mask based on shared pattern of coactivation with the rest of the brain. 'studies': Treat each study as a feature in an n-dimensional space. I.e., voxels will be assigned to the same cluster if they tend to be co-reported in similar studies. 'features': Voxels will be assigned to the same cluster if they tend to have similar feature vectors (i.e., the studies that activate those voxels tend to use similar terms). roi_mask: A string, nibabel image, or numpy array providing an inclusion mask of voxels to cluster. If None, the default mask in the Dataset instance is used (typically, all in-brain voxels). coactivation_mask: If method='coactivation', this mask defines the voxels to use when generating the pairwise distance matrix. For example, if a PFC mask is passed, all voxels in the roi_mask will be clustered based on how similar their patterns of coactivation with PFC voxels are. Can be a str, nibabel image, or numpy array. features (str or list): Optional string or list of strings specifying any feature names to use for study selection. E.g., passing ['emotion', 'reward'] would retain for analysis only those studies associated with the features emotion or reward at a frequency greater than feature_threshold. feature_threshold (float): The threshold to use when selecting studies on the basis of features. 
min_voxels_per_study (int): Minimum number of active voxels a study must report in order to be retained in the dataset. By default, all studies are used. min_studies_per_voxel (int): Minimum number of studies a voxel must be active in in order to be retained in analysis. By default, all voxels are used. reduce_reference (str, scikit-learn object or None): The dimensionality reduction algorithm to apply to the feature space prior to the computation of pairwise distances. If a string is passed (either 'pca' or 'ica'), n_components must be specified. If None, no dimensionality reduction will be applied. Otherwise, must be a scikit-learn-style object that exposes a transform() method. n_components (int): Number of components to extract during the dimensionality reduction step. Only used if reduce_reference is a string. distance_metric (str): The distance metric to use when computing pairwise distances on the to-be-clustered voxels. Can be any of the metrics supported by sklearn.metrics.pairwise_distances. clustering_algorithm (str or scikit-learn object): the clustering algorithm to use. If a string, must be one of 'kmeans' or 'minik'. Otherwise, any sklearn class that exposes a fit_predict() method. n_clusters (int): If clustering_algorithm is a string, the number of clusters to extract. clustering_kwargs (dict): Additional keywords to pass to the clustering object. output_dir (str): The directory to write results to. If None (default), returns the cluster label image rather than saving to disk. filename (str): Name of cluster label image file. Defaults to cluster_labels_k{k}.nii.gz, where k is the number of clusters. coactivation_images (bool): If True, saves a meta-analytic coactivation map for every ROI in the resulting cluster map. coactivation_threshold (float or int): If coactivation_images is True, this is the threshold used to define whether or not a study is considered to activation within a cluster ROI. 
Integer values are interpreted as minimum number of voxels within the ROI; floats are interpreted as the proportion of voxels. Defaults to 0.1 (i.e., 10% of all voxels within ROI must be active). ] variable[roi] assign[=] call[name[Clusterable], parameter[name[dataset], name[roi_mask]]] if compare[name[method] equal[==] constant[coactivation]] begin[:] variable[reference] assign[=] call[name[Clusterable], parameter[name[dataset], name[coactivation_mask]]] if compare[name[reduce_reference] is_not constant[None]] begin[:] if call[name[isinstance], parameter[name[reduce_reference], name[string_types]]] begin[:] variable[n_feat] assign[=] call[name[reference].data.shape][constant[1]] variable[n_components] assign[=] call[name[min], parameter[name[n_components], name[n_feat]]] variable[reduce_reference] assign[=] call[call[dictionary[[<ast.Constant object at 0x7da20c6e5000>, <ast.Constant object at 0x7da20c6e6650>], [<ast.Attribute object at 0x7da20c6e6d40>, <ast.Attribute object at 0x7da20c6e58a0>]]][name[reduce_reference]], parameter[name[n_components]]] variable[transpose] assign[=] compare[name[method] equal[==] constant[coactivation]] variable[reference] assign[=] call[name[reference].transform, parameter[name[reduce_reference]]] if compare[name[method] equal[==] constant[coactivation]] begin[:] variable[distances] assign[=] call[name[pairwise_distances], parameter[name[roi].data, name[reference].data]] if call[name[isinstance], parameter[name[clustering_algorithm], name[string_types]]] begin[:] variable[clustering_algorithm] assign[=] call[call[dictionary[[<ast.Constant object at 0x7da20c6e7e20>, <ast.Constant object at 0x7da20c6e7f40>], [<ast.Attribute object at 0x7da20c6e7520>, <ast.Attribute object at 0x7da20c6e78e0>]]][name[clustering_algorithm]], parameter[name[n_clusters]]] variable[labels] assign[=] binary_operation[call[name[clustering_algorithm].fit_predict, parameter[name[distances]]] + constant[1.0]] variable[header] assign[=] 
call[name[roi].masker.get_header, parameter[]] call[name[header]][constant[cal_max]] assign[=] call[name[labels].max, parameter[]] call[name[header]][constant[cal_min]] assign[=] call[name[labels].min, parameter[]] variable[voxel_labels] assign[=] call[name[roi].masker.unmask, parameter[name[labels]]] variable[img] assign[=] call[name[nifti1].Nifti1Image, parameter[name[voxel_labels], constant[None], name[header]]] if compare[name[output_dir] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da20c6e7d60> begin[:] call[name[makedirs], parameter[name[output_dir]]] if compare[name[filename] is constant[None]] begin[:] variable[filename] assign[=] binary_operation[constant[cluster_labels_k%d.nii.gz] <ast.Mod object at 0x7da2590d6920> name[n_clusters]] variable[outfile] assign[=] call[name[join], parameter[name[output_dir], name[filename]]] call[name[img].to_filename, parameter[name[outfile]]] if name[coactivation_images] begin[:] for taget[name[l]] in starred[call[name[np].unique, parameter[name[voxel_labels]]]] begin[:] variable[roi_mask] assign[=] call[name[np].copy, parameter[name[voxel_labels]]] call[name[roi_mask]][compare[name[roi_mask] not_equal[!=] name[l]]] assign[=] constant[0] variable[ids] assign[=] call[name[dataset].get_studies, parameter[]] variable[ma] assign[=] call[name[meta].MetaAnalysis, parameter[name[dataset], name[ids]]] call[name[ma].save_results, parameter[]]
keyword[def] identifier[magic] ( identifier[dataset] , identifier[method] = literal[string] , identifier[roi_mask] = keyword[None] , identifier[coactivation_mask] = keyword[None] , identifier[features] = keyword[None] , identifier[feature_threshold] = literal[int] , identifier[min_voxels_per_study] = keyword[None] , identifier[min_studies_per_voxel] = keyword[None] , identifier[reduce_reference] = literal[string] , identifier[n_components] = literal[int] , identifier[distance_metric] = literal[string] , identifier[clustering_algorithm] = literal[string] , identifier[n_clusters] = literal[int] , identifier[clustering_kwargs] ={}, identifier[output_dir] = keyword[None] , identifier[filename] = keyword[None] , identifier[coactivation_images] = keyword[False] , identifier[coactivation_threshold] = literal[int] ): literal[string] identifier[roi] = identifier[Clusterable] ( identifier[dataset] , identifier[roi_mask] , identifier[min_voxels] = identifier[min_voxels_per_study] , identifier[min_studies] = identifier[min_studies_per_voxel] , identifier[features] = identifier[features] , identifier[feature_threshold] = identifier[feature_threshold] ) keyword[if] identifier[method] == literal[string] : identifier[reference] = identifier[Clusterable] ( identifier[dataset] , identifier[coactivation_mask] , identifier[min_voxels] = identifier[min_voxels_per_study] , identifier[min_studies] = identifier[min_studies_per_voxel] , identifier[features] = identifier[features] , identifier[feature_threshold] = identifier[feature_threshold] ) keyword[elif] identifier[method] == literal[string] : identifier[reference] = identifier[deepcopy] ( identifier[roi] ) identifier[feature_data] = identifier[dataset] . identifier[feature_table] . identifier[data] identifier[n_studies] = identifier[len] ( identifier[feature_data] ) identifier[reference] . identifier[data] = identifier[reference] . identifier[data] . identifier[dot] ( identifier[feature_data] . 
identifier[values] )/ identifier[n_studies] keyword[elif] identifier[method] == literal[string] : identifier[reference] = identifier[roi] keyword[if] identifier[reduce_reference] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[isinstance] ( identifier[reduce_reference] , identifier[string_types] ): identifier[n_feat] = identifier[reference] . identifier[data] . identifier[shape] [ literal[int] ] identifier[n_components] = identifier[min] ( identifier[n_components] , identifier[n_feat] ) identifier[reduce_reference] ={ literal[string] : identifier[sk_decomp] . identifier[PCA] , literal[string] : identifier[sk_decomp] . identifier[FastICA] }[ identifier[reduce_reference] ]( identifier[n_components] ) identifier[transpose] =( identifier[method] == literal[string] ) identifier[reference] = identifier[reference] . identifier[transform] ( identifier[reduce_reference] , identifier[transpose] = identifier[transpose] ) keyword[if] identifier[method] == literal[string] : identifier[distances] = identifier[pairwise_distances] ( identifier[roi] . identifier[data] , identifier[reference] . identifier[data] , identifier[metric] = identifier[distance_metric] ) keyword[else] : identifier[distances] = identifier[reference] . identifier[data] keyword[if] identifier[isinstance] ( identifier[clustering_algorithm] , identifier[string_types] ): identifier[clustering_algorithm] ={ literal[string] : identifier[sk_cluster] . identifier[KMeans] , literal[string] : identifier[sk_cluster] . identifier[MiniBatchKMeans] }[ identifier[clustering_algorithm] ]( identifier[n_clusters] ,** identifier[clustering_kwargs] ) identifier[labels] = identifier[clustering_algorithm] . identifier[fit_predict] ( identifier[distances] )+ literal[int] identifier[header] = identifier[roi] . identifier[masker] . identifier[get_header] () identifier[header] [ literal[string] ]= identifier[labels] . identifier[max] () identifier[header] [ literal[string] ]= identifier[labels] . 
identifier[min] () identifier[voxel_labels] = identifier[roi] . identifier[masker] . identifier[unmask] ( identifier[labels] ) identifier[img] = identifier[nifti1] . identifier[Nifti1Image] ( identifier[voxel_labels] , keyword[None] , identifier[header] ) keyword[if] identifier[output_dir] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[exists] ( identifier[output_dir] ): identifier[makedirs] ( identifier[output_dir] ) keyword[if] identifier[filename] keyword[is] keyword[None] : identifier[filename] = literal[string] % identifier[n_clusters] identifier[outfile] = identifier[join] ( identifier[output_dir] , identifier[filename] ) identifier[img] . identifier[to_filename] ( identifier[outfile] ) keyword[if] identifier[coactivation_images] : keyword[for] identifier[l] keyword[in] identifier[np] . identifier[unique] ( identifier[voxel_labels] ): identifier[roi_mask] = identifier[np] . identifier[copy] ( identifier[voxel_labels] ) identifier[roi_mask] [ identifier[roi_mask] != identifier[l] ]= literal[int] identifier[ids] = identifier[dataset] . identifier[get_studies] ( identifier[mask] = identifier[roi_mask] , identifier[activation_threshold] = identifier[coactivation_threshold] ) identifier[ma] = identifier[meta] . identifier[MetaAnalysis] ( identifier[dataset] , identifier[ids] ) identifier[ma] . identifier[save_results] ( identifier[output_dir] = identifier[join] ( identifier[output_dir] , literal[string] ), identifier[prefix] = literal[string] % identifier[l] ) keyword[else] : keyword[return] identifier[img]
def magic(dataset, method='coactivation', roi_mask=None, coactivation_mask=None, features=None, feature_threshold=0.05, min_voxels_per_study=None, min_studies_per_voxel=None, reduce_reference='pca', n_components=100, distance_metric='correlation', clustering_algorithm='kmeans', n_clusters=5, clustering_kwargs={}, output_dir=None, filename=None, coactivation_images=False, coactivation_threshold=0.1): """ Execute a full clustering analysis pipeline. Args: dataset: a Dataset instance to extract all data from. method (str): the overall clustering approach to use. Valid options: 'coactivation' (default): Clusters voxel within the ROI mask based on shared pattern of coactivation with the rest of the brain. 'studies': Treat each study as a feature in an n-dimensional space. I.e., voxels will be assigned to the same cluster if they tend to be co-reported in similar studies. 'features': Voxels will be assigned to the same cluster if they tend to have similar feature vectors (i.e., the studies that activate those voxels tend to use similar terms). roi_mask: A string, nibabel image, or numpy array providing an inclusion mask of voxels to cluster. If None, the default mask in the Dataset instance is used (typically, all in-brain voxels). coactivation_mask: If method='coactivation', this mask defines the voxels to use when generating the pairwise distance matrix. For example, if a PFC mask is passed, all voxels in the roi_mask will be clustered based on how similar their patterns of coactivation with PFC voxels are. Can be a str, nibabel image, or numpy array. features (str or list): Optional string or list of strings specifying any feature names to use for study selection. E.g., passing ['emotion', 'reward'] would retain for analysis only those studies associated with the features emotion or reward at a frequency greater than feature_threshold. feature_threshold (float): The threshold to use when selecting studies on the basis of features. 
min_voxels_per_study (int): Minimum number of active voxels a study must report in order to be retained in the dataset. By default, all studies are used. min_studies_per_voxel (int): Minimum number of studies a voxel must be active in in order to be retained in analysis. By default, all voxels are used. reduce_reference (str, scikit-learn object or None): The dimensionality reduction algorithm to apply to the feature space prior to the computation of pairwise distances. If a string is passed (either 'pca' or 'ica'), n_components must be specified. If None, no dimensionality reduction will be applied. Otherwise, must be a scikit-learn-style object that exposes a transform() method. n_components (int): Number of components to extract during the dimensionality reduction step. Only used if reduce_reference is a string. distance_metric (str): The distance metric to use when computing pairwise distances on the to-be-clustered voxels. Can be any of the metrics supported by sklearn.metrics.pairwise_distances. clustering_algorithm (str or scikit-learn object): the clustering algorithm to use. If a string, must be one of 'kmeans' or 'minik'. Otherwise, any sklearn class that exposes a fit_predict() method. n_clusters (int): If clustering_algorithm is a string, the number of clusters to extract. clustering_kwargs (dict): Additional keywords to pass to the clustering object. output_dir (str): The directory to write results to. If None (default), returns the cluster label image rather than saving to disk. filename (str): Name of cluster label image file. Defaults to cluster_labels_k{k}.nii.gz, where k is the number of clusters. coactivation_images (bool): If True, saves a meta-analytic coactivation map for every ROI in the resulting cluster map. coactivation_threshold (float or int): If coactivation_images is True, this is the threshold used to define whether or not a study is considered to activation within a cluster ROI. 
Integer values are interpreted as minimum number of voxels within the ROI; floats are interpreted as the proportion of voxels. Defaults to 0.1 (i.e., 10% of all voxels within ROI must be active). """ roi = Clusterable(dataset, roi_mask, min_voxels=min_voxels_per_study, min_studies=min_studies_per_voxel, features=features, feature_threshold=feature_threshold) if method == 'coactivation': reference = Clusterable(dataset, coactivation_mask, min_voxels=min_voxels_per_study, min_studies=min_studies_per_voxel, features=features, feature_threshold=feature_threshold) # depends on [control=['if'], data=[]] elif method == 'features': reference = deepcopy(roi) feature_data = dataset.feature_table.data n_studies = len(feature_data) reference.data = reference.data.dot(feature_data.values) / n_studies # depends on [control=['if'], data=[]] elif method == 'studies': reference = roi # depends on [control=['if'], data=[]] if reduce_reference is not None: if isinstance(reduce_reference, string_types): # Number of components can't exceed feature count or cluster count n_feat = reference.data.shape[1] n_components = min(n_components, n_feat) reduce_reference = {'pca': sk_decomp.PCA, 'ica': sk_decomp.FastICA}[reduce_reference](n_components) # depends on [control=['if'], data=[]] # For non-coactivation-based approaches, transpose the data matrix transpose = method == 'coactivation' reference = reference.transform(reduce_reference, transpose=transpose) # depends on [control=['if'], data=['reduce_reference']] if method == 'coactivation': distances = pairwise_distances(roi.data, reference.data, metric=distance_metric) # depends on [control=['if'], data=[]] else: distances = reference.data # TODO: add additional clustering methods if isinstance(clustering_algorithm, string_types): clustering_algorithm = {'kmeans': sk_cluster.KMeans, 'minik': sk_cluster.MiniBatchKMeans}[clustering_algorithm](n_clusters, **clustering_kwargs) # depends on [control=['if'], data=[]] labels = 
clustering_algorithm.fit_predict(distances) + 1.0 header = roi.masker.get_header() header['cal_max'] = labels.max() header['cal_min'] = labels.min() voxel_labels = roi.masker.unmask(labels) img = nifti1.Nifti1Image(voxel_labels, None, header) if output_dir is not None: if not exists(output_dir): makedirs(output_dir) # depends on [control=['if'], data=[]] if filename is None: filename = 'cluster_labels_k%d.nii.gz' % n_clusters # depends on [control=['if'], data=['filename']] outfile = join(output_dir, filename) img.to_filename(outfile) # Write coactivation images if coactivation_images: for l in np.unique(voxel_labels): roi_mask = np.copy(voxel_labels) roi_mask[roi_mask != l] = 0 ids = dataset.get_studies(mask=roi_mask, activation_threshold=coactivation_threshold) ma = meta.MetaAnalysis(dataset, ids) ma.save_results(output_dir=join(output_dir, 'coactivation'), prefix='cluster_%d_coactivation' % l) # depends on [control=['for'], data=['l']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['output_dir']] else: return img
def save_profile_id(self, profile: Profile): """ Store ID of profile locally. .. versionadded:: 4.0.6 """ os.makedirs(self.dirname_pattern.format(profile=profile.username, target=profile.username), exist_ok=True) with open(self._get_id_filename(profile.username), 'w') as text_file: text_file.write(str(profile.userid) + "\n") self.context.log("Stored ID {0} for profile {1}.".format(profile.userid, profile.username))
def function[save_profile_id, parameter[self, profile]]: constant[ Store ID of profile locally. .. versionadded:: 4.0.6 ] call[name[os].makedirs, parameter[call[name[self].dirname_pattern.format, parameter[]]]] with call[name[open], parameter[call[name[self]._get_id_filename, parameter[name[profile].username]], constant[w]]] begin[:] call[name[text_file].write, parameter[binary_operation[call[name[str], parameter[name[profile].userid]] + constant[ ]]]] call[name[self].context.log, parameter[call[constant[Stored ID {0} for profile {1}.].format, parameter[name[profile].userid, name[profile].username]]]]
keyword[def] identifier[save_profile_id] ( identifier[self] , identifier[profile] : identifier[Profile] ): literal[string] identifier[os] . identifier[makedirs] ( identifier[self] . identifier[dirname_pattern] . identifier[format] ( identifier[profile] = identifier[profile] . identifier[username] , identifier[target] = identifier[profile] . identifier[username] ), identifier[exist_ok] = keyword[True] ) keyword[with] identifier[open] ( identifier[self] . identifier[_get_id_filename] ( identifier[profile] . identifier[username] ), literal[string] ) keyword[as] identifier[text_file] : identifier[text_file] . identifier[write] ( identifier[str] ( identifier[profile] . identifier[userid] )+ literal[string] ) identifier[self] . identifier[context] . identifier[log] ( literal[string] . identifier[format] ( identifier[profile] . identifier[userid] , identifier[profile] . identifier[username] ))
def save_profile_id(self, profile: Profile): """ Store ID of profile locally. .. versionadded:: 4.0.6 """ os.makedirs(self.dirname_pattern.format(profile=profile.username, target=profile.username), exist_ok=True) with open(self._get_id_filename(profile.username), 'w') as text_file: text_file.write(str(profile.userid) + '\n') self.context.log('Stored ID {0} for profile {1}.'.format(profile.userid, profile.username)) # depends on [control=['with'], data=['text_file']]
def create(self, request): """ Read the GeoJSON feature collection from the request body and create new objects in the database. """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) collection = loads(request.body, object_hook=GeoJSON.to_instance) if not isinstance(collection, FeatureCollection): return HTTPBadRequest() session = self.Session() objects = [] for feature in collection.features: create = False obj = None if hasattr(feature, 'id') and feature.id is not None: obj = session.query(self.mapped_class).get(feature.id) if self.before_create is not None: self.before_create(request, feature, obj) if obj is None: obj = self.mapped_class(feature) create = True else: obj.__update__(feature) if create: session.add(obj) objects.append(obj) session.flush() collection = FeatureCollection(objects) if len(objects) > 0 else None request.response.status_int = 201 return collection
def function[create, parameter[self, request]]: constant[ Read the GeoJSON feature collection from the request body and create new objects in the database. ] if name[self].readonly begin[:] return[call[name[HTTPMethodNotAllowed], parameter[]]] variable[collection] assign[=] call[name[loads], parameter[name[request].body]] if <ast.UnaryOp object at 0x7da18dc04580> begin[:] return[call[name[HTTPBadRequest], parameter[]]] variable[session] assign[=] call[name[self].Session, parameter[]] variable[objects] assign[=] list[[]] for taget[name[feature]] in starred[name[collection].features] begin[:] variable[create] assign[=] constant[False] variable[obj] assign[=] constant[None] if <ast.BoolOp object at 0x7da18dc07eb0> begin[:] variable[obj] assign[=] call[call[name[session].query, parameter[name[self].mapped_class]].get, parameter[name[feature].id]] if compare[name[self].before_create is_not constant[None]] begin[:] call[name[self].before_create, parameter[name[request], name[feature], name[obj]]] if compare[name[obj] is constant[None]] begin[:] variable[obj] assign[=] call[name[self].mapped_class, parameter[name[feature]]] variable[create] assign[=] constant[True] if name[create] begin[:] call[name[session].add, parameter[name[obj]]] call[name[objects].append, parameter[name[obj]]] call[name[session].flush, parameter[]] variable[collection] assign[=] <ast.IfExp object at 0x7da207f98e20> name[request].response.status_int assign[=] constant[201] return[name[collection]]
keyword[def] identifier[create] ( identifier[self] , identifier[request] ): literal[string] keyword[if] identifier[self] . identifier[readonly] : keyword[return] identifier[HTTPMethodNotAllowed] ( identifier[headers] ={ literal[string] : literal[string] }) identifier[collection] = identifier[loads] ( identifier[request] . identifier[body] , identifier[object_hook] = identifier[GeoJSON] . identifier[to_instance] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[collection] , identifier[FeatureCollection] ): keyword[return] identifier[HTTPBadRequest] () identifier[session] = identifier[self] . identifier[Session] () identifier[objects] =[] keyword[for] identifier[feature] keyword[in] identifier[collection] . identifier[features] : identifier[create] = keyword[False] identifier[obj] = keyword[None] keyword[if] identifier[hasattr] ( identifier[feature] , literal[string] ) keyword[and] identifier[feature] . identifier[id] keyword[is] keyword[not] keyword[None] : identifier[obj] = identifier[session] . identifier[query] ( identifier[self] . identifier[mapped_class] ). identifier[get] ( identifier[feature] . identifier[id] ) keyword[if] identifier[self] . identifier[before_create] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[before_create] ( identifier[request] , identifier[feature] , identifier[obj] ) keyword[if] identifier[obj] keyword[is] keyword[None] : identifier[obj] = identifier[self] . identifier[mapped_class] ( identifier[feature] ) identifier[create] = keyword[True] keyword[else] : identifier[obj] . identifier[__update__] ( identifier[feature] ) keyword[if] identifier[create] : identifier[session] . identifier[add] ( identifier[obj] ) identifier[objects] . identifier[append] ( identifier[obj] ) identifier[session] . 
identifier[flush] () identifier[collection] = identifier[FeatureCollection] ( identifier[objects] ) keyword[if] identifier[len] ( identifier[objects] )> literal[int] keyword[else] keyword[None] identifier[request] . identifier[response] . identifier[status_int] = literal[int] keyword[return] identifier[collection]
def create(self, request): """ Read the GeoJSON feature collection from the request body and create new objects in the database. """ if self.readonly: return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'}) # depends on [control=['if'], data=[]] collection = loads(request.body, object_hook=GeoJSON.to_instance) if not isinstance(collection, FeatureCollection): return HTTPBadRequest() # depends on [control=['if'], data=[]] session = self.Session() objects = [] for feature in collection.features: create = False obj = None if hasattr(feature, 'id') and feature.id is not None: obj = session.query(self.mapped_class).get(feature.id) # depends on [control=['if'], data=[]] if self.before_create is not None: self.before_create(request, feature, obj) # depends on [control=['if'], data=[]] if obj is None: obj = self.mapped_class(feature) create = True # depends on [control=['if'], data=['obj']] else: obj.__update__(feature) if create: session.add(obj) # depends on [control=['if'], data=[]] objects.append(obj) # depends on [control=['for'], data=['feature']] session.flush() collection = FeatureCollection(objects) if len(objects) > 0 else None request.response.status_int = 201 return collection
def refund_payment(payment_psp_object: Model, amount: Money, client_ref: str) -> Tuple[bool, Model]: """ :param payment_psp_object: an instance representing the original payment in the psp :param amount: the amount to refund :param client_ref: a reference that will appear on the customer's credit card statement :return: a tuple (success, refund_psp_object) """ logger.debug('refund-payment', payment_psp_object=payment_psp_object, amount=amount, client_ref=client_ref) if amount.amount <= 0: raise PreconditionError('Can only refund positive amounts') psp = psp_for_model_instance(payment_psp_object) return psp.refund_payment(payment_psp_object, amount, client_ref)
def function[refund_payment, parameter[payment_psp_object, amount, client_ref]]: constant[ :param payment_psp_object: an instance representing the original payment in the psp :param amount: the amount to refund :param client_ref: a reference that will appear on the customer's credit card statement :return: a tuple (success, refund_psp_object) ] call[name[logger].debug, parameter[constant[refund-payment]]] if compare[name[amount].amount less_or_equal[<=] constant[0]] begin[:] <ast.Raise object at 0x7da2043447c0> variable[psp] assign[=] call[name[psp_for_model_instance], parameter[name[payment_psp_object]]] return[call[name[psp].refund_payment, parameter[name[payment_psp_object], name[amount], name[client_ref]]]]
keyword[def] identifier[refund_payment] ( identifier[payment_psp_object] : identifier[Model] , identifier[amount] : identifier[Money] , identifier[client_ref] : identifier[str] )-> identifier[Tuple] [ identifier[bool] , identifier[Model] ]: literal[string] identifier[logger] . identifier[debug] ( literal[string] , identifier[payment_psp_object] = identifier[payment_psp_object] , identifier[amount] = identifier[amount] , identifier[client_ref] = identifier[client_ref] ) keyword[if] identifier[amount] . identifier[amount] <= literal[int] : keyword[raise] identifier[PreconditionError] ( literal[string] ) identifier[psp] = identifier[psp_for_model_instance] ( identifier[payment_psp_object] ) keyword[return] identifier[psp] . identifier[refund_payment] ( identifier[payment_psp_object] , identifier[amount] , identifier[client_ref] )
def refund_payment(payment_psp_object: Model, amount: Money, client_ref: str) -> Tuple[bool, Model]: """ :param payment_psp_object: an instance representing the original payment in the psp :param amount: the amount to refund :param client_ref: a reference that will appear on the customer's credit card statement :return: a tuple (success, refund_psp_object) """ logger.debug('refund-payment', payment_psp_object=payment_psp_object, amount=amount, client_ref=client_ref) if amount.amount <= 0: raise PreconditionError('Can only refund positive amounts') # depends on [control=['if'], data=[]] psp = psp_for_model_instance(payment_psp_object) return psp.refund_payment(payment_psp_object, amount, client_ref)
def to_dict(self): """ Response as dict :return: response :rtype: dict """ cache_info = None if self.cache_info: cache_info = self.cache_info.to_dict() return { 'cache_info': cache_info, 'html': self.html, 'scraped': self.scraped, 'raw': self.raw }
def function[to_dict, parameter[self]]: constant[ Response as dict :return: response :rtype: dict ] variable[cache_info] assign[=] constant[None] if name[self].cache_info begin[:] variable[cache_info] assign[=] call[name[self].cache_info.to_dict, parameter[]] return[dictionary[[<ast.Constant object at 0x7da18fe924a0>, <ast.Constant object at 0x7da18fe905b0>, <ast.Constant object at 0x7da18fe93550>, <ast.Constant object at 0x7da18fe92590>], [<ast.Name object at 0x7da18fe92b30>, <ast.Attribute object at 0x7da18fe90d30>, <ast.Attribute object at 0x7da18fe92470>, <ast.Attribute object at 0x7da18fe92d70>]]]
keyword[def] identifier[to_dict] ( identifier[self] ): literal[string] identifier[cache_info] = keyword[None] keyword[if] identifier[self] . identifier[cache_info] : identifier[cache_info] = identifier[self] . identifier[cache_info] . identifier[to_dict] () keyword[return] { literal[string] : identifier[cache_info] , literal[string] : identifier[self] . identifier[html] , literal[string] : identifier[self] . identifier[scraped] , literal[string] : identifier[self] . identifier[raw] }
def to_dict(self): """ Response as dict :return: response :rtype: dict """ cache_info = None if self.cache_info: cache_info = self.cache_info.to_dict() # depends on [control=['if'], data=[]] return {'cache_info': cache_info, 'html': self.html, 'scraped': self.scraped, 'raw': self.raw}
async def submit_request(pool_handle: int, request_json: str) -> str: """ Publishes request message to validator pool (no signing, unlike sign_and_submit_request). The request is sent to the validator pool as is. It's assumed that it's already prepared. :param pool_handle: pool handle (created by open_pool_ledger). :param request_json: Request data json. :return: Request result as json. """ logger = logging.getLogger(__name__) logger.debug("submit_request: >>> pool_handle: %r, request_json: %r", pool_handle, request_json) if not hasattr(submit_request, "cb"): logger.debug("submit_request: Creating callback") submit_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) c_pool_handle = c_int32(pool_handle) c_request_json = c_char_p(request_json.encode('utf-8')) request_result = await do_call('indy_submit_request', c_pool_handle, c_request_json, submit_request.cb) res = request_result.decode() logger.debug("submit_request: <<< res: %r", res) return res
<ast.AsyncFunctionDef object at 0x7da1b26af2b0>
keyword[async] keyword[def] identifier[submit_request] ( identifier[pool_handle] : identifier[int] , identifier[request_json] : identifier[str] )-> identifier[str] : literal[string] identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[pool_handle] , identifier[request_json] ) keyword[if] keyword[not] identifier[hasattr] ( identifier[submit_request] , literal[string] ): identifier[logger] . identifier[debug] ( literal[string] ) identifier[submit_request] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] , identifier[c_char_p] )) identifier[c_pool_handle] = identifier[c_int32] ( identifier[pool_handle] ) identifier[c_request_json] = identifier[c_char_p] ( identifier[request_json] . identifier[encode] ( literal[string] )) identifier[request_result] = keyword[await] identifier[do_call] ( literal[string] , identifier[c_pool_handle] , identifier[c_request_json] , identifier[submit_request] . identifier[cb] ) identifier[res] = identifier[request_result] . identifier[decode] () identifier[logger] . identifier[debug] ( literal[string] , identifier[res] ) keyword[return] identifier[res]
async def submit_request(pool_handle: int, request_json: str) -> str: """ Publishes request message to validator pool (no signing, unlike sign_and_submit_request). The request is sent to the validator pool as is. It's assumed that it's already prepared. :param pool_handle: pool handle (created by open_pool_ledger). :param request_json: Request data json. :return: Request result as json. """ logger = logging.getLogger(__name__) logger.debug('submit_request: >>> pool_handle: %r, request_json: %r', pool_handle, request_json) if not hasattr(submit_request, 'cb'): logger.debug('submit_request: Creating callback') submit_request.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) # depends on [control=['if'], data=[]] c_pool_handle = c_int32(pool_handle) c_request_json = c_char_p(request_json.encode('utf-8')) request_result = await do_call('indy_submit_request', c_pool_handle, c_request_json, submit_request.cb) res = request_result.decode() logger.debug('submit_request: <<< res: %r', res) return res
def _push_textbuffer(self): """Push the textbuffer onto the stack as a Text node and clear it.""" if self._textbuffer: self._stack.append(tokens.Text(text="".join(self._textbuffer))) self._textbuffer = []
def function[_push_textbuffer, parameter[self]]: constant[Push the textbuffer onto the stack as a Text node and clear it.] if name[self]._textbuffer begin[:] call[name[self]._stack.append, parameter[call[name[tokens].Text, parameter[]]]] name[self]._textbuffer assign[=] list[[]]
keyword[def] identifier[_push_textbuffer] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_textbuffer] : identifier[self] . identifier[_stack] . identifier[append] ( identifier[tokens] . identifier[Text] ( identifier[text] = literal[string] . identifier[join] ( identifier[self] . identifier[_textbuffer] ))) identifier[self] . identifier[_textbuffer] =[]
def _push_textbuffer(self): """Push the textbuffer onto the stack as a Text node and clear it.""" if self._textbuffer: self._stack.append(tokens.Text(text=''.join(self._textbuffer))) self._textbuffer = [] # depends on [control=['if'], data=[]]
def from_directory(cls, directory, type, **kwargs): """ Creates an VSGSuite instance from a filename. :param str filename: The fully qualified path to the VSG configuration file. :param str type: The configuration type to generate. :param kwargs: List of additional keyworded arguments to be passed into the VSGSuite. """ # Resolve the suite class from the type suite_class = entrypoint('vsgen.suites', type) # Merge the default and any additional, maybe override, params. params = { 'root': os.path.abspath(directory), 'name': os.path.basename(os.path.abspath(directory)) } params.update({k: v for k, v in kwargs.items() if v is not None}) return suite_class(**params)
def function[from_directory, parameter[cls, directory, type]]: constant[ Creates an VSGSuite instance from a filename. :param str filename: The fully qualified path to the VSG configuration file. :param str type: The configuration type to generate. :param kwargs: List of additional keyworded arguments to be passed into the VSGSuite. ] variable[suite_class] assign[=] call[name[entrypoint], parameter[constant[vsgen.suites], name[type]]] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b24509a0>, <ast.Constant object at 0x7da1b2450820>], [<ast.Call object at 0x7da1b2450a60>, <ast.Call object at 0x7da1b24505e0>]] call[name[params].update, parameter[<ast.DictComp object at 0x7da1b2451630>]] return[call[name[suite_class], parameter[]]]
keyword[def] identifier[from_directory] ( identifier[cls] , identifier[directory] , identifier[type] ,** identifier[kwargs] ): literal[string] identifier[suite_class] = identifier[entrypoint] ( literal[string] , identifier[type] ) identifier[params] ={ literal[string] : identifier[os] . identifier[path] . identifier[abspath] ( identifier[directory] ), literal[string] : identifier[os] . identifier[path] . identifier[basename] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[directory] )) } identifier[params] . identifier[update] ({ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] }) keyword[return] identifier[suite_class] (** identifier[params] )
def from_directory(cls, directory, type, **kwargs): """ Creates an VSGSuite instance from a filename. :param str filename: The fully qualified path to the VSG configuration file. :param str type: The configuration type to generate. :param kwargs: List of additional keyworded arguments to be passed into the VSGSuite. """ # Resolve the suite class from the type suite_class = entrypoint('vsgen.suites', type) # Merge the default and any additional, maybe override, params. params = {'root': os.path.abspath(directory), 'name': os.path.basename(os.path.abspath(directory))} params.update({k: v for (k, v) in kwargs.items() if v is not None}) return suite_class(**params)
def _do_search(self): """ Perform the mlt call, then convert that raw format into a SearchResults instance and return it. """ if self._results_cache is None: response = self.raw() results = self.to_python(response.get('hits', {}).get('hits', [])) self._results_cache = DictSearchResults( self.type, response, results, None) return self._results_cache
def function[_do_search, parameter[self]]: constant[ Perform the mlt call, then convert that raw format into a SearchResults instance and return it. ] if compare[name[self]._results_cache is constant[None]] begin[:] variable[response] assign[=] call[name[self].raw, parameter[]] variable[results] assign[=] call[name[self].to_python, parameter[call[call[name[response].get, parameter[constant[hits], dictionary[[], []]]].get, parameter[constant[hits], list[[]]]]]] name[self]._results_cache assign[=] call[name[DictSearchResults], parameter[name[self].type, name[response], name[results], constant[None]]] return[name[self]._results_cache]
keyword[def] identifier[_do_search] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_results_cache] keyword[is] keyword[None] : identifier[response] = identifier[self] . identifier[raw] () identifier[results] = identifier[self] . identifier[to_python] ( identifier[response] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,[])) identifier[self] . identifier[_results_cache] = identifier[DictSearchResults] ( identifier[self] . identifier[type] , identifier[response] , identifier[results] , keyword[None] ) keyword[return] identifier[self] . identifier[_results_cache]
def _do_search(self): """ Perform the mlt call, then convert that raw format into a SearchResults instance and return it. """ if self._results_cache is None: response = self.raw() results = self.to_python(response.get('hits', {}).get('hits', [])) self._results_cache = DictSearchResults(self.type, response, results, None) # depends on [control=['if'], data=[]] return self._results_cache
def tag(words: List[str], corpus: str) -> List[Tuple[str, str]]: """ รับค่าเป็น ''list'' คืนค่าเป็น ''list'' เช่น [('คำ', 'ชนิดคำ'), ('คำ', 'ชนิดคำ'), ...] """ if not words: return [] if corpus == "orchid": tagger = nltk.tag.UnigramTagger(model=_orchid_tagger()) i = 0 while i < len(words): if words[i] == " ": words[i] = "<space>" elif words[i] == "+": words[i] = "<plus>" elif words[i] == "-": words[i] = "<minus>" elif words[i] == "=": words[i] = "<equal>" elif words[i] == ",": words[i] = "<comma>" elif words[i] == "$": words[i] = "<dollar>" elif words[i] == ".": words[i] = "<full_stop>" elif words[i] == "(": words[i] = "<left_parenthesis>" elif words[i] == ")": words[i] = "<right_parenthesis>" elif words[i] == '"': words[i] = "<quotation>" elif words[i] == "@": words[i] = "<at_mark>" elif words[i] == "&": words[i] = "<ampersand>" elif words[i] == "{": words[i] = "<left_curly_bracket>" elif words[i] == "^": words[i] = "<circumflex_accent>" elif words[i] == "?": words[i] = "<question_mark>" elif words[i] == "<": words[i] = "<less_than>" elif words[i] == ">": words[i] = "<greater_than>" elif words[i] == "=": words[i] = "<equal>" elif words[i] == "!": words[i] = "<exclamation>" elif words[i] == "’": words[i] = "<apostrophe>" elif words[i] == ":": words[i] = "<colon>" elif words[i] == "*": words[i] = "<asterisk>" elif words[i] == ";": words[i] = "<semi_colon>" elif words[i] == "/": words[i] = "<slash>" i += 1 t = tagger.tag(words) temp = [] i = 0 while i < len(t): word = t[i][0] tag = t[i][1] if word == "<space>": word = " " elif word == "<plus>": word = "+" elif word == "<minus>": word = "-" elif word == "<equal>": word = "=" elif word == "<comma>": word = "," elif word == "<dollar>": word = "$" elif word == "<full_stop>": word = "." 
elif word == "<left_parenthesis>": word = "(" elif word == "<right_parenthesis>": word = ")" elif word == "<quotation>": word = '"' elif word == "<at_mark>": word = "@" elif word == "<ampersand>": word = "&" elif word == "<left_curly_bracket>": word = "{" elif word == "<circumflex_accent>": word = "^" elif word == "<question_mark>": word = "?" elif word == "<less_than>": word = "<" elif word == "<greater_than>": word = ">" elif word == "<equal>": word = "=" elif word == "<exclamation>": word = "!" elif word == "<apostrophe>": word = "’" elif word == "<colon>": word = ":" elif word == "<asterisk>": word = "*" elif word == "<semi_colon>": word = ";" elif word == "<slash>": word = "/" temp.append((word, tag)) i += 1 t = temp else: tagger = _pud_tagger() t = tagger.tag(words) return t
def function[tag, parameter[words, corpus]]: constant[ รับค่าเป็น ''list'' คืนค่าเป็น ''list'' เช่น [('คำ', 'ชนิดคำ'), ('คำ', 'ชนิดคำ'), ...] ] if <ast.UnaryOp object at 0x7da1b1769360> begin[:] return[list[[]]] if compare[name[corpus] equal[==] constant[orchid]] begin[:] variable[tagger] assign[=] call[name[nltk].tag.UnigramTagger, parameter[]] variable[i] assign[=] constant[0] while compare[name[i] less[<] call[name[len], parameter[name[words]]]] begin[:] if compare[call[name[words]][name[i]] equal[==] constant[ ]] begin[:] call[name[words]][name[i]] assign[=] constant[<space>] <ast.AugAssign object at 0x7da1b18a1de0> variable[t] assign[=] call[name[tagger].tag, parameter[name[words]]] variable[temp] assign[=] list[[]] variable[i] assign[=] constant[0] while compare[name[i] less[<] call[name[len], parameter[name[t]]]] begin[:] variable[word] assign[=] call[call[name[t]][name[i]]][constant[0]] variable[tag] assign[=] call[call[name[t]][name[i]]][constant[1]] if compare[name[word] equal[==] constant[<space>]] begin[:] variable[word] assign[=] constant[ ] call[name[temp].append, parameter[tuple[[<ast.Name object at 0x7da1b1986950>, <ast.Name object at 0x7da1b1986800>]]]] <ast.AugAssign object at 0x7da1b1987af0> variable[t] assign[=] name[temp] return[name[t]]
keyword[def] identifier[tag] ( identifier[words] : identifier[List] [ identifier[str] ], identifier[corpus] : identifier[str] )-> identifier[List] [ identifier[Tuple] [ identifier[str] , identifier[str] ]]: literal[string] keyword[if] keyword[not] identifier[words] : keyword[return] [] keyword[if] identifier[corpus] == literal[string] : identifier[tagger] = identifier[nltk] . identifier[tag] . identifier[UnigramTagger] ( identifier[model] = identifier[_orchid_tagger] ()) identifier[i] = literal[int] keyword[while] identifier[i] < identifier[len] ( identifier[words] ): keyword[if] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : 
identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] keyword[elif] identifier[words] [ identifier[i] ]== literal[string] : identifier[words] [ identifier[i] ]= literal[string] identifier[i] += literal[int] identifier[t] = identifier[tagger] . 
identifier[tag] ( identifier[words] ) identifier[temp] =[] identifier[i] = literal[int] keyword[while] identifier[i] < identifier[len] ( identifier[t] ): identifier[word] = identifier[t] [ identifier[i] ][ literal[int] ] identifier[tag] = identifier[t] [ identifier[i] ][ literal[int] ] keyword[if] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : 
identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] keyword[elif] identifier[word] == literal[string] : identifier[word] = literal[string] identifier[temp] . identifier[append] (( identifier[word] , identifier[tag] )) identifier[i] += literal[int] identifier[t] = identifier[temp] keyword[else] : identifier[tagger] = identifier[_pud_tagger] () identifier[t] = identifier[tagger] . identifier[tag] ( identifier[words] ) keyword[return] identifier[t]
def tag(words: List[str], corpus: str) -> List[Tuple[str, str]]: """ รับค่าเป็น ''list'' คืนค่าเป็น ''list'' เช่น [('คำ', 'ชนิดคำ'), ('คำ', 'ชนิดคำ'), ...] """ if not words: return [] # depends on [control=['if'], data=[]] if corpus == 'orchid': tagger = nltk.tag.UnigramTagger(model=_orchid_tagger()) i = 0 while i < len(words): if words[i] == ' ': words[i] = '<space>' # depends on [control=['if'], data=[]] elif words[i] == '+': words[i] = '<plus>' # depends on [control=['if'], data=[]] elif words[i] == '-': words[i] = '<minus>' # depends on [control=['if'], data=[]] elif words[i] == '=': words[i] = '<equal>' # depends on [control=['if'], data=[]] elif words[i] == ',': words[i] = '<comma>' # depends on [control=['if'], data=[]] elif words[i] == '$': words[i] = '<dollar>' # depends on [control=['if'], data=[]] elif words[i] == '.': words[i] = '<full_stop>' # depends on [control=['if'], data=[]] elif words[i] == '(': words[i] = '<left_parenthesis>' # depends on [control=['if'], data=[]] elif words[i] == ')': words[i] = '<right_parenthesis>' # depends on [control=['if'], data=[]] elif words[i] == '"': words[i] = '<quotation>' # depends on [control=['if'], data=[]] elif words[i] == '@': words[i] = '<at_mark>' # depends on [control=['if'], data=[]] elif words[i] == '&': words[i] = '<ampersand>' # depends on [control=['if'], data=[]] elif words[i] == '{': words[i] = '<left_curly_bracket>' # depends on [control=['if'], data=[]] elif words[i] == '^': words[i] = '<circumflex_accent>' # depends on [control=['if'], data=[]] elif words[i] == '?': words[i] = '<question_mark>' # depends on [control=['if'], data=[]] elif words[i] == '<': words[i] = '<less_than>' # depends on [control=['if'], data=[]] elif words[i] == '>': words[i] = '<greater_than>' # depends on [control=['if'], data=[]] elif words[i] == '=': words[i] = '<equal>' # depends on [control=['if'], data=[]] elif words[i] == '!': words[i] = '<exclamation>' # depends on [control=['if'], data=[]] elif words[i] == '’': 
words[i] = '<apostrophe>' # depends on [control=['if'], data=[]] elif words[i] == ':': words[i] = '<colon>' # depends on [control=['if'], data=[]] elif words[i] == '*': words[i] = '<asterisk>' # depends on [control=['if'], data=[]] elif words[i] == ';': words[i] = '<semi_colon>' # depends on [control=['if'], data=[]] elif words[i] == '/': words[i] = '<slash>' # depends on [control=['if'], data=[]] i += 1 # depends on [control=['while'], data=['i']] t = tagger.tag(words) temp = [] i = 0 while i < len(t): word = t[i][0] tag = t[i][1] if word == '<space>': word = ' ' # depends on [control=['if'], data=['word']] elif word == '<plus>': word = '+' # depends on [control=['if'], data=['word']] elif word == '<minus>': word = '-' # depends on [control=['if'], data=['word']] elif word == '<equal>': word = '=' # depends on [control=['if'], data=['word']] elif word == '<comma>': word = ',' # depends on [control=['if'], data=['word']] elif word == '<dollar>': word = '$' # depends on [control=['if'], data=['word']] elif word == '<full_stop>': word = '.' # depends on [control=['if'], data=['word']] elif word == '<left_parenthesis>': word = '(' # depends on [control=['if'], data=['word']] elif word == '<right_parenthesis>': word = ')' # depends on [control=['if'], data=['word']] elif word == '<quotation>': word = '"' # depends on [control=['if'], data=['word']] elif word == '<at_mark>': word = '@' # depends on [control=['if'], data=['word']] elif word == '<ampersand>': word = '&' # depends on [control=['if'], data=['word']] elif word == '<left_curly_bracket>': word = '{' # depends on [control=['if'], data=['word']] elif word == '<circumflex_accent>': word = '^' # depends on [control=['if'], data=['word']] elif word == '<question_mark>': word = '?' 
# depends on [control=['if'], data=['word']] elif word == '<less_than>': word = '<' # depends on [control=['if'], data=['word']] elif word == '<greater_than>': word = '>' # depends on [control=['if'], data=['word']] elif word == '<equal>': word = '=' # depends on [control=['if'], data=['word']] elif word == '<exclamation>': word = '!' # depends on [control=['if'], data=['word']] elif word == '<apostrophe>': word = '’' # depends on [control=['if'], data=['word']] elif word == '<colon>': word = ':' # depends on [control=['if'], data=['word']] elif word == '<asterisk>': word = '*' # depends on [control=['if'], data=['word']] elif word == '<semi_colon>': word = ';' # depends on [control=['if'], data=['word']] elif word == '<slash>': word = '/' # depends on [control=['if'], data=['word']] temp.append((word, tag)) i += 1 # depends on [control=['while'], data=['i']] t = temp # depends on [control=['if'], data=[]] else: tagger = _pud_tagger() t = tagger.tag(words) return t
def poll(self, timeout=None): """Poll a GPIO for the edge event configured with the .edge property. `timeout` can be a positive number for a timeout in seconds, 0 for a non-blocking poll, or negative or None for a blocking poll. Defaults to blocking poll. Args: timeout (int, float, None): timeout duration in seconds. Returns: bool: ``True`` if an edge event occurred, ``False`` on timeout. Raises: GPIOError: if an I/O or OS error occurs. TypeError: if `timeout` type is not None or int. """ if not isinstance(timeout, (int, float, type(None))): raise TypeError("Invalid timeout type, should be integer, float, or None.") # Setup epoll p = select.epoll() p.register(self._fd, select.EPOLLIN | select.EPOLLET | select.EPOLLPRI) # Poll twice, as first call returns with current state for _ in range(2): events = p.poll(timeout) # If GPIO edge interrupt occurred if events: # Rewind try: os.lseek(self._fd, 0, os.SEEK_SET) except OSError as e: raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror) return True return False
def function[poll, parameter[self, timeout]]: constant[Poll a GPIO for the edge event configured with the .edge property. `timeout` can be a positive number for a timeout in seconds, 0 for a non-blocking poll, or negative or None for a blocking poll. Defaults to blocking poll. Args: timeout (int, float, None): timeout duration in seconds. Returns: bool: ``True`` if an edge event occurred, ``False`` on timeout. Raises: GPIOError: if an I/O or OS error occurs. TypeError: if `timeout` type is not None or int. ] if <ast.UnaryOp object at 0x7da20c795270> begin[:] <ast.Raise object at 0x7da18dc9a080> variable[p] assign[=] call[name[select].epoll, parameter[]] call[name[p].register, parameter[name[self]._fd, binary_operation[binary_operation[name[select].EPOLLIN <ast.BitOr object at 0x7da2590d6aa0> name[select].EPOLLET] <ast.BitOr object at 0x7da2590d6aa0> name[select].EPOLLPRI]]] for taget[name[_]] in starred[call[name[range], parameter[constant[2]]]] begin[:] variable[events] assign[=] call[name[p].poll, parameter[name[timeout]]] if name[events] begin[:] <ast.Try object at 0x7da18dc99db0> return[constant[True]] return[constant[False]]
keyword[def] identifier[poll] ( identifier[self] , identifier[timeout] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[timeout] ,( identifier[int] , identifier[float] , identifier[type] ( keyword[None] ))): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[p] = identifier[select] . identifier[epoll] () identifier[p] . identifier[register] ( identifier[self] . identifier[_fd] , identifier[select] . identifier[EPOLLIN] | identifier[select] . identifier[EPOLLET] | identifier[select] . identifier[EPOLLPRI] ) keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] ): identifier[events] = identifier[p] . identifier[poll] ( identifier[timeout] ) keyword[if] identifier[events] : keyword[try] : identifier[os] . identifier[lseek] ( identifier[self] . identifier[_fd] , literal[int] , identifier[os] . identifier[SEEK_SET] ) keyword[except] identifier[OSError] keyword[as] identifier[e] : keyword[raise] identifier[GPIOError] ( identifier[e] . identifier[errno] , literal[string] + identifier[e] . identifier[strerror] ) keyword[return] keyword[True] keyword[return] keyword[False]
def poll(self, timeout=None): """Poll a GPIO for the edge event configured with the .edge property. `timeout` can be a positive number for a timeout in seconds, 0 for a non-blocking poll, or negative or None for a blocking poll. Defaults to blocking poll. Args: timeout (int, float, None): timeout duration in seconds. Returns: bool: ``True`` if an edge event occurred, ``False`` on timeout. Raises: GPIOError: if an I/O or OS error occurs. TypeError: if `timeout` type is not None or int. """ if not isinstance(timeout, (int, float, type(None))): raise TypeError('Invalid timeout type, should be integer, float, or None.') # depends on [control=['if'], data=[]] # Setup epoll p = select.epoll() p.register(self._fd, select.EPOLLIN | select.EPOLLET | select.EPOLLPRI) # Poll twice, as first call returns with current state for _ in range(2): events = p.poll(timeout) # depends on [control=['for'], data=[]] # If GPIO edge interrupt occurred if events: # Rewind try: os.lseek(self._fd, 0, os.SEEK_SET) # depends on [control=['try'], data=[]] except OSError as e: raise GPIOError(e.errno, 'Rewinding GPIO: ' + e.strerror) # depends on [control=['except'], data=['e']] return True # depends on [control=['if'], data=[]] return False
def add_event(self, func: Callable, event: str) -> None: """ Adds a function to a event. :param func: The function to call when event is emitted :type func: Callable :param event: Name of the event. :type event: str """ self._events[event].add(func)
def function[add_event, parameter[self, func, event]]: constant[ Adds a function to a event. :param func: The function to call when event is emitted :type func: Callable :param event: Name of the event. :type event: str ] call[call[name[self]._events][name[event]].add, parameter[name[func]]]
keyword[def] identifier[add_event] ( identifier[self] , identifier[func] : identifier[Callable] , identifier[event] : identifier[str] )-> keyword[None] : literal[string] identifier[self] . identifier[_events] [ identifier[event] ]. identifier[add] ( identifier[func] )
def add_event(self, func: Callable, event: str) -> None: """ Adds a function to a event. :param func: The function to call when event is emitted :type func: Callable :param event: Name of the event. :type event: str """ self._events[event].add(func)
def _is_authenticated(self): """Checks if credentials allow for authenticated carto access""" if not self.auth_api_client.is_valid_api_key(): raise CartoException( 'Cannot authenticate user `{}`. Check credentials.'.format( self.creds.username()))
def function[_is_authenticated, parameter[self]]: constant[Checks if credentials allow for authenticated carto access] if <ast.UnaryOp object at 0x7da2044c0be0> begin[:] <ast.Raise object at 0x7da2044c0250>
keyword[def] identifier[_is_authenticated] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[auth_api_client] . identifier[is_valid_api_key] (): keyword[raise] identifier[CartoException] ( literal[string] . identifier[format] ( identifier[self] . identifier[creds] . identifier[username] ()))
def _is_authenticated(self): """Checks if credentials allow for authenticated carto access""" if not self.auth_api_client.is_valid_api_key(): raise CartoException('Cannot authenticate user `{}`. Check credentials.'.format(self.creds.username())) # depends on [control=['if'], data=[]]
def copy(self): """ Creates a copy of model """ return self.__class__(field_type=self.get_field_type(), data=self.export_data())
def function[copy, parameter[self]]: constant[ Creates a copy of model ] return[call[name[self].__class__, parameter[]]]
keyword[def] identifier[copy] ( identifier[self] ): literal[string] keyword[return] identifier[self] . identifier[__class__] ( identifier[field_type] = identifier[self] . identifier[get_field_type] (), identifier[data] = identifier[self] . identifier[export_data] ())
def copy(self): """ Creates a copy of model """ return self.__class__(field_type=self.get_field_type(), data=self.export_data())
def as_srec(self, number_of_data_bytes=32, address_length_bits=32): """Format the binary file as Motorola S-Records records and return them as a string. `number_of_data_bytes` is the number of data bytes in each record. `address_length_bits` is the number of address bits in each record. >>> print(binfile.as_srec()) S32500000100214601360121470136007EFE09D219012146017E17C20001FF5F16002148011973 S32500000120194E79234623965778239EDA3F01B2CA3F0156702B5E712B722B73214601342199 S5030002FA """ header = [] if self._header is not None: record = pack_srec('0', 0, len(self._header), self._header) header.append(record) type_ = str((address_length_bits // 8) - 1) if type_ not in '123': raise Error("expected data record type 1..3, but got {}".format( type_)) data = [pack_srec(type_, address, len(data), data) for address, data in self._segments.chunks(number_of_data_bytes)] number_of_records = len(data) if number_of_records <= 0xffff: footer = [pack_srec('5', number_of_records, 0, None)] elif number_of_records <= 0xffffff: footer = [pack_srec('6', number_of_records, 0, None)] else: raise Error('too many records {}'.format(number_of_records)) # Add the execution start address. if self.execution_start_address is not None: if type_ == '1': record = pack_srec('9', self.execution_start_address, 0, None) elif type_ == '2': record = pack_srec('8', self.execution_start_address, 0, None) else: record = pack_srec('7', self.execution_start_address, 0, None) footer.append(record) return '\n'.join(header + data + footer) + '\n'
def function[as_srec, parameter[self, number_of_data_bytes, address_length_bits]]: constant[Format the binary file as Motorola S-Records records and return them as a string. `number_of_data_bytes` is the number of data bytes in each record. `address_length_bits` is the number of address bits in each record. >>> print(binfile.as_srec()) S32500000100214601360121470136007EFE09D219012146017E17C20001FF5F16002148011973 S32500000120194E79234623965778239EDA3F01B2CA3F0156702B5E712B722B73214601342199 S5030002FA ] variable[header] assign[=] list[[]] if compare[name[self]._header is_not constant[None]] begin[:] variable[record] assign[=] call[name[pack_srec], parameter[constant[0], constant[0], call[name[len], parameter[name[self]._header]], name[self]._header]] call[name[header].append, parameter[name[record]]] variable[type_] assign[=] call[name[str], parameter[binary_operation[binary_operation[name[address_length_bits] <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]] - constant[1]]]] if compare[name[type_] <ast.NotIn object at 0x7da2590d7190> constant[123]] begin[:] <ast.Raise object at 0x7da18bccbb20> variable[data] assign[=] <ast.ListComp object at 0x7da18bccace0> variable[number_of_records] assign[=] call[name[len], parameter[name[data]]] if compare[name[number_of_records] less_or_equal[<=] constant[65535]] begin[:] variable[footer] assign[=] list[[<ast.Call object at 0x7da18c4cf550>]] if compare[name[self].execution_start_address is_not constant[None]] begin[:] if compare[name[type_] equal[==] constant[1]] begin[:] variable[record] assign[=] call[name[pack_srec], parameter[constant[9], name[self].execution_start_address, constant[0], constant[None]]] call[name[footer].append, parameter[name[record]]] return[binary_operation[call[constant[ ].join, parameter[binary_operation[binary_operation[name[header] + name[data]] + name[footer]]]] + constant[ ]]]
keyword[def] identifier[as_srec] ( identifier[self] , identifier[number_of_data_bytes] = literal[int] , identifier[address_length_bits] = literal[int] ): literal[string] identifier[header] =[] keyword[if] identifier[self] . identifier[_header] keyword[is] keyword[not] keyword[None] : identifier[record] = identifier[pack_srec] ( literal[string] , literal[int] , identifier[len] ( identifier[self] . identifier[_header] ), identifier[self] . identifier[_header] ) identifier[header] . identifier[append] ( identifier[record] ) identifier[type_] = identifier[str] (( identifier[address_length_bits] // literal[int] )- literal[int] ) keyword[if] identifier[type_] keyword[not] keyword[in] literal[string] : keyword[raise] identifier[Error] ( literal[string] . identifier[format] ( identifier[type_] )) identifier[data] =[ identifier[pack_srec] ( identifier[type_] , identifier[address] , identifier[len] ( identifier[data] ), identifier[data] ) keyword[for] identifier[address] , identifier[data] keyword[in] identifier[self] . identifier[_segments] . identifier[chunks] ( identifier[number_of_data_bytes] )] identifier[number_of_records] = identifier[len] ( identifier[data] ) keyword[if] identifier[number_of_records] <= literal[int] : identifier[footer] =[ identifier[pack_srec] ( literal[string] , identifier[number_of_records] , literal[int] , keyword[None] )] keyword[elif] identifier[number_of_records] <= literal[int] : identifier[footer] =[ identifier[pack_srec] ( literal[string] , identifier[number_of_records] , literal[int] , keyword[None] )] keyword[else] : keyword[raise] identifier[Error] ( literal[string] . identifier[format] ( identifier[number_of_records] )) keyword[if] identifier[self] . identifier[execution_start_address] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[type_] == literal[string] : identifier[record] = identifier[pack_srec] ( literal[string] , identifier[self] . 
identifier[execution_start_address] , literal[int] , keyword[None] ) keyword[elif] identifier[type_] == literal[string] : identifier[record] = identifier[pack_srec] ( literal[string] , identifier[self] . identifier[execution_start_address] , literal[int] , keyword[None] ) keyword[else] : identifier[record] = identifier[pack_srec] ( literal[string] , identifier[self] . identifier[execution_start_address] , literal[int] , keyword[None] ) identifier[footer] . identifier[append] ( identifier[record] ) keyword[return] literal[string] . identifier[join] ( identifier[header] + identifier[data] + identifier[footer] )+ literal[string]
def as_srec(self, number_of_data_bytes=32, address_length_bits=32): """Format the binary file as Motorola S-Records records and return them as a string. `number_of_data_bytes` is the number of data bytes in each record. `address_length_bits` is the number of address bits in each record. >>> print(binfile.as_srec()) S32500000100214601360121470136007EFE09D219012146017E17C20001FF5F16002148011973 S32500000120194E79234623965778239EDA3F01B2CA3F0156702B5E712B722B73214601342199 S5030002FA """ header = [] if self._header is not None: record = pack_srec('0', 0, len(self._header), self._header) header.append(record) # depends on [control=['if'], data=[]] type_ = str(address_length_bits // 8 - 1) if type_ not in '123': raise Error('expected data record type 1..3, but got {}'.format(type_)) # depends on [control=['if'], data=['type_']] data = [pack_srec(type_, address, len(data), data) for (address, data) in self._segments.chunks(number_of_data_bytes)] number_of_records = len(data) if number_of_records <= 65535: footer = [pack_srec('5', number_of_records, 0, None)] # depends on [control=['if'], data=['number_of_records']] elif number_of_records <= 16777215: footer = [pack_srec('6', number_of_records, 0, None)] # depends on [control=['if'], data=['number_of_records']] else: raise Error('too many records {}'.format(number_of_records)) # Add the execution start address. if self.execution_start_address is not None: if type_ == '1': record = pack_srec('9', self.execution_start_address, 0, None) # depends on [control=['if'], data=[]] elif type_ == '2': record = pack_srec('8', self.execution_start_address, 0, None) # depends on [control=['if'], data=[]] else: record = pack_srec('7', self.execution_start_address, 0, None) footer.append(record) # depends on [control=['if'], data=[]] return '\n'.join(header + data + footer) + '\n'
def metadata(self, map_id, secure=False): """Returns TileJSON metadata for a tileset. Parameters ---------- map_id : str The map's unique identifier in the format username.id. secure : bool, optional The representation of the requested resources, where True indicates representation as HTTPS endpoints. The default value is False. Returns ------- request.Response The response object with TileJSON metadata for the specified tileset. """ # Create dict to assist in building URI resource path. path_values = dict( map_id=map_id ) # Build URI resource path. path_part = "/{map_id}.json" uri = URITemplate(self.base_uri + path_part).expand(**path_values) # Build URI query parameters. query_parameters = dict() if secure: query_parameters["secure"] = "" # Send HTTP GET request. response = self.session.get(uri, params=query_parameters) self.handle_http_error(response) return response
def function[metadata, parameter[self, map_id, secure]]: constant[Returns TileJSON metadata for a tileset. Parameters ---------- map_id : str The map's unique identifier in the format username.id. secure : bool, optional The representation of the requested resources, where True indicates representation as HTTPS endpoints. The default value is False. Returns ------- request.Response The response object with TileJSON metadata for the specified tileset. ] variable[path_values] assign[=] call[name[dict], parameter[]] variable[path_part] assign[=] constant[/{map_id}.json] variable[uri] assign[=] call[call[name[URITemplate], parameter[binary_operation[name[self].base_uri + name[path_part]]]].expand, parameter[]] variable[query_parameters] assign[=] call[name[dict], parameter[]] if name[secure] begin[:] call[name[query_parameters]][constant[secure]] assign[=] constant[] variable[response] assign[=] call[name[self].session.get, parameter[name[uri]]] call[name[self].handle_http_error, parameter[name[response]]] return[name[response]]
keyword[def] identifier[metadata] ( identifier[self] , identifier[map_id] , identifier[secure] = keyword[False] ): literal[string] identifier[path_values] = identifier[dict] ( identifier[map_id] = identifier[map_id] ) identifier[path_part] = literal[string] identifier[uri] = identifier[URITemplate] ( identifier[self] . identifier[base_uri] + identifier[path_part] ). identifier[expand] (** identifier[path_values] ) identifier[query_parameters] = identifier[dict] () keyword[if] identifier[secure] : identifier[query_parameters] [ literal[string] ]= literal[string] identifier[response] = identifier[self] . identifier[session] . identifier[get] ( identifier[uri] , identifier[params] = identifier[query_parameters] ) identifier[self] . identifier[handle_http_error] ( identifier[response] ) keyword[return] identifier[response]
def metadata(self, map_id, secure=False): """Returns TileJSON metadata for a tileset. Parameters ---------- map_id : str The map's unique identifier in the format username.id. secure : bool, optional The representation of the requested resources, where True indicates representation as HTTPS endpoints. The default value is False. Returns ------- request.Response The response object with TileJSON metadata for the specified tileset. """ # Create dict to assist in building URI resource path. path_values = dict(map_id=map_id) # Build URI resource path. path_part = '/{map_id}.json' uri = URITemplate(self.base_uri + path_part).expand(**path_values) # Build URI query parameters. query_parameters = dict() if secure: query_parameters['secure'] = '' # depends on [control=['if'], data=[]] # Send HTTP GET request. response = self.session.get(uri, params=query_parameters) self.handle_http_error(response) return response
def t_ID(t): r'[a-zA-Z][a-zA-Z0-9]*[$%]?' t.type = reserved.get(t.value.lower(), 'ID') callables = { api.constants.CLASS.array: 'ARRAY_ID', } if t.type != 'ID': t.value = t.type else: entry = api.global_.SYMBOL_TABLE.get_entry(t.value) if api.global_.SYMBOL_TABLE is not None else None if entry: t.type = callables.get(entry.class_, t.type) if t.type == 'BIN': t.lexer.begin('bin') return None return t
def function[t_ID, parameter[t]]: constant[[a-zA-Z][a-zA-Z0-9]*[$%]?] name[t].type assign[=] call[name[reserved].get, parameter[call[name[t].value.lower, parameter[]], constant[ID]]] variable[callables] assign[=] dictionary[[<ast.Attribute object at 0x7da18c4ce740>], [<ast.Constant object at 0x7da18c4cce80>]] if compare[name[t].type not_equal[!=] constant[ID]] begin[:] name[t].value assign[=] name[t].type if compare[name[t].type equal[==] constant[BIN]] begin[:] call[name[t].lexer.begin, parameter[constant[bin]]] return[constant[None]] return[name[t]]
keyword[def] identifier[t_ID] ( identifier[t] ): literal[string] identifier[t] . identifier[type] = identifier[reserved] . identifier[get] ( identifier[t] . identifier[value] . identifier[lower] (), literal[string] ) identifier[callables] ={ identifier[api] . identifier[constants] . identifier[CLASS] . identifier[array] : literal[string] , } keyword[if] identifier[t] . identifier[type] != literal[string] : identifier[t] . identifier[value] = identifier[t] . identifier[type] keyword[else] : identifier[entry] = identifier[api] . identifier[global_] . identifier[SYMBOL_TABLE] . identifier[get_entry] ( identifier[t] . identifier[value] ) keyword[if] identifier[api] . identifier[global_] . identifier[SYMBOL_TABLE] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] keyword[if] identifier[entry] : identifier[t] . identifier[type] = identifier[callables] . identifier[get] ( identifier[entry] . identifier[class_] , identifier[t] . identifier[type] ) keyword[if] identifier[t] . identifier[type] == literal[string] : identifier[t] . identifier[lexer] . identifier[begin] ( literal[string] ) keyword[return] keyword[None] keyword[return] identifier[t]
def t_ID(t): """[a-zA-Z][a-zA-Z0-9]*[$%]?""" t.type = reserved.get(t.value.lower(), 'ID') callables = {api.constants.CLASS.array: 'ARRAY_ID'} if t.type != 'ID': t.value = t.type # depends on [control=['if'], data=[]] else: entry = api.global_.SYMBOL_TABLE.get_entry(t.value) if api.global_.SYMBOL_TABLE is not None else None if entry: t.type = callables.get(entry.class_, t.type) # depends on [control=['if'], data=[]] if t.type == 'BIN': t.lexer.begin('bin') return None # depends on [control=['if'], data=[]] return t
def bend(self, bends, frame_rate=None, over_sample=None): """TODO Add docstring.""" self.command.append("bend") if frame_rate is not None and isinstance(frame_rate, int): self.command.append('-f %s' % frame_rate) if over_sample is not None and isinstance(over_sample, int): self.command.append('-o %s' % over_sample) for bend in bends: self.command.append(','.join(bend)) return self
def function[bend, parameter[self, bends, frame_rate, over_sample]]: constant[TODO Add docstring.] call[name[self].command.append, parameter[constant[bend]]] if <ast.BoolOp object at 0x7da1b0bd8af0> begin[:] call[name[self].command.append, parameter[binary_operation[constant[-f %s] <ast.Mod object at 0x7da2590d6920> name[frame_rate]]]] if <ast.BoolOp object at 0x7da1b0bd86a0> begin[:] call[name[self].command.append, parameter[binary_operation[constant[-o %s] <ast.Mod object at 0x7da2590d6920> name[over_sample]]]] for taget[name[bend]] in starred[name[bends]] begin[:] call[name[self].command.append, parameter[call[constant[,].join, parameter[name[bend]]]]] return[name[self]]
keyword[def] identifier[bend] ( identifier[self] , identifier[bends] , identifier[frame_rate] = keyword[None] , identifier[over_sample] = keyword[None] ): literal[string] identifier[self] . identifier[command] . identifier[append] ( literal[string] ) keyword[if] identifier[frame_rate] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[frame_rate] , identifier[int] ): identifier[self] . identifier[command] . identifier[append] ( literal[string] % identifier[frame_rate] ) keyword[if] identifier[over_sample] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[over_sample] , identifier[int] ): identifier[self] . identifier[command] . identifier[append] ( literal[string] % identifier[over_sample] ) keyword[for] identifier[bend] keyword[in] identifier[bends] : identifier[self] . identifier[command] . identifier[append] ( literal[string] . identifier[join] ( identifier[bend] )) keyword[return] identifier[self]
def bend(self, bends, frame_rate=None, over_sample=None): """TODO Add docstring.""" self.command.append('bend') if frame_rate is not None and isinstance(frame_rate, int): self.command.append('-f %s' % frame_rate) # depends on [control=['if'], data=[]] if over_sample is not None and isinstance(over_sample, int): self.command.append('-o %s' % over_sample) # depends on [control=['if'], data=[]] for bend in bends: self.command.append(','.join(bend)) # depends on [control=['for'], data=['bend']] return self
def save_to_data(self, in_place=False): """Save index to data structure. :param in_place: Do not copy index value to a new list object :type in_place: bool :return: Index data structure :rtype: list """ if in_place: return [ list(self._index.items()), list(self._undefined_keys.keys()) ] return ( [(key, values[:]) for key, values in self._index.items()], list(self._undefined_keys.keys()), )
def function[save_to_data, parameter[self, in_place]]: constant[Save index to data structure. :param in_place: Do not copy index value to a new list object :type in_place: bool :return: Index data structure :rtype: list ] if name[in_place] begin[:] return[list[[<ast.Call object at 0x7da1b189dc60>, <ast.Call object at 0x7da1b189cac0>]]] return[tuple[[<ast.ListComp object at 0x7da1b189cc70>, <ast.Call object at 0x7da1b189d150>]]]
keyword[def] identifier[save_to_data] ( identifier[self] , identifier[in_place] = keyword[False] ): literal[string] keyword[if] identifier[in_place] : keyword[return] [ identifier[list] ( identifier[self] . identifier[_index] . identifier[items] ()), identifier[list] ( identifier[self] . identifier[_undefined_keys] . identifier[keys] ()) ] keyword[return] ( [( identifier[key] , identifier[values] [:]) keyword[for] identifier[key] , identifier[values] keyword[in] identifier[self] . identifier[_index] . identifier[items] ()], identifier[list] ( identifier[self] . identifier[_undefined_keys] . identifier[keys] ()), )
def save_to_data(self, in_place=False): """Save index to data structure. :param in_place: Do not copy index value to a new list object :type in_place: bool :return: Index data structure :rtype: list """ if in_place: return [list(self._index.items()), list(self._undefined_keys.keys())] # depends on [control=['if'], data=[]] return ([(key, values[:]) for (key, values) in self._index.items()], list(self._undefined_keys.keys()))
def update_gsi_provisioning( table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=False): """ Update provisioning on a global secondary index :type table_name: str :param table_name: Name of the DynamoDB table :type table_key: str :param table_key: Table configuration option key name :type gsi_name: str :param gsi_name: Name of the GSI :type gsi_key: str :param gsi_key: GSI configuration option key name :type reads: int :param reads: Number of reads to provision :type writes: int :param writes: Number of writes to provision :type retry_with_only_increase: bool :param retry_with_only_increase: Set to True to ensure only increases """ current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name)) current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name)) # Make sure we aren't scaling down if we turned off downscaling if (not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling') or not get_gsi_option( table_key, gsi_key, 'enable_writes_down_scaling')): if (not get_gsi_option( table_key, gsi_key, 'enable_reads_down_scaling') and current_reads > reads): reads = current_reads if (not get_gsi_option( table_key, gsi_key, 'enable_writes_down_scaling') and current_writes > writes): writes = current_writes # Return if we do not need to scale at all if reads == current_reads and writes == current_writes: logger.info( '{0} - No need to scale up reads nor writes'.format( table_name)) return if retry_with_only_increase: # Ensure that we are only doing increases if current_reads > reads: reads = current_reads if current_writes > writes: writes = current_writes # Return if we do not need to scale at all if reads == current_reads and writes == current_writes: logger.info( '{0} - GSI: {1} - No need to scale up reads nor writes'.format( table_name, gsi_name)) return logger.info( '{0} - GSI: {1} - Retrying to update provisioning, ' 'excluding any decreases. 
' 'Setting new reads to {2} and new writes to {3}'.format( table_name, gsi_name, reads, writes)) # Check that we are in the right time frame m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows') if m_windows: if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows): logger.warning( '{0} - GSI: {1} - We are outside a maintenace window. ' 'Will only perform up scaling activites'.format( table_name, gsi_name)) # Ensure that we are only doing increases if current_reads > reads: reads = current_reads if current_writes > writes: writes = current_writes # Return if we do not need to scale up if reads == current_reads and writes == current_writes: logger.info( '{0} - GSI: {1} - ' 'No need to scale up reads nor writes'.format( table_name, gsi_name)) return else: logger.info( '{0} - GSI: {1} - ' 'Current time is within maintenance window'.format( table_name, gsi_name)) logger.info( '{0} - GSI: {1} - ' 'Updating provisioning to {2} reads and {3} writes'.format( table_name, gsi_name, reads, writes)) # Return if dry-run if get_global_option('dry_run'): return try: DYNAMODB_CONNECTION.update_table( table_name=table_name, global_secondary_index_updates=[ { "Update": { "IndexName": gsi_name, "ProvisionedThroughput": { "ReadCapacityUnits": reads, "WriteCapacityUnits": writes } } } ]) message = [] if current_reads > reads: message.append( '{0} - GSI: {1} - Reads: DOWN from {2} to {3}\n'.format( table_name, gsi_name, current_reads, reads)) elif current_reads < reads: message.append( '{0} - GSI: {1} - Reads: UP from {2} to {3}\n'.format( table_name, gsi_name, current_reads, reads)) if current_writes > writes: message.append( '{0} - GSI: {1} - Writes: DOWN from {2} to {3}\n'.format( table_name, gsi_name, current_writes, writes)) elif current_writes < writes: message.append( '{0} - GSI: {1} - Writes: UP from {2} to {3}\n'.format( table_name, gsi_name, current_writes, writes)) # See if we should send notifications for scale-down, scale-up or both 
sns_message_types = [] if current_reads > reads or current_writes > writes: sns_message_types.append('scale-down') if current_reads < reads or current_writes < writes: sns_message_types.append('scale-up') sns.publish_gsi_notification( table_key, gsi_key, ''.join(message), sns_message_types, subject='Updated provisioning for GSI {0}'.format(gsi_name)) except JSONResponseError as error: exception = error.body['__type'].split('#')[1] know_exceptions = ['LimitExceededException'] if exception in know_exceptions: logger.warning('{0} - GSI: {1} - {2}: {3}'.format( table_name, gsi_name, exception, error.body['message'])) else: logger.error( ( '{0} - GSI: {1} - Unhandled exception: {2}: {3}. ' 'Please file a bug report at ' 'https://github.com/sebdah/dynamic-dynamodb/issues' ).format( table_name, gsi_name, exception, error.body['message'])) if (not retry_with_only_increase and exception == 'LimitExceededException'): logger.info( '{0} - GSI: {1} - Will retry to update provisioning ' 'with only increases'.format(table_name, gsi_name)) update_gsi_provisioning( table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=True)
def function[update_gsi_provisioning, parameter[table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase]]: constant[ Update provisioning on a global secondary index :type table_name: str :param table_name: Name of the DynamoDB table :type table_key: str :param table_key: Table configuration option key name :type gsi_name: str :param gsi_name: Name of the GSI :type gsi_key: str :param gsi_key: GSI configuration option key name :type reads: int :param reads: Number of reads to provision :type writes: int :param writes: Number of writes to provision :type retry_with_only_increase: bool :param retry_with_only_increase: Set to True to ensure only increases ] variable[current_reads] assign[=] call[name[int], parameter[call[name[get_provisioned_gsi_read_units], parameter[name[table_name], name[gsi_name]]]]] variable[current_writes] assign[=] call[name[int], parameter[call[name[get_provisioned_gsi_write_units], parameter[name[table_name], name[gsi_name]]]]] if <ast.BoolOp object at 0x7da20e9b2b60> begin[:] if <ast.BoolOp object at 0x7da20e9b3ca0> begin[:] variable[reads] assign[=] name[current_reads] if <ast.BoolOp object at 0x7da20e9b2ef0> begin[:] variable[writes] assign[=] name[current_writes] if <ast.BoolOp object at 0x7da20e9b0370> begin[:] call[name[logger].info, parameter[call[constant[{0} - No need to scale up reads nor writes].format, parameter[name[table_name]]]]] return[None] if name[retry_with_only_increase] begin[:] if compare[name[current_reads] greater[>] name[reads]] begin[:] variable[reads] assign[=] name[current_reads] if compare[name[current_writes] greater[>] name[writes]] begin[:] variable[writes] assign[=] name[current_writes] if <ast.BoolOp object at 0x7da20e9b1840> begin[:] call[name[logger].info, parameter[call[constant[{0} - GSI: {1} - No need to scale up reads nor writes].format, parameter[name[table_name], name[gsi_name]]]]] return[None] call[name[logger].info, parameter[call[constant[{0} - GSI: {1} - Retrying to update 
provisioning, excluding any decreases. Setting new reads to {2} and new writes to {3}].format, parameter[name[table_name], name[gsi_name], name[reads], name[writes]]]]] variable[m_windows] assign[=] call[name[get_gsi_option], parameter[name[table_key], name[gsi_key], constant[maintenance_windows]]] if name[m_windows] begin[:] if <ast.UnaryOp object at 0x7da20e9b1210> begin[:] call[name[logger].warning, parameter[call[constant[{0} - GSI: {1} - We are outside a maintenace window. Will only perform up scaling activites].format, parameter[name[table_name], name[gsi_name]]]]] if compare[name[current_reads] greater[>] name[reads]] begin[:] variable[reads] assign[=] name[current_reads] if compare[name[current_writes] greater[>] name[writes]] begin[:] variable[writes] assign[=] name[current_writes] if <ast.BoolOp object at 0x7da20e9b0790> begin[:] call[name[logger].info, parameter[call[constant[{0} - GSI: {1} - No need to scale up reads nor writes].format, parameter[name[table_name], name[gsi_name]]]]] return[None] call[name[logger].info, parameter[call[constant[{0} - GSI: {1} - Updating provisioning to {2} reads and {3} writes].format, parameter[name[table_name], name[gsi_name], name[reads], name[writes]]]]] if call[name[get_global_option], parameter[constant[dry_run]]] begin[:] return[None] <ast.Try object at 0x7da20e9b1990>
keyword[def] identifier[update_gsi_provisioning] ( identifier[table_name] , identifier[table_key] , identifier[gsi_name] , identifier[gsi_key] , identifier[reads] , identifier[writes] , identifier[retry_with_only_increase] = keyword[False] ): literal[string] identifier[current_reads] = identifier[int] ( identifier[get_provisioned_gsi_read_units] ( identifier[table_name] , identifier[gsi_name] )) identifier[current_writes] = identifier[int] ( identifier[get_provisioned_gsi_write_units] ( identifier[table_name] , identifier[gsi_name] )) keyword[if] ( keyword[not] identifier[get_gsi_option] ( identifier[table_key] , identifier[gsi_key] , literal[string] ) keyword[or] keyword[not] identifier[get_gsi_option] ( identifier[table_key] , identifier[gsi_key] , literal[string] )): keyword[if] ( keyword[not] identifier[get_gsi_option] ( identifier[table_key] , identifier[gsi_key] , literal[string] ) keyword[and] identifier[current_reads] > identifier[reads] ): identifier[reads] = identifier[current_reads] keyword[if] ( keyword[not] identifier[get_gsi_option] ( identifier[table_key] , identifier[gsi_key] , literal[string] ) keyword[and] identifier[current_writes] > identifier[writes] ): identifier[writes] = identifier[current_writes] keyword[if] identifier[reads] == identifier[current_reads] keyword[and] identifier[writes] == identifier[current_writes] : identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[table_name] )) keyword[return] keyword[if] identifier[retry_with_only_increase] : keyword[if] identifier[current_reads] > identifier[reads] : identifier[reads] = identifier[current_reads] keyword[if] identifier[current_writes] > identifier[writes] : identifier[writes] = identifier[current_writes] keyword[if] identifier[reads] == identifier[current_reads] keyword[and] identifier[writes] == identifier[current_writes] : identifier[logger] . identifier[info] ( literal[string] . 
identifier[format] ( identifier[table_name] , identifier[gsi_name] )) keyword[return] identifier[logger] . identifier[info] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[reads] , identifier[writes] )) identifier[m_windows] = identifier[get_gsi_option] ( identifier[table_key] , identifier[gsi_key] , literal[string] ) keyword[if] identifier[m_windows] : keyword[if] keyword[not] identifier[__is_gsi_maintenance_window] ( identifier[table_name] , identifier[gsi_name] , identifier[m_windows] ): identifier[logger] . identifier[warning] ( literal[string] literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] )) keyword[if] identifier[current_reads] > identifier[reads] : identifier[reads] = identifier[current_reads] keyword[if] identifier[current_writes] > identifier[writes] : identifier[writes] = identifier[current_writes] keyword[if] identifier[reads] == identifier[current_reads] keyword[and] identifier[writes] == identifier[current_writes] : identifier[logger] . identifier[info] ( literal[string] literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] )) keyword[return] keyword[else] : identifier[logger] . identifier[info] ( literal[string] literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] )) identifier[logger] . identifier[info] ( literal[string] literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[reads] , identifier[writes] )) keyword[if] identifier[get_global_option] ( literal[string] ): keyword[return] keyword[try] : identifier[DYNAMODB_CONNECTION] . 
identifier[update_table] ( identifier[table_name] = identifier[table_name] , identifier[global_secondary_index_updates] =[ { literal[string] :{ literal[string] : identifier[gsi_name] , literal[string] :{ literal[string] : identifier[reads] , literal[string] : identifier[writes] } } } ]) identifier[message] =[] keyword[if] identifier[current_reads] > identifier[reads] : identifier[message] . identifier[append] ( literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[current_reads] , identifier[reads] )) keyword[elif] identifier[current_reads] < identifier[reads] : identifier[message] . identifier[append] ( literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[current_reads] , identifier[reads] )) keyword[if] identifier[current_writes] > identifier[writes] : identifier[message] . identifier[append] ( literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[current_writes] , identifier[writes] )) keyword[elif] identifier[current_writes] < identifier[writes] : identifier[message] . identifier[append] ( literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[current_writes] , identifier[writes] )) identifier[sns_message_types] =[] keyword[if] identifier[current_reads] > identifier[reads] keyword[or] identifier[current_writes] > identifier[writes] : identifier[sns_message_types] . identifier[append] ( literal[string] ) keyword[if] identifier[current_reads] < identifier[reads] keyword[or] identifier[current_writes] < identifier[writes] : identifier[sns_message_types] . identifier[append] ( literal[string] ) identifier[sns] . identifier[publish_gsi_notification] ( identifier[table_key] , identifier[gsi_key] , literal[string] . identifier[join] ( identifier[message] ), identifier[sns_message_types] , identifier[subject] = literal[string] . 
identifier[format] ( identifier[gsi_name] )) keyword[except] identifier[JSONResponseError] keyword[as] identifier[error] : identifier[exception] = identifier[error] . identifier[body] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ] identifier[know_exceptions] =[ literal[string] ] keyword[if] identifier[exception] keyword[in] identifier[know_exceptions] : identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[exception] , identifier[error] . identifier[body] [ literal[string] ])) keyword[else] : identifier[logger] . identifier[error] ( ( literal[string] literal[string] literal[string] ). identifier[format] ( identifier[table_name] , identifier[gsi_name] , identifier[exception] , identifier[error] . identifier[body] [ literal[string] ])) keyword[if] ( keyword[not] identifier[retry_with_only_increase] keyword[and] identifier[exception] == literal[string] ): identifier[logger] . identifier[info] ( literal[string] literal[string] . identifier[format] ( identifier[table_name] , identifier[gsi_name] )) identifier[update_gsi_provisioning] ( identifier[table_name] , identifier[table_key] , identifier[gsi_name] , identifier[gsi_key] , identifier[reads] , identifier[writes] , identifier[retry_with_only_increase] = keyword[True] )
def update_gsi_provisioning(table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=False): """ Update provisioning on a global secondary index :type table_name: str :param table_name: Name of the DynamoDB table :type table_key: str :param table_key: Table configuration option key name :type gsi_name: str :param gsi_name: Name of the GSI :type gsi_key: str :param gsi_key: GSI configuration option key name :type reads: int :param reads: Number of reads to provision :type writes: int :param writes: Number of writes to provision :type retry_with_only_increase: bool :param retry_with_only_increase: Set to True to ensure only increases """ current_reads = int(get_provisioned_gsi_read_units(table_name, gsi_name)) current_writes = int(get_provisioned_gsi_write_units(table_name, gsi_name)) # Make sure we aren't scaling down if we turned off downscaling if not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling') or not get_gsi_option(table_key, gsi_key, 'enable_writes_down_scaling'): if not get_gsi_option(table_key, gsi_key, 'enable_reads_down_scaling') and current_reads > reads: reads = current_reads # depends on [control=['if'], data=[]] if not get_gsi_option(table_key, gsi_key, 'enable_writes_down_scaling') and current_writes > writes: writes = current_writes # depends on [control=['if'], data=[]] # Return if we do not need to scale at all if reads == current_reads and writes == current_writes: logger.info('{0} - No need to scale up reads nor writes'.format(table_name)) return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if retry_with_only_increase: # Ensure that we are only doing increases if current_reads > reads: reads = current_reads # depends on [control=['if'], data=['current_reads', 'reads']] if current_writes > writes: writes = current_writes # depends on [control=['if'], data=['current_writes', 'writes']] # Return if we do not need to scale at all if reads == current_reads and writes == 
current_writes: logger.info('{0} - GSI: {1} - No need to scale up reads nor writes'.format(table_name, gsi_name)) return # depends on [control=['if'], data=[]] logger.info('{0} - GSI: {1} - Retrying to update provisioning, excluding any decreases. Setting new reads to {2} and new writes to {3}'.format(table_name, gsi_name, reads, writes)) # depends on [control=['if'], data=[]] # Check that we are in the right time frame m_windows = get_gsi_option(table_key, gsi_key, 'maintenance_windows') if m_windows: if not __is_gsi_maintenance_window(table_name, gsi_name, m_windows): logger.warning('{0} - GSI: {1} - We are outside a maintenace window. Will only perform up scaling activites'.format(table_name, gsi_name)) # Ensure that we are only doing increases if current_reads > reads: reads = current_reads # depends on [control=['if'], data=['current_reads', 'reads']] if current_writes > writes: writes = current_writes # depends on [control=['if'], data=['current_writes', 'writes']] # Return if we do not need to scale up if reads == current_reads and writes == current_writes: logger.info('{0} - GSI: {1} - No need to scale up reads nor writes'.format(table_name, gsi_name)) return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: logger.info('{0} - GSI: {1} - Current time is within maintenance window'.format(table_name, gsi_name)) # depends on [control=['if'], data=[]] logger.info('{0} - GSI: {1} - Updating provisioning to {2} reads and {3} writes'.format(table_name, gsi_name, reads, writes)) # Return if dry-run if get_global_option('dry_run'): return # depends on [control=['if'], data=[]] try: DYNAMODB_CONNECTION.update_table(table_name=table_name, global_secondary_index_updates=[{'Update': {'IndexName': gsi_name, 'ProvisionedThroughput': {'ReadCapacityUnits': reads, 'WriteCapacityUnits': writes}}}]) message = [] if current_reads > reads: message.append('{0} - GSI: {1} - Reads: DOWN from {2} to {3}\n'.format(table_name, gsi_name, current_reads, 
reads)) # depends on [control=['if'], data=['current_reads', 'reads']] elif current_reads < reads: message.append('{0} - GSI: {1} - Reads: UP from {2} to {3}\n'.format(table_name, gsi_name, current_reads, reads)) # depends on [control=['if'], data=['current_reads', 'reads']] if current_writes > writes: message.append('{0} - GSI: {1} - Writes: DOWN from {2} to {3}\n'.format(table_name, gsi_name, current_writes, writes)) # depends on [control=['if'], data=['current_writes', 'writes']] elif current_writes < writes: message.append('{0} - GSI: {1} - Writes: UP from {2} to {3}\n'.format(table_name, gsi_name, current_writes, writes)) # depends on [control=['if'], data=['current_writes', 'writes']] # See if we should send notifications for scale-down, scale-up or both sns_message_types = [] if current_reads > reads or current_writes > writes: sns_message_types.append('scale-down') # depends on [control=['if'], data=[]] if current_reads < reads or current_writes < writes: sns_message_types.append('scale-up') # depends on [control=['if'], data=[]] sns.publish_gsi_notification(table_key, gsi_key, ''.join(message), sns_message_types, subject='Updated provisioning for GSI {0}'.format(gsi_name)) # depends on [control=['try'], data=[]] except JSONResponseError as error: exception = error.body['__type'].split('#')[1] know_exceptions = ['LimitExceededException'] if exception in know_exceptions: logger.warning('{0} - GSI: {1} - {2}: {3}'.format(table_name, gsi_name, exception, error.body['message'])) # depends on [control=['if'], data=['exception']] else: logger.error('{0} - GSI: {1} - Unhandled exception: {2}: {3}. 
Please file a bug report at https://github.com/sebdah/dynamic-dynamodb/issues'.format(table_name, gsi_name, exception, error.body['message'])) if not retry_with_only_increase and exception == 'LimitExceededException': logger.info('{0} - GSI: {1} - Will retry to update provisioning with only increases'.format(table_name, gsi_name)) update_gsi_provisioning(table_name, table_key, gsi_name, gsi_key, reads, writes, retry_with_only_increase=True) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['error']]
def _LhD(self): """ Implements Lₕ and D. Returns ------- Lh : ndarray Uₕᵀ S₁⁻½ U₁ᵀ. D : ndarray (Sₕ ⊗ Sₓ + Iₕₓ)⁻¹. """ from numpy_sugar.linalg import ddot self._init_svd() if self._cache["LhD"] is not None: return self._cache["LhD"] S1, U1 = self.C1.eigh() U1S1 = ddot(U1, 1 / sqrt(S1)) Sh, Uh = eigh(U1S1.T @ self.C0.value() @ U1S1) self._cache["LhD"] = { "Lh": (U1S1 @ Uh).T, "D": 1 / (kron(Sh, self._Sx) + 1), "De": 1 / (kron(Sh, self._Sxe) + 1), } return self._cache["LhD"]
def function[_LhD, parameter[self]]: constant[ Implements Lₕ and D. Returns ------- Lh : ndarray Uₕᵀ S₁⁻½ U₁ᵀ. D : ndarray (Sₕ ⊗ Sₓ + Iₕₓ)⁻¹. ] from relative_module[numpy_sugar.linalg] import module[ddot] call[name[self]._init_svd, parameter[]] if compare[call[name[self]._cache][constant[LhD]] is_not constant[None]] begin[:] return[call[name[self]._cache][constant[LhD]]] <ast.Tuple object at 0x7da204962830> assign[=] call[name[self].C1.eigh, parameter[]] variable[U1S1] assign[=] call[name[ddot], parameter[name[U1], binary_operation[constant[1] / call[name[sqrt], parameter[name[S1]]]]]] <ast.Tuple object at 0x7da2049632e0> assign[=] call[name[eigh], parameter[binary_operation[binary_operation[name[U1S1].T <ast.MatMult object at 0x7da2590d6860> call[name[self].C0.value, parameter[]]] <ast.MatMult object at 0x7da2590d6860> name[U1S1]]]] call[name[self]._cache][constant[LhD]] assign[=] dictionary[[<ast.Constant object at 0x7da1b00f8eb0>, <ast.Constant object at 0x7da1b00faa40>, <ast.Constant object at 0x7da1b00f9360>], [<ast.Attribute object at 0x7da1b00f8ee0>, <ast.BinOp object at 0x7da1b00f9300>, <ast.BinOp object at 0x7da1b00f89a0>]] return[call[name[self]._cache][constant[LhD]]]
keyword[def] identifier[_LhD] ( identifier[self] ): literal[string] keyword[from] identifier[numpy_sugar] . identifier[linalg] keyword[import] identifier[ddot] identifier[self] . identifier[_init_svd] () keyword[if] identifier[self] . identifier[_cache] [ literal[string] ] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[self] . identifier[_cache] [ literal[string] ] identifier[S1] , identifier[U1] = identifier[self] . identifier[C1] . identifier[eigh] () identifier[U1S1] = identifier[ddot] ( identifier[U1] , literal[int] / identifier[sqrt] ( identifier[S1] )) identifier[Sh] , identifier[Uh] = identifier[eigh] ( identifier[U1S1] . identifier[T] @ identifier[self] . identifier[C0] . identifier[value] ()@ identifier[U1S1] ) identifier[self] . identifier[_cache] [ literal[string] ]={ literal[string] :( identifier[U1S1] @ identifier[Uh] ). identifier[T] , literal[string] : literal[int] /( identifier[kron] ( identifier[Sh] , identifier[self] . identifier[_Sx] )+ literal[int] ), literal[string] : literal[int] /( identifier[kron] ( identifier[Sh] , identifier[self] . identifier[_Sxe] )+ literal[int] ), } keyword[return] identifier[self] . identifier[_cache] [ literal[string] ]
def _LhD(self): """ Implements Lₕ and D. Returns ------- Lh : ndarray Uₕᵀ S₁⁻½ U₁ᵀ. D : ndarray (Sₕ ⊗ Sₓ + Iₕₓ)⁻¹. """ from numpy_sugar.linalg import ddot self._init_svd() if self._cache['LhD'] is not None: return self._cache['LhD'] # depends on [control=['if'], data=[]] (S1, U1) = self.C1.eigh() U1S1 = ddot(U1, 1 / sqrt(S1)) (Sh, Uh) = eigh(U1S1.T @ self.C0.value() @ U1S1) self._cache['LhD'] = {'Lh': (U1S1 @ Uh).T, 'D': 1 / (kron(Sh, self._Sx) + 1), 'De': 1 / (kron(Sh, self._Sxe) + 1)} return self._cache['LhD']
def set_slug(apps, schema_editor):
    """
    Populate the slug field for every Creator already in the DB.
    """
    creator_model = apps.get_model('spectator_core', 'Creator')
    for creator in creator_model.objects.all():
        creator.slug = generate_slug(creator.pk)
        creator.save(update_fields=['slug'])
def function[set_slug, parameter[apps, schema_editor]]: constant[ Create a slug for each Creator already in the DB. ] variable[Creator] assign[=] call[name[apps].get_model, parameter[constant[spectator_core], constant[Creator]]] for taget[name[c]] in starred[call[name[Creator].objects.all, parameter[]]] begin[:] name[c].slug assign[=] call[name[generate_slug], parameter[name[c].pk]] call[name[c].save, parameter[]]
keyword[def] identifier[set_slug] ( identifier[apps] , identifier[schema_editor] ): literal[string] identifier[Creator] = identifier[apps] . identifier[get_model] ( literal[string] , literal[string] ) keyword[for] identifier[c] keyword[in] identifier[Creator] . identifier[objects] . identifier[all] (): identifier[c] . identifier[slug] = identifier[generate_slug] ( identifier[c] . identifier[pk] ) identifier[c] . identifier[save] ( identifier[update_fields] =[ literal[string] ])
def set_slug(apps, schema_editor): """ Create a slug for each Creator already in the DB. """ Creator = apps.get_model('spectator_core', 'Creator') for c in Creator.objects.all(): c.slug = generate_slug(c.pk) c.save(update_fields=['slug']) # depends on [control=['for'], data=['c']]
def as_xml(self, parent):
    """
    Create XML representation of `self`.

    :Parameters:
        - `parent`: the element to which the created node should be linked to.
    :Types:
        - `parent`: `libxml2.xmlNode`

    :return: an XML node.
    :returntype: `libxml2.xmlNode`
    """
    status_node = parent.newChild(None, "status", None)
    # Code is rendered zero-padded to three digits, per MUC status codes.
    status_node.setProp("code", "%03i" % (self.code,))
    return status_node
def function[as_xml, parameter[self, parent]]: constant[ Create XML representation of `self`. :Parameters: - `parent`: the element to which the created node should be linked to. :Types: - `parent`: `libxml2.xmlNode` :return: an XML node. :returntype: `libxml2.xmlNode` ] variable[n] assign[=] call[name[parent].newChild, parameter[constant[None], constant[status], constant[None]]] call[name[n].setProp, parameter[constant[code], binary_operation[constant[%03i] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18f722200>]]]]] return[name[n]]
keyword[def] identifier[as_xml] ( identifier[self] , identifier[parent] ): literal[string] identifier[n] = identifier[parent] . identifier[newChild] ( keyword[None] , literal[string] , keyword[None] ) identifier[n] . identifier[setProp] ( literal[string] , literal[string] %( identifier[self] . identifier[code] ,)) keyword[return] identifier[n]
def as_xml(self, parent): """ Create XML representation of `self`. :Parameters: - `parent`: the element to which the created node should be linked to. :Types: - `parent`: `libxml2.xmlNode` :return: an XML node. :returntype: `libxml2.xmlNode` """ n = parent.newChild(None, 'status', None) n.setProp('code', '%03i' % (self.code,)) return n
def remove_edge(self, u, v, key=None):
    """Version of remove_edge that's much like normal networkx
    but only deletes once, since the database doesn't keep
    separate adj and succ mappings

    """
    try:
        edge_data = self.adj[u][v]
    except KeyError:
        raise NetworkXError(
            "The edge {}-{} is not in the graph.".format(u, v)
        )
    if key is None:
        # No key given: drop an arbitrary parallel edge.
        edge_data.popitem()
    else:
        try:
            del edge_data[key]
        except KeyError:
            raise NetworkXError(
                "The edge {}-{} with key {} is not in the graph.".format
                (u, v, key)
            )
    # Last parallel edge removed: drop the successor entry entirely.
    if not edge_data:
        del self.succ[u][v]
def function[remove_edge, parameter[self, u, v, key]]: constant[Version of remove_edge that's much like normal networkx but only deletes once, since the database doesn't keep separate adj and succ mappings ] <ast.Try object at 0x7da1b0b83be0> if compare[name[key] is constant[None]] begin[:] call[name[d].popitem, parameter[]] if compare[call[name[len], parameter[name[d]]] equal[==] constant[0]] begin[:] <ast.Delete object at 0x7da1b0b80790>
keyword[def] identifier[remove_edge] ( identifier[self] , identifier[u] , identifier[v] , identifier[key] = keyword[None] ): literal[string] keyword[try] : identifier[d] = identifier[self] . identifier[adj] [ identifier[u] ][ identifier[v] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[NetworkXError] ( literal[string] . identifier[format] ( identifier[u] , identifier[v] ) ) keyword[if] identifier[key] keyword[is] keyword[None] : identifier[d] . identifier[popitem] () keyword[else] : keyword[try] : keyword[del] identifier[d] [ identifier[key] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[NetworkXError] ( literal[string] . identifier[format] ( identifier[u] , identifier[v] , identifier[key] ) ) keyword[if] identifier[len] ( identifier[d] )== literal[int] : keyword[del] identifier[self] . identifier[succ] [ identifier[u] ][ identifier[v] ]
def remove_edge(self, u, v, key=None): """Version of remove_edge that's much like normal networkx but only deletes once, since the database doesn't keep separate adj and succ mappings """ try: d = self.adj[u][v] # depends on [control=['try'], data=[]] except KeyError: raise NetworkXError('The edge {}-{} is not in the graph.'.format(u, v)) # depends on [control=['except'], data=[]] if key is None: d.popitem() # depends on [control=['if'], data=[]] else: try: del d[key] # depends on [control=['try'], data=[]] except KeyError: raise NetworkXError('The edge {}-{} with key {} is not in the graph.'.format(u, v, key)) # depends on [control=['except'], data=[]] if len(d) == 0: del self.succ[u][v] # depends on [control=['if'], data=[]]
def query_gfy(self, gfyname):
    """
    Query a gfy name for URLs and more information.
    """
    self.check_token()
    resp = requests.get(QUERY_ENDPOINT + gfyname, headers=self.headers)
    payload = resp.json()
    # A payload-level error wins regardless of HTTP status.
    if ERROR_KEY in payload:
        raise GfycatClientError(payload[ERROR_KEY], resp.status_code)
    # No error body, but a non-200 status is still a failure.
    if resp.status_code != 200:
        raise GfycatClientError('Bad response from Gfycat', resp.status_code)
    return payload
def function[query_gfy, parameter[self, gfyname]]: constant[ Query a gfy name for URLs and more information. ] call[name[self].check_token, parameter[]] variable[r] assign[=] call[name[requests].get, parameter[binary_operation[name[QUERY_ENDPOINT] + name[gfyname]]]] variable[response] assign[=] call[name[r].json, parameter[]] if <ast.BoolOp object at 0x7da1b0c0d360> begin[:] <ast.Raise object at 0x7da1b0c0cb20> return[name[response]]
keyword[def] identifier[query_gfy] ( identifier[self] , identifier[gfyname] ): literal[string] identifier[self] . identifier[check_token] () identifier[r] = identifier[requests] . identifier[get] ( identifier[QUERY_ENDPOINT] + identifier[gfyname] , identifier[headers] = identifier[self] . identifier[headers] ) identifier[response] = identifier[r] . identifier[json] () keyword[if] identifier[r] . identifier[status_code] != literal[int] keyword[and] keyword[not] identifier[ERROR_KEY] keyword[in] identifier[response] : keyword[raise] identifier[GfycatClientError] ( literal[string] , identifier[r] . identifier[status_code] ) keyword[elif] identifier[ERROR_KEY] keyword[in] identifier[response] : keyword[raise] identifier[GfycatClientError] ( identifier[response] [ identifier[ERROR_KEY] ], identifier[r] . identifier[status_code] ) keyword[return] identifier[response]
def query_gfy(self, gfyname): """ Query a gfy name for URLs and more information. """ self.check_token() r = requests.get(QUERY_ENDPOINT + gfyname, headers=self.headers) response = r.json() if r.status_code != 200 and (not ERROR_KEY in response): raise GfycatClientError('Bad response from Gfycat', r.status_code) # depends on [control=['if'], data=[]] elif ERROR_KEY in response: raise GfycatClientError(response[ERROR_KEY], r.status_code) # depends on [control=['if'], data=['ERROR_KEY', 'response']] return response
def _write_series(self, workbook, worksheet):
    """
    Write the series column(s) to *worksheet*. Series start in the
    column following the last categories column, placing the series
    title in the first cell.
    """
    start_col = self._chart_data.categories.depth
    for offset, series in enumerate(self._chart_data):
        col = start_col + offset
        # Each series carries its own number format.
        fmt = workbook.add_format({'num_format': series.number_format})
        worksheet.write(0, col, series.name)
        worksheet.write_column(1, col, series.values, fmt)
def function[_write_series, parameter[self, workbook, worksheet]]: constant[ Write the series column(s) to *worksheet*. Series start in the column following the last categories column, placing the series title in the first cell. ] variable[col_offset] assign[=] name[self]._chart_data.categories.depth for taget[tuple[[<ast.Name object at 0x7da20c76ec20>, <ast.Name object at 0x7da20c76d750>]]] in starred[call[name[enumerate], parameter[name[self]._chart_data]]] begin[:] variable[num_format] assign[=] call[name[workbook].add_format, parameter[dictionary[[<ast.Constant object at 0x7da20c6aa7d0>], [<ast.Attribute object at 0x7da20c6ab700>]]]] variable[series_col] assign[=] binary_operation[name[idx] + name[col_offset]] call[name[worksheet].write, parameter[constant[0], name[series_col], name[series].name]] call[name[worksheet].write_column, parameter[constant[1], name[series_col], name[series].values, name[num_format]]]
keyword[def] identifier[_write_series] ( identifier[self] , identifier[workbook] , identifier[worksheet] ): literal[string] identifier[col_offset] = identifier[self] . identifier[_chart_data] . identifier[categories] . identifier[depth] keyword[for] identifier[idx] , identifier[series] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_chart_data] ): identifier[num_format] =( identifier[workbook] . identifier[add_format] ({ literal[string] : identifier[series] . identifier[number_format] }) ) identifier[series_col] = identifier[idx] + identifier[col_offset] identifier[worksheet] . identifier[write] ( literal[int] , identifier[series_col] , identifier[series] . identifier[name] ) identifier[worksheet] . identifier[write_column] ( literal[int] , identifier[series_col] , identifier[series] . identifier[values] , identifier[num_format] )
def _write_series(self, workbook, worksheet): """ Write the series column(s) to *worksheet*. Series start in the column following the last categories column, placing the series title in the first cell. """ col_offset = self._chart_data.categories.depth for (idx, series) in enumerate(self._chart_data): num_format = workbook.add_format({'num_format': series.number_format}) series_col = idx + col_offset worksheet.write(0, series_col, series.name) worksheet.write_column(1, series_col, series.values, num_format) # depends on [control=['for'], data=[]]
def nanopub_stats(ctx, input_fn):
    """Collect statistics on nanopub file

    input_fn can be json, jsonl or yaml and additionally gzipped
    """
    counts = {
        "nanopubs": 0,
        "assertions": {"total": 0, "subject_only": 0, "nested": 0, "relations": {}},
    }

    # Raw string: "\s*\(" as a plain string is an invalid escape sequence
    # (DeprecationWarning in modern Python).
    nested_re = re.compile(r"\s*\(")

    for np in bnf.read_nanopubs(input_fn):
        if "nanopub" not in np:
            continue
        assertions = np["nanopub"]["assertions"]
        counts["nanopubs"] += 1
        counts["assertions"]["total"] += len(assertions)
        for assertion in assertions:
            if assertion["relation"] is None:
                counts["assertions"]["subject_only"] += 1
                continue
            # An object starting with "(" is a nested statement.
            if nested_re.match(assertion["object"]):
                counts["assertions"]["nested"] += 1
            relation = assertion["relation"]
            relations = counts["assertions"]["relations"]
            relations[relation] = relations.get(relation, 0) + 1

    # BUG FIX: sorted() on a dict returns only its keys, which discarded
    # the per-relation counts accumulated above.  Sort by relation name
    # while keeping the counts.
    counts["assertions"]["relations"] = dict(
        sorted(counts["assertions"]["relations"].items())
    )

    print("DumpVar:\n", json.dumps(counts, indent=4))
def function[nanopub_stats, parameter[ctx, input_fn]]: constant[Collect statistics on nanopub file input_fn can be json, jsonl or yaml and additionally gzipped ] variable[counts] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a84c0>, <ast.Constant object at 0x7da20c6ab7f0>], [<ast.Constant object at 0x7da20c6a94e0>, <ast.Dict object at 0x7da20c6a9210>]] for taget[name[np]] in starred[call[name[bnf].read_nanopubs, parameter[name[input_fn]]]] begin[:] if compare[constant[nanopub] in name[np]] begin[:] <ast.AugAssign object at 0x7da20c6a93c0> <ast.AugAssign object at 0x7da20c6a8880> for taget[name[assertion]] in starred[call[call[name[np]][constant[nanopub]]][constant[assertions]]] begin[:] if compare[call[name[assertion]][constant[relation]] is constant[None]] begin[:] <ast.AugAssign object at 0x7da20c6ab370> call[call[name[counts]][constant[assertions]]][constant[relations]] assign[=] call[name[sorted], parameter[call[call[name[counts]][constant[assertions]]][constant[relations]]]] call[name[print], parameter[constant[DumpVar: ], call[name[json].dumps, parameter[name[counts]]]]]
keyword[def] identifier[nanopub_stats] ( identifier[ctx] , identifier[input_fn] ): literal[string] identifier[counts] ={ literal[string] : literal[int] , literal[string] :{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] :{}}, } keyword[for] identifier[np] keyword[in] identifier[bnf] . identifier[read_nanopubs] ( identifier[input_fn] ): keyword[if] literal[string] keyword[in] identifier[np] : identifier[counts] [ literal[string] ]+= literal[int] identifier[counts] [ literal[string] ][ literal[string] ]+= identifier[len] ( identifier[np] [ literal[string] ][ literal[string] ]) keyword[for] identifier[assertion] keyword[in] identifier[np] [ literal[string] ][ literal[string] ]: keyword[if] identifier[assertion] [ literal[string] ] keyword[is] keyword[None] : identifier[counts] [ literal[string] ][ literal[string] ]+= literal[int] keyword[else] : keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[assertion] [ literal[string] ]): identifier[counts] [ literal[string] ][ literal[string] ]+= literal[int] keyword[if] ( keyword[not] identifier[assertion] . identifier[get] ( literal[string] ) keyword[in] identifier[counts] [ literal[string] ][ literal[string] ] ): identifier[counts] [ literal[string] ][ literal[string] ][ identifier[assertion] . identifier[get] ( literal[string] )]= literal[int] keyword[else] : identifier[counts] [ literal[string] ][ literal[string] ][ identifier[assertion] . identifier[get] ( literal[string] ) ]+= literal[int] identifier[counts] [ literal[string] ][ literal[string] ]= identifier[sorted] ( identifier[counts] [ literal[string] ][ literal[string] ]) identifier[print] ( literal[string] , identifier[json] . identifier[dumps] ( identifier[counts] , identifier[indent] = literal[int] ))
def nanopub_stats(ctx, input_fn): """Collect statistics on nanopub file input_fn can be json, jsonl or yaml and additionally gzipped """ counts = {'nanopubs': 0, 'assertions': {'total': 0, 'subject_only': 0, 'nested': 0, 'relations': {}}} for np in bnf.read_nanopubs(input_fn): if 'nanopub' in np: counts['nanopubs'] += 1 counts['assertions']['total'] += len(np['nanopub']['assertions']) for assertion in np['nanopub']['assertions']: if assertion['relation'] is None: counts['assertions']['subject_only'] += 1 # depends on [control=['if'], data=[]] else: if re.match('\\s*\\(', assertion['object']): counts['assertions']['nested'] += 1 # depends on [control=['if'], data=[]] if not assertion.get('relation') in counts['assertions']['relations']: counts['assertions']['relations'][assertion.get('relation')] = 1 # depends on [control=['if'], data=[]] else: counts['assertions']['relations'][assertion.get('relation')] += 1 # depends on [control=['for'], data=['assertion']] # depends on [control=['if'], data=['np']] # depends on [control=['for'], data=['np']] counts['assertions']['relations'] = sorted(counts['assertions']['relations']) print('DumpVar:\n', json.dumps(counts, indent=4))
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document_id') and self.document_id is not None: _dict['document_id'] = self.document_id if hasattr(self, 'passage_score') and self.passage_score is not None: _dict['passage_score'] = self.passage_score if hasattr(self, 'passage_text') and self.passage_text is not None: _dict['passage_text'] = self.passage_text if hasattr(self, 'start_offset') and self.start_offset is not None: _dict['start_offset'] = self.start_offset if hasattr(self, 'end_offset') and self.end_offset is not None: _dict['end_offset'] = self.end_offset if hasattr(self, 'field') and self.field is not None: _dict['field'] = self.field return _dict
def function[_to_dict, parameter[self]]: constant[Return a json dictionary representing this model.] variable[_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da18fe92020> begin[:] call[name[_dict]][constant[document_id]] assign[=] name[self].document_id if <ast.BoolOp object at 0x7da18fe923b0> begin[:] call[name[_dict]][constant[passage_score]] assign[=] name[self].passage_score if <ast.BoolOp object at 0x7da18fe92a10> begin[:] call[name[_dict]][constant[passage_text]] assign[=] name[self].passage_text if <ast.BoolOp object at 0x7da18fe92b30> begin[:] call[name[_dict]][constant[start_offset]] assign[=] name[self].start_offset if <ast.BoolOp object at 0x7da18fe92fe0> begin[:] call[name[_dict]][constant[end_offset]] assign[=] name[self].end_offset if <ast.BoolOp object at 0x7da18fe91390> begin[:] call[name[_dict]][constant[field]] assign[=] name[self].field return[name[_dict]]
keyword[def] identifier[_to_dict] ( identifier[self] ): literal[string] identifier[_dict] ={} keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[document_id] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[document_id] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[passage_score] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[passage_score] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[passage_text] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[passage_text] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[start_offset] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[start_offset] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[end_offset] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[end_offset] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[field] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[field] keyword[return] identifier[_dict]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document_id') and self.document_id is not None: _dict['document_id'] = self.document_id # depends on [control=['if'], data=[]] if hasattr(self, 'passage_score') and self.passage_score is not None: _dict['passage_score'] = self.passage_score # depends on [control=['if'], data=[]] if hasattr(self, 'passage_text') and self.passage_text is not None: _dict['passage_text'] = self.passage_text # depends on [control=['if'], data=[]] if hasattr(self, 'start_offset') and self.start_offset is not None: _dict['start_offset'] = self.start_offset # depends on [control=['if'], data=[]] if hasattr(self, 'end_offset') and self.end_offset is not None: _dict['end_offset'] = self.end_offset # depends on [control=['if'], data=[]] if hasattr(self, 'field') and self.field is not None: _dict['field'] = self.field # depends on [control=['if'], data=[]] return _dict
def string_annotation(typ, default):
    """
    Construct a string representation of a type annotation.

    Parameters
    ----------
    typ : type
        Type to turn into a string
    default : any
        Default value (if any) of the type

    Returns
    -------
    str
        String version of the type annotation
    """
    try:
        if typ.__module__ == "builtins":
            type_string = f"`{typ.__name__}`"
        else:
            type_string = f"`{typ.__module__}.{typ.__name__}`"
    except AttributeError:
        # typing constructs (e.g. Optional[int]) may lack __name__/__module__.
        type_string = f"`{str(typ)}`"

    if default is None:
        return f"{type_string}, default ``None``"
    # BUG FIX: inspect._empty is a sentinel — compare with `is`, not `==`;
    # `==` could invoke an arbitrary __eq__ on the default value.
    if default is inspect._empty:
        return type_string
    return f"{type_string}, default ``{default}``"
def function[string_annotation, parameter[typ, default]]: constant[ Construct a string representation of a type annotation. Parameters ---------- typ : type Type to turn into a string default : any Default value (if any) of the type Returns ------- str String version of the type annotation ] <ast.Try object at 0x7da2041db400> if compare[name[default] is constant[None]] begin[:] variable[type_string] assign[=] <ast.JoinedStr object at 0x7da18f722b30> return[name[type_string]]
keyword[def] identifier[string_annotation] ( identifier[typ] , identifier[default] ): literal[string] keyword[try] : identifier[type_string] =( literal[string] keyword[if] identifier[typ] . identifier[__module__] == literal[string] keyword[else] literal[string] ) keyword[except] identifier[AttributeError] : identifier[type_string] = literal[string] keyword[if] identifier[default] keyword[is] keyword[None] : identifier[type_string] = literal[string] keyword[elif] identifier[default] == identifier[inspect] . identifier[_empty] : keyword[pass] keyword[else] : identifier[type_string] = literal[string] keyword[return] identifier[type_string]
def string_annotation(typ, default): """ Construct a string representation of a type annotation. Parameters ---------- typ : type Type to turn into a string default : any Default value (if any) of the type Returns ------- str String version of the type annotation """ try: type_string = f'`{typ.__name__}`' if typ.__module__ == 'builtins' else f'`{typ.__module__}.{typ.__name__}`' # depends on [control=['try'], data=[]] except AttributeError: type_string = f'`{str(typ)}`' # depends on [control=['except'], data=[]] if default is None: type_string = f'{type_string}, default ``None``' # depends on [control=['if'], data=[]] elif default == inspect._empty: pass # depends on [control=['if'], data=[]] else: type_string = f'{type_string}, default ``{default}``' return type_string
def get_affinity(pid):
    """
    Returns the affinity mask of the process whose ID is pid.

    @param pid: process PID (0 == current process)
    @type pid: C{int}

    @return: set of CPU ids
    @rtype: C{set}
    """
    mask = cpu_set_t()
    libnuma.sched_getaffinity(pid, sizeof(cpu_set_t), byref(mask))
    # One bit per possible CPU in the mask.
    total_bits = sizeof(cpu_set_t) * 8
    return {cpu for cpu in range(total_bits) if __CPU_ISSET(cpu, mask)}
def function[get_affinity, parameter[pid]]: constant[ Returns the affinity mask of the process whose ID is pid. @param pid: process PID (0 == current process) @type pid: C{int} @return: set of CPU ids @rtype: C{set} ] variable[cpuset] assign[=] call[name[cpu_set_t], parameter[]] variable[result] assign[=] call[name[set], parameter[]] call[name[libnuma].sched_getaffinity, parameter[name[pid], call[name[sizeof], parameter[name[cpu_set_t]]], call[name[byref], parameter[name[cpuset]]]]] for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[call[name[sizeof], parameter[name[cpu_set_t]]] * constant[8]]]]] begin[:] if call[name[__CPU_ISSET], parameter[name[i], name[cpuset]]] begin[:] call[name[result].add, parameter[name[i]]] return[name[result]]
keyword[def] identifier[get_affinity] ( identifier[pid] ): literal[string] identifier[cpuset] = identifier[cpu_set_t] () identifier[result] = identifier[set] () identifier[libnuma] . identifier[sched_getaffinity] ( identifier[pid] , identifier[sizeof] ( identifier[cpu_set_t] ), identifier[byref] ( identifier[cpuset] )) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[sizeof] ( identifier[cpu_set_t] )* literal[int] ): keyword[if] identifier[__CPU_ISSET] ( identifier[i] , identifier[cpuset] ): identifier[result] . identifier[add] ( identifier[i] ) keyword[return] identifier[result]
def get_affinity(pid): """ Returns the affinity mask of the process whose ID is pid. @param pid: process PID (0 == current process) @type pid: C{int} @return: set of CPU ids @rtype: C{set} """ cpuset = cpu_set_t() result = set() libnuma.sched_getaffinity(pid, sizeof(cpu_set_t), byref(cpuset)) for i in range(0, sizeof(cpu_set_t) * 8): if __CPU_ISSET(i, cpuset): result.add(i) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return result
def _doc(from_func): '''copy doc from one function to another use as a decorator eg:: @_doc(file.tell) def tell(..): ... ''' def decorator(to_func): to_func.__doc__ = from_func.__doc__ return to_func return decorator
def function[_doc, parameter[from_func]]: constant[copy doc from one function to another use as a decorator eg:: @_doc(file.tell) def tell(..): ... ] def function[decorator, parameter[to_func]]: name[to_func].__doc__ assign[=] name[from_func].__doc__ return[name[to_func]] return[name[decorator]]
keyword[def] identifier[_doc] ( identifier[from_func] ): literal[string] keyword[def] identifier[decorator] ( identifier[to_func] ): identifier[to_func] . identifier[__doc__] = identifier[from_func] . identifier[__doc__] keyword[return] identifier[to_func] keyword[return] identifier[decorator]
def _doc(from_func): """copy doc from one function to another use as a decorator eg:: @_doc(file.tell) def tell(..): ... """ def decorator(to_func): to_func.__doc__ = from_func.__doc__ return to_func return decorator
def no_duplicates_constructor(loader, node, deep=False):
    """Check for duplicate keys while loading YAML

    https://gist.github.com/pypt/94d747fe5180851196eb
    """
    seen_keys = set()
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        # Construct the value too, matching the full pre-scan of the mapping.
        loader.construct_object(value_node, deep=deep)
        if key in seen_keys:
            from intake.catalog.exceptions import DuplicateKeyError
            raise DuplicateKeyError("while constructing a mapping",
                                    node.start_mark,
                                    "found duplicate key (%s)" % key,
                                    key_node.start_mark)
        seen_keys.add(key)
    return loader.construct_mapping(node, deep)
def function[no_duplicates_constructor, parameter[loader, node, deep]]: constant[Check for duplicate keys while loading YAML https://gist.github.com/pypt/94d747fe5180851196eb ] variable[mapping] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b17f69b0>, <ast.Name object at 0x7da1b17f48e0>]]] in starred[name[node].value] begin[:] variable[key] assign[=] call[name[loader].construct_object, parameter[name[key_node]]] variable[value] assign[=] call[name[loader].construct_object, parameter[name[value_node]]] if compare[name[key] in name[mapping]] begin[:] from relative_module[intake.catalog.exceptions] import module[DuplicateKeyError] <ast.Raise object at 0x7da1b17f55d0> call[name[mapping]][name[key]] assign[=] name[value] return[call[name[loader].construct_mapping, parameter[name[node], name[deep]]]]
keyword[def] identifier[no_duplicates_constructor] ( identifier[loader] , identifier[node] , identifier[deep] = keyword[False] ): literal[string] identifier[mapping] ={} keyword[for] identifier[key_node] , identifier[value_node] keyword[in] identifier[node] . identifier[value] : identifier[key] = identifier[loader] . identifier[construct_object] ( identifier[key_node] , identifier[deep] = identifier[deep] ) identifier[value] = identifier[loader] . identifier[construct_object] ( identifier[value_node] , identifier[deep] = identifier[deep] ) keyword[if] identifier[key] keyword[in] identifier[mapping] : keyword[from] identifier[intake] . identifier[catalog] . identifier[exceptions] keyword[import] identifier[DuplicateKeyError] keyword[raise] identifier[DuplicateKeyError] ( literal[string] , identifier[node] . identifier[start_mark] , literal[string] % identifier[key] , identifier[key_node] . identifier[start_mark] ) identifier[mapping] [ identifier[key] ]= identifier[value] keyword[return] identifier[loader] . identifier[construct_mapping] ( identifier[node] , identifier[deep] )
def no_duplicates_constructor(loader, node, deep=False): """Check for duplicate keys while loading YAML https://gist.github.com/pypt/94d747fe5180851196eb """ mapping = {} for (key_node, value_node) in node.value: key = loader.construct_object(key_node, deep=deep) value = loader.construct_object(value_node, deep=deep) if key in mapping: from intake.catalog.exceptions import DuplicateKeyError raise DuplicateKeyError('while constructing a mapping', node.start_mark, 'found duplicate key (%s)' % key, key_node.start_mark) # depends on [control=['if'], data=['key']] mapping[key] = value # depends on [control=['for'], data=[]] return loader.construct_mapping(node, deep)
def get_partitions(self, persistence=None):
    """ Returns the partitioned data based on a specified persistence
        level.
        @ In, persistence, a floating point value specifying the
        size of the smallest feature we want to track.
        Default = None means consider all features.
        @ Out, a dictionary lists where each key is a min-max tuple
        specifying the index of the minimum and maximum,
        respectively. Each entry will hold a list of indices
        specifying points that are associated to this min-max pair.
    """
    if persistence is None:
        persistence = self.persistence

    def merged_extremum(index):
        # Walk the merge sequence until reaching an extremum that
        # survives at this persistence (or a self-merge fixed point).
        while (
            self.merge_sequence[index][0] < persistence
            and self.merge_sequence[index][1] != index
        ):
            index = self.merge_sequence[index][1]
        return index

    # TODO: Possibly cache at the critical persistence values,
    # previously caching was done at every query level, but that
    # does not make sense as the partitions will only change once
    # the next value in self.persistences is attained. Honestly,
    # this is probably not a necessary optimization that needs to
    # be made. Consider instead, Yarden's way of storing the points
    # such that merged arrays will be adjacent.
    merged = {}
    for (min_index, max_index), items in self.base_partitions.items():
        new_key = (merged_extremum(min_index), merged_extremum(max_index))
        merged.setdefault(new_key, []).extend(items.tolist())

    # Deduplicate and sort the member indices of each partition.
    return {key: sorted(set(members)) for key, members in merged.items()}
def function[get_partitions, parameter[self, persistence]]: constant[ Returns the partitioned data based on a specified persistence level. @ In, persistence, a floating point value specifying the size of the smallest feature we want to track. Default = None means consider all features. @ Out, a dictionary lists where each key is a min-max tuple specifying the index of the minimum and maximum, respectively. Each entry will hold a list of indices specifying points that are associated to this min-max pair. ] if compare[name[persistence] is constant[None]] begin[:] variable[persistence] assign[=] name[self].persistence variable[partitions] assign[=] dictionary[[], []] for taget[tuple[[<ast.Name object at 0x7da1b24b0730>, <ast.Name object at 0x7da1b24b1a50>]]] in starred[call[name[self].base_partitions.items, parameter[]]] begin[:] variable[min_index] assign[=] call[name[key]][constant[0]] variable[max_index] assign[=] call[name[key]][constant[1]] while <ast.BoolOp object at 0x7da1b24b3460> begin[:] variable[min_index] assign[=] call[call[name[self].merge_sequence][name[min_index]]][constant[1]] while <ast.BoolOp object at 0x7da1b24b0970> begin[:] variable[max_index] assign[=] call[call[name[self].merge_sequence][name[max_index]]][constant[1]] variable[new_key] assign[=] tuple[[<ast.Name object at 0x7da1b24b3490>, <ast.Name object at 0x7da1b24b2e90>]] if compare[name[new_key] <ast.NotIn object at 0x7da2590d7190> name[partitions]] begin[:] call[name[partitions]][name[new_key]] assign[=] list[[]] call[call[name[partitions]][name[new_key]].extend, parameter[call[name[items].tolist, parameter[]]]] for taget[name[key]] in starred[name[partitions]] begin[:] call[name[partitions]][name[key]] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[call[name[partitions]][name[key]]]]]]]] return[name[partitions]]
keyword[def] identifier[get_partitions] ( identifier[self] , identifier[persistence] = keyword[None] ): literal[string] keyword[if] identifier[persistence] keyword[is] keyword[None] : identifier[persistence] = identifier[self] . identifier[persistence] identifier[partitions] ={} keyword[for] identifier[key] , identifier[items] keyword[in] identifier[self] . identifier[base_partitions] . identifier[items] (): identifier[min_index] = identifier[key] [ literal[int] ] identifier[max_index] = identifier[key] [ literal[int] ] keyword[while] ( identifier[self] . identifier[merge_sequence] [ identifier[min_index] ][ literal[int] ]< identifier[persistence] keyword[and] identifier[self] . identifier[merge_sequence] [ identifier[min_index] ][ literal[int] ]!= identifier[min_index] ): identifier[min_index] = identifier[self] . identifier[merge_sequence] [ identifier[min_index] ][ literal[int] ] keyword[while] ( identifier[self] . identifier[merge_sequence] [ identifier[max_index] ][ literal[int] ]< identifier[persistence] keyword[and] identifier[self] . identifier[merge_sequence] [ identifier[max_index] ][ literal[int] ]!= identifier[max_index] ): identifier[max_index] = identifier[self] . identifier[merge_sequence] [ identifier[max_index] ][ literal[int] ] identifier[new_key] =( identifier[min_index] , identifier[max_index] ) keyword[if] identifier[new_key] keyword[not] keyword[in] identifier[partitions] : identifier[partitions] [ identifier[new_key] ]=[] identifier[partitions] [ identifier[new_key] ]. identifier[extend] ( identifier[items] . identifier[tolist] ()) keyword[for] identifier[key] keyword[in] identifier[partitions] : identifier[partitions] [ identifier[key] ]= identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[partitions] [ identifier[key] ]))) keyword[return] identifier[partitions]
def get_partitions(self, persistence=None): """ Returns the partitioned data based on a specified persistence level. @ In, persistence, a floating point value specifying the size of the smallest feature we want to track. Default = None means consider all features. @ Out, a dictionary lists where each key is a min-max tuple specifying the index of the minimum and maximum, respectively. Each entry will hold a list of indices specifying points that are associated to this min-max pair. """ if persistence is None: persistence = self.persistence # depends on [control=['if'], data=['persistence']] partitions = {} # TODO: Possibly cache at the critical persistence values, # previously caching was done at every query level, but that # does not make sense as the partitions will only change once # the next value in self.persistences is attained. Honestly, # this is probably not a necessary optimization that needs to # be made. Consider instead, Yarden's way of storing the points # such that merged arrays will be adjacent. for (key, items) in self.base_partitions.items(): min_index = key[0] max_index = key[1] while self.merge_sequence[min_index][0] < persistence and self.merge_sequence[min_index][1] != min_index: min_index = self.merge_sequence[min_index][1] # depends on [control=['while'], data=[]] while self.merge_sequence[max_index][0] < persistence and self.merge_sequence[max_index][1] != max_index: max_index = self.merge_sequence[max_index][1] # depends on [control=['while'], data=[]] new_key = (min_index, max_index) if new_key not in partitions: partitions[new_key] = [] # depends on [control=['if'], data=['new_key', 'partitions']] partitions[new_key].extend(items.tolist()) # depends on [control=['for'], data=[]] for key in partitions: partitions[key] = sorted(list(set(partitions[key]))) # depends on [control=['for'], data=['key']] return partitions
def project_ecef_vector_onto_sc(inst, x_label, y_label, z_label, new_x_label, new_y_label, new_z_label, meta=None): """Express input vector using s/c attitude directions x - ram pointing y - generally southward z - generally nadir Parameters ---------- x_label : string Label used to get ECEF-X component of vector to be projected y_label : string Label used to get ECEF-Y component of vector to be projected z_label : string Label used to get ECEF-Z component of vector to be projected new_x_label : string Label used to set X component of projected vector new_y_label : string Label used to set Y component of projected vector new_z_label : string Label used to set Z component of projected vector meta : array_like of dicts (None) Dicts contain metadata to be assigned. """ import pysatMagVect x, y, z = pysatMagVect.project_ecef_vector_onto_basis(inst[x_label], inst[y_label], inst[z_label], inst['sc_xhat_ecef_x'], inst['sc_xhat_ecef_y'], inst['sc_xhat_ecef_z'], inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'], inst['sc_zhat_ecef_x'], inst['sc_zhat_ecef_y'], inst['sc_zhat_ecef_z']) inst[new_x_label] = x inst[new_y_label] = y inst[new_z_label] = z if meta is not None: inst.meta[new_x_label] = meta[0] inst.meta[new_y_label] = meta[1] inst.meta[new_z_label] = meta[2] return
def function[project_ecef_vector_onto_sc, parameter[inst, x_label, y_label, z_label, new_x_label, new_y_label, new_z_label, meta]]: constant[Express input vector using s/c attitude directions x - ram pointing y - generally southward z - generally nadir Parameters ---------- x_label : string Label used to get ECEF-X component of vector to be projected y_label : string Label used to get ECEF-Y component of vector to be projected z_label : string Label used to get ECEF-Z component of vector to be projected new_x_label : string Label used to set X component of projected vector new_y_label : string Label used to set Y component of projected vector new_z_label : string Label used to set Z component of projected vector meta : array_like of dicts (None) Dicts contain metadata to be assigned. ] import module[pysatMagVect] <ast.Tuple object at 0x7da1b0f50a00> assign[=] call[name[pysatMagVect].project_ecef_vector_onto_basis, parameter[call[name[inst]][name[x_label]], call[name[inst]][name[y_label]], call[name[inst]][name[z_label]], call[name[inst]][constant[sc_xhat_ecef_x]], call[name[inst]][constant[sc_xhat_ecef_y]], call[name[inst]][constant[sc_xhat_ecef_z]], call[name[inst]][constant[sc_yhat_ecef_x]], call[name[inst]][constant[sc_yhat_ecef_y]], call[name[inst]][constant[sc_yhat_ecef_z]], call[name[inst]][constant[sc_zhat_ecef_x]], call[name[inst]][constant[sc_zhat_ecef_y]], call[name[inst]][constant[sc_zhat_ecef_z]]]] call[name[inst]][name[new_x_label]] assign[=] name[x] call[name[inst]][name[new_y_label]] assign[=] name[y] call[name[inst]][name[new_z_label]] assign[=] name[z] if compare[name[meta] is_not constant[None]] begin[:] call[name[inst].meta][name[new_x_label]] assign[=] call[name[meta]][constant[0]] call[name[inst].meta][name[new_y_label]] assign[=] call[name[meta]][constant[1]] call[name[inst].meta][name[new_z_label]] assign[=] call[name[meta]][constant[2]] return[None]
keyword[def] identifier[project_ecef_vector_onto_sc] ( identifier[inst] , identifier[x_label] , identifier[y_label] , identifier[z_label] , identifier[new_x_label] , identifier[new_y_label] , identifier[new_z_label] , identifier[meta] = keyword[None] ): literal[string] keyword[import] identifier[pysatMagVect] identifier[x] , identifier[y] , identifier[z] = identifier[pysatMagVect] . identifier[project_ecef_vector_onto_basis] ( identifier[inst] [ identifier[x_label] ], identifier[inst] [ identifier[y_label] ], identifier[inst] [ identifier[z_label] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ], identifier[inst] [ literal[string] ]) identifier[inst] [ identifier[new_x_label] ]= identifier[x] identifier[inst] [ identifier[new_y_label] ]= identifier[y] identifier[inst] [ identifier[new_z_label] ]= identifier[z] keyword[if] identifier[meta] keyword[is] keyword[not] keyword[None] : identifier[inst] . identifier[meta] [ identifier[new_x_label] ]= identifier[meta] [ literal[int] ] identifier[inst] . identifier[meta] [ identifier[new_y_label] ]= identifier[meta] [ literal[int] ] identifier[inst] . identifier[meta] [ identifier[new_z_label] ]= identifier[meta] [ literal[int] ] keyword[return]
def project_ecef_vector_onto_sc(inst, x_label, y_label, z_label, new_x_label, new_y_label, new_z_label, meta=None): """Express input vector using s/c attitude directions x - ram pointing y - generally southward z - generally nadir Parameters ---------- x_label : string Label used to get ECEF-X component of vector to be projected y_label : string Label used to get ECEF-Y component of vector to be projected z_label : string Label used to get ECEF-Z component of vector to be projected new_x_label : string Label used to set X component of projected vector new_y_label : string Label used to set Y component of projected vector new_z_label : string Label used to set Z component of projected vector meta : array_like of dicts (None) Dicts contain metadata to be assigned. """ import pysatMagVect (x, y, z) = pysatMagVect.project_ecef_vector_onto_basis(inst[x_label], inst[y_label], inst[z_label], inst['sc_xhat_ecef_x'], inst['sc_xhat_ecef_y'], inst['sc_xhat_ecef_z'], inst['sc_yhat_ecef_x'], inst['sc_yhat_ecef_y'], inst['sc_yhat_ecef_z'], inst['sc_zhat_ecef_x'], inst['sc_zhat_ecef_y'], inst['sc_zhat_ecef_z']) inst[new_x_label] = x inst[new_y_label] = y inst[new_z_label] = z if meta is not None: inst.meta[new_x_label] = meta[0] inst.meta[new_y_label] = meta[1] inst.meta[new_z_label] = meta[2] # depends on [control=['if'], data=['meta']] return
def get_cache_key(**kwargs): """ Get MD5 encoded cache key for given arguments. Here is the format of key before MD5 encryption. key1:value1__key2:value2 ... Example: >>> get_cache_key(site_domain="example.com", resource="enterprise") # Here is key format for above call # "site_domain:example.com__resource:enterprise" a54349175618ff1659dee0978e3149ca Arguments: **kwargs: Key word arguments that need to be present in cache key. Returns: An MD5 encoded key uniquely identified by the key word arguments. """ key = '__'.join(['{}:{}'.format(item, value) for item, value in iteritems(kwargs)]) return hashlib.md5(key.encode('utf-8')).hexdigest()
def function[get_cache_key, parameter[]]: constant[ Get MD5 encoded cache key for given arguments. Here is the format of key before MD5 encryption. key1:value1__key2:value2 ... Example: >>> get_cache_key(site_domain="example.com", resource="enterprise") # Here is key format for above call # "site_domain:example.com__resource:enterprise" a54349175618ff1659dee0978e3149ca Arguments: **kwargs: Key word arguments that need to be present in cache key. Returns: An MD5 encoded key uniquely identified by the key word arguments. ] variable[key] assign[=] call[constant[__].join, parameter[<ast.ListComp object at 0x7da18f09c190>]] return[call[call[name[hashlib].md5, parameter[call[name[key].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]]]
keyword[def] identifier[get_cache_key] (** identifier[kwargs] ): literal[string] identifier[key] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[item] , identifier[value] ) keyword[for] identifier[item] , identifier[value] keyword[in] identifier[iteritems] ( identifier[kwargs] )]) keyword[return] identifier[hashlib] . identifier[md5] ( identifier[key] . identifier[encode] ( literal[string] )). identifier[hexdigest] ()
def get_cache_key(**kwargs): """ Get MD5 encoded cache key for given arguments. Here is the format of key before MD5 encryption. key1:value1__key2:value2 ... Example: >>> get_cache_key(site_domain="example.com", resource="enterprise") # Here is key format for above call # "site_domain:example.com__resource:enterprise" a54349175618ff1659dee0978e3149ca Arguments: **kwargs: Key word arguments that need to be present in cache key. Returns: An MD5 encoded key uniquely identified by the key word arguments. """ key = '__'.join(['{}:{}'.format(item, value) for (item, value) in iteritems(kwargs)]) return hashlib.md5(key.encode('utf-8')).hexdigest()
def quick_scan(zap_helper, url, **options): """ Run a quick scan of a site by opening a URL, optionally spidering the URL, running an Active Scan, and reporting any issues found. This command contains most scan options as parameters, so you can do everything in one go. If any alerts are found for the given alert level, this command will exit with a status code of 1. """ if options['self_contained']: console.info('Starting ZAP daemon') with helpers.zap_error_handler(): zap_helper.start(options['start_options']) console.info('Running a quick scan for {0}'.format(url)) with helpers.zap_error_handler(): if options['scanners']: zap_helper.set_enabled_scanners(options['scanners']) if options['exclude']: zap_helper.exclude_from_all(options['exclude']) zap_helper.open_url(url) if options['spider']: zap_helper.run_spider(url, options['context_name'], options['user_name']) if options['ajax_spider']: zap_helper.run_ajax_spider(url) zap_helper.run_active_scan(url, options['recursive'], options['context_name'], options['user_name']) alerts = zap_helper.alerts(options['alert_level']) helpers.report_alerts(alerts, options['output_format']) if options['self_contained']: console.info('Shutting down ZAP daemon') with helpers.zap_error_handler(): zap_helper.shutdown() exit_code = 1 if len(alerts) > 0 else 0 sys.exit(exit_code)
def function[quick_scan, parameter[zap_helper, url]]: constant[ Run a quick scan of a site by opening a URL, optionally spidering the URL, running an Active Scan, and reporting any issues found. This command contains most scan options as parameters, so you can do everything in one go. If any alerts are found for the given alert level, this command will exit with a status code of 1. ] if call[name[options]][constant[self_contained]] begin[:] call[name[console].info, parameter[constant[Starting ZAP daemon]]] with call[name[helpers].zap_error_handler, parameter[]] begin[:] call[name[zap_helper].start, parameter[call[name[options]][constant[start_options]]]] call[name[console].info, parameter[call[constant[Running a quick scan for {0}].format, parameter[name[url]]]]] with call[name[helpers].zap_error_handler, parameter[]] begin[:] if call[name[options]][constant[scanners]] begin[:] call[name[zap_helper].set_enabled_scanners, parameter[call[name[options]][constant[scanners]]]] if call[name[options]][constant[exclude]] begin[:] call[name[zap_helper].exclude_from_all, parameter[call[name[options]][constant[exclude]]]] call[name[zap_helper].open_url, parameter[name[url]]] if call[name[options]][constant[spider]] begin[:] call[name[zap_helper].run_spider, parameter[name[url], call[name[options]][constant[context_name]], call[name[options]][constant[user_name]]]] if call[name[options]][constant[ajax_spider]] begin[:] call[name[zap_helper].run_ajax_spider, parameter[name[url]]] call[name[zap_helper].run_active_scan, parameter[name[url], call[name[options]][constant[recursive]], call[name[options]][constant[context_name]], call[name[options]][constant[user_name]]]] variable[alerts] assign[=] call[name[zap_helper].alerts, parameter[call[name[options]][constant[alert_level]]]] call[name[helpers].report_alerts, parameter[name[alerts], call[name[options]][constant[output_format]]]] if call[name[options]][constant[self_contained]] begin[:] call[name[console].info, 
parameter[constant[Shutting down ZAP daemon]]] with call[name[helpers].zap_error_handler, parameter[]] begin[:] call[name[zap_helper].shutdown, parameter[]] variable[exit_code] assign[=] <ast.IfExp object at 0x7da2054a6740> call[name[sys].exit, parameter[name[exit_code]]]
keyword[def] identifier[quick_scan] ( identifier[zap_helper] , identifier[url] ,** identifier[options] ): literal[string] keyword[if] identifier[options] [ literal[string] ]: identifier[console] . identifier[info] ( literal[string] ) keyword[with] identifier[helpers] . identifier[zap_error_handler] (): identifier[zap_helper] . identifier[start] ( identifier[options] [ literal[string] ]) identifier[console] . identifier[info] ( literal[string] . identifier[format] ( identifier[url] )) keyword[with] identifier[helpers] . identifier[zap_error_handler] (): keyword[if] identifier[options] [ literal[string] ]: identifier[zap_helper] . identifier[set_enabled_scanners] ( identifier[options] [ literal[string] ]) keyword[if] identifier[options] [ literal[string] ]: identifier[zap_helper] . identifier[exclude_from_all] ( identifier[options] [ literal[string] ]) identifier[zap_helper] . identifier[open_url] ( identifier[url] ) keyword[if] identifier[options] [ literal[string] ]: identifier[zap_helper] . identifier[run_spider] ( identifier[url] , identifier[options] [ literal[string] ], identifier[options] [ literal[string] ]) keyword[if] identifier[options] [ literal[string] ]: identifier[zap_helper] . identifier[run_ajax_spider] ( identifier[url] ) identifier[zap_helper] . identifier[run_active_scan] ( identifier[url] , identifier[options] [ literal[string] ], identifier[options] [ literal[string] ], identifier[options] [ literal[string] ]) identifier[alerts] = identifier[zap_helper] . identifier[alerts] ( identifier[options] [ literal[string] ]) identifier[helpers] . identifier[report_alerts] ( identifier[alerts] , identifier[options] [ literal[string] ]) keyword[if] identifier[options] [ literal[string] ]: identifier[console] . identifier[info] ( literal[string] ) keyword[with] identifier[helpers] . identifier[zap_error_handler] (): identifier[zap_helper] . 
identifier[shutdown] () identifier[exit_code] = literal[int] keyword[if] identifier[len] ( identifier[alerts] )> literal[int] keyword[else] literal[int] identifier[sys] . identifier[exit] ( identifier[exit_code] )
def quick_scan(zap_helper, url, **options): """ Run a quick scan of a site by opening a URL, optionally spidering the URL, running an Active Scan, and reporting any issues found. This command contains most scan options as parameters, so you can do everything in one go. If any alerts are found for the given alert level, this command will exit with a status code of 1. """ if options['self_contained']: console.info('Starting ZAP daemon') with helpers.zap_error_handler(): zap_helper.start(options['start_options']) # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] console.info('Running a quick scan for {0}'.format(url)) with helpers.zap_error_handler(): if options['scanners']: zap_helper.set_enabled_scanners(options['scanners']) # depends on [control=['if'], data=[]] if options['exclude']: zap_helper.exclude_from_all(options['exclude']) # depends on [control=['if'], data=[]] zap_helper.open_url(url) if options['spider']: zap_helper.run_spider(url, options['context_name'], options['user_name']) # depends on [control=['if'], data=[]] if options['ajax_spider']: zap_helper.run_ajax_spider(url) # depends on [control=['if'], data=[]] zap_helper.run_active_scan(url, options['recursive'], options['context_name'], options['user_name']) # depends on [control=['with'], data=[]] alerts = zap_helper.alerts(options['alert_level']) helpers.report_alerts(alerts, options['output_format']) if options['self_contained']: console.info('Shutting down ZAP daemon') with helpers.zap_error_handler(): zap_helper.shutdown() # depends on [control=['with'], data=[]] # depends on [control=['if'], data=[]] exit_code = 1 if len(alerts) > 0 else 0 sys.exit(exit_code)
def add_scope_ip(ipaddress, name, description, scopeid, auth, url): """ Function to add new host IP address allocation to existing scope ID :param ipaddress: :param name: name of the owner of this host :param description: Description of the host :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: :rtype: >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host','175', auth.creds, auth.url) """ new_ip = { "ip": ipaddress, "name": name, "description": description} add_scope_ip_url = '/imcrs/res/access/assignedIpScope/ip?ipScopeId='+str(scopeid) f_url = url + add_scope_ip_url payload = json.dumps(new_ip) r = requests.post(f_url, auth=auth, headers=HEADERS, data=payload) # creates the URL using the payload variable as the contents try: if r.status_code == 200: #print("IP Scope Successfully Created") return r.status_code elif r.status_code == 409: #print("IP Scope Already Exists") return r.status_code except requests.exceptions.RequestException as e: return "Error:\n" + str(e) + " add_ip_scope: An Error has occured"
def function[add_scope_ip, parameter[ipaddress, name, description, scopeid, auth, url]]: constant[ Function to add new host IP address allocation to existing scope ID :param ipaddress: :param name: name of the owner of this host :param description: Description of the host :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: :rtype: >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host','175', auth.creds, auth.url) ] variable[new_ip] assign[=] dictionary[[<ast.Constant object at 0x7da18eb57580>, <ast.Constant object at 0x7da18eb56bf0>, <ast.Constant object at 0x7da18eb56bc0>], [<ast.Name object at 0x7da18eb54550>, <ast.Name object at 0x7da18eb57190>, <ast.Name object at 0x7da18eb56fe0>]] variable[add_scope_ip_url] assign[=] binary_operation[constant[/imcrs/res/access/assignedIpScope/ip?ipScopeId=] + call[name[str], parameter[name[scopeid]]]] variable[f_url] assign[=] binary_operation[name[url] + name[add_scope_ip_url]] variable[payload] assign[=] call[name[json].dumps, parameter[name[new_ip]]] variable[r] assign[=] call[name[requests].post, parameter[name[f_url]]] <ast.Try object at 0x7da18eb55450>
keyword[def] identifier[add_scope_ip] ( identifier[ipaddress] , identifier[name] , identifier[description] , identifier[scopeid] , identifier[auth] , identifier[url] ): literal[string] identifier[new_ip] ={ literal[string] : identifier[ipaddress] , literal[string] : identifier[name] , literal[string] : identifier[description] } identifier[add_scope_ip_url] = literal[string] + identifier[str] ( identifier[scopeid] ) identifier[f_url] = identifier[url] + identifier[add_scope_ip_url] identifier[payload] = identifier[json] . identifier[dumps] ( identifier[new_ip] ) identifier[r] = identifier[requests] . identifier[post] ( identifier[f_url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[HEADERS] , identifier[data] = identifier[payload] ) keyword[try] : keyword[if] identifier[r] . identifier[status_code] == literal[int] : keyword[return] identifier[r] . identifier[status_code] keyword[elif] identifier[r] . identifier[status_code] == literal[int] : keyword[return] identifier[r] . identifier[status_code] keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[e] : keyword[return] literal[string] + identifier[str] ( identifier[e] )+ literal[string]
def add_scope_ip(ipaddress, name, description, scopeid, auth, url): """ Function to add new host IP address allocation to existing scope ID :param ipaddress: :param name: name of the owner of this host :param description: Description of the host :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: :rtype: >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host','175', auth.creds, auth.url) """ new_ip = {'ip': ipaddress, 'name': name, 'description': description} add_scope_ip_url = '/imcrs/res/access/assignedIpScope/ip?ipScopeId=' + str(scopeid) f_url = url + add_scope_ip_url payload = json.dumps(new_ip) r = requests.post(f_url, auth=auth, headers=HEADERS, data=payload) # creates the URL using the payload variable as the contents try: if r.status_code == 200: #print("IP Scope Successfully Created") return r.status_code # depends on [control=['if'], data=[]] elif r.status_code == 409: #print("IP Scope Already Exists") return r.status_code # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except requests.exceptions.RequestException as e: return 'Error:\n' + str(e) + ' add_ip_scope: An Error has occured' # depends on [control=['except'], data=['e']]
def _get_counter_reference(self): """Identify which factory should be used for a shared counter.""" if (self.model is not None and self.base_factory is not None and self.base_factory._meta.model is not None and issubclass(self.model, self.base_factory._meta.model)): return self.base_factory._meta.counter_reference else: return self
def function[_get_counter_reference, parameter[self]]: constant[Identify which factory should be used for a shared counter.] if <ast.BoolOp object at 0x7da1b1d36530> begin[:] return[name[self].base_factory._meta.counter_reference]
keyword[def] identifier[_get_counter_reference] ( identifier[self] ): literal[string] keyword[if] ( identifier[self] . identifier[model] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[base_factory] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[base_factory] . identifier[_meta] . identifier[model] keyword[is] keyword[not] keyword[None] keyword[and] identifier[issubclass] ( identifier[self] . identifier[model] , identifier[self] . identifier[base_factory] . identifier[_meta] . identifier[model] )): keyword[return] identifier[self] . identifier[base_factory] . identifier[_meta] . identifier[counter_reference] keyword[else] : keyword[return] identifier[self]
def _get_counter_reference(self): """Identify which factory should be used for a shared counter.""" if self.model is not None and self.base_factory is not None and (self.base_factory._meta.model is not None) and issubclass(self.model, self.base_factory._meta.model): return self.base_factory._meta.counter_reference # depends on [control=['if'], data=[]] else: return self
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(zmqHandler, self).get_default_config_help() config.update({ 'port': '', }) return config
def function[get_default_config_help, parameter[self]]: constant[ Returns the help text for the configuration options for this handler ] variable[config] assign[=] call[call[name[super], parameter[name[zmqHandler], name[self]]].get_default_config_help, parameter[]] call[name[config].update, parameter[dictionary[[<ast.Constant object at 0x7da18dc07af0>], [<ast.Constant object at 0x7da18dc05ab0>]]]] return[name[config]]
keyword[def] identifier[get_default_config_help] ( identifier[self] ): literal[string] identifier[config] = identifier[super] ( identifier[zmqHandler] , identifier[self] ). identifier[get_default_config_help] () identifier[config] . identifier[update] ({ literal[string] : literal[string] , }) keyword[return] identifier[config]
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(zmqHandler, self).get_default_config_help() config.update({'port': ''}) return config
def select_one(self, *args): ''' Select a single instance from the instance pool. Query operators such as where_eq(), order_by() or filter functions may be passed as optional arguments. ''' s = apply_query_operators(self.storage, args) return next(iter(s), None)
def function[select_one, parameter[self]]: constant[ Select a single instance from the instance pool. Query operators such as where_eq(), order_by() or filter functions may be passed as optional arguments. ] variable[s] assign[=] call[name[apply_query_operators], parameter[name[self].storage, name[args]]] return[call[name[next], parameter[call[name[iter], parameter[name[s]]], constant[None]]]]
keyword[def] identifier[select_one] ( identifier[self] ,* identifier[args] ): literal[string] identifier[s] = identifier[apply_query_operators] ( identifier[self] . identifier[storage] , identifier[args] ) keyword[return] identifier[next] ( identifier[iter] ( identifier[s] ), keyword[None] )
def select_one(self, *args): """ Select a single instance from the instance pool. Query operators such as where_eq(), order_by() or filter functions may be passed as optional arguments. """ s = apply_query_operators(self.storage, args) return next(iter(s), None)
def read(self, path, **params): """ Read data from Vault. Returns the JSON-decoded response. """ d = self.request('GET', '/v1/' + path, params=params) return d.addCallback(self._handle_response)
def function[read, parameter[self, path]]: constant[ Read data from Vault. Returns the JSON-decoded response. ] variable[d] assign[=] call[name[self].request, parameter[constant[GET], binary_operation[constant[/v1/] + name[path]]]] return[call[name[d].addCallback, parameter[name[self]._handle_response]]]
keyword[def] identifier[read] ( identifier[self] , identifier[path] ,** identifier[params] ): literal[string] identifier[d] = identifier[self] . identifier[request] ( literal[string] , literal[string] + identifier[path] , identifier[params] = identifier[params] ) keyword[return] identifier[d] . identifier[addCallback] ( identifier[self] . identifier[_handle_response] )
def read(self, path, **params): """ Read data from Vault. Returns the JSON-decoded response. """ d = self.request('GET', '/v1/' + path, params=params) return d.addCallback(self._handle_response)
def save_authorization_code(self, client_id, code, request, *args, **kwargs): """Persist the authorization code.""" log.debug( 'Persist authorization code %r for client %r', code, client_id ) request.client = request.client or self._clientgetter(client_id) self._grantsetter(client_id, code, request, *args, **kwargs) return request.client.default_redirect_uri
def function[save_authorization_code, parameter[self, client_id, code, request]]: constant[Persist the authorization code.] call[name[log].debug, parameter[constant[Persist authorization code %r for client %r], name[code], name[client_id]]] name[request].client assign[=] <ast.BoolOp object at 0x7da1b025b250> call[name[self]._grantsetter, parameter[name[client_id], name[code], name[request], <ast.Starred object at 0x7da1b025a050>]] return[name[request].client.default_redirect_uri]
keyword[def] identifier[save_authorization_code] ( identifier[self] , identifier[client_id] , identifier[code] , identifier[request] , * identifier[args] ,** identifier[kwargs] ): literal[string] identifier[log] . identifier[debug] ( literal[string] , identifier[code] , identifier[client_id] ) identifier[request] . identifier[client] = identifier[request] . identifier[client] keyword[or] identifier[self] . identifier[_clientgetter] ( identifier[client_id] ) identifier[self] . identifier[_grantsetter] ( identifier[client_id] , identifier[code] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[request] . identifier[client] . identifier[default_redirect_uri]
def save_authorization_code(self, client_id, code, request, *args, **kwargs): """Persist the authorization code.""" log.debug('Persist authorization code %r for client %r', code, client_id) request.client = request.client or self._clientgetter(client_id) self._grantsetter(client_id, code, request, *args, **kwargs) return request.client.default_redirect_uri
def getobject(bunchdt, key, name): """get the object if you have the key and the name returns a list of objects, in case you have more than one You should not have more than one""" # TODO : throw exception if more than one object, or return more objects idfobjects = bunchdt[key] if idfobjects: # second item in list is a unique ID unique_id = idfobjects[0].objls[1] theobjs = [idfobj for idfobj in idfobjects if idfobj[unique_id].upper() == name.upper()] try: return theobjs[0] except IndexError: return None
def function[getobject, parameter[bunchdt, key, name]]: constant[get the object if you have the key and the name returns a list of objects, in case you have more than one You should not have more than one] variable[idfobjects] assign[=] call[name[bunchdt]][name[key]] if name[idfobjects] begin[:] variable[unique_id] assign[=] call[call[name[idfobjects]][constant[0]].objls][constant[1]] variable[theobjs] assign[=] <ast.ListComp object at 0x7da20c76c670> <ast.Try object at 0x7da20c76e6e0>
keyword[def] identifier[getobject] ( identifier[bunchdt] , identifier[key] , identifier[name] ): literal[string] identifier[idfobjects] = identifier[bunchdt] [ identifier[key] ] keyword[if] identifier[idfobjects] : identifier[unique_id] = identifier[idfobjects] [ literal[int] ]. identifier[objls] [ literal[int] ] identifier[theobjs] =[ identifier[idfobj] keyword[for] identifier[idfobj] keyword[in] identifier[idfobjects] keyword[if] identifier[idfobj] [ identifier[unique_id] ]. identifier[upper] ()== identifier[name] . identifier[upper] ()] keyword[try] : keyword[return] identifier[theobjs] [ literal[int] ] keyword[except] identifier[IndexError] : keyword[return] keyword[None]
def getobject(bunchdt, key, name): """get the object if you have the key and the name returns a list of objects, in case you have more than one You should not have more than one""" # TODO : throw exception if more than one object, or return more objects idfobjects = bunchdt[key] if idfobjects: # second item in list is a unique ID unique_id = idfobjects[0].objls[1] # depends on [control=['if'], data=[]] theobjs = [idfobj for idfobj in idfobjects if idfobj[unique_id].upper() == name.upper()] try: return theobjs[0] # depends on [control=['try'], data=[]] except IndexError: return None # depends on [control=['except'], data=[]]
def _ensure_tree(path): """Create a directory (and any ancestor directories required). :param path: Directory to create """ try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST: if not os.path.isdir(path): raise else: return False elif e.errno == errno.EISDIR: return False else: raise else: return True
def function[_ensure_tree, parameter[path]]: constant[Create a directory (and any ancestor directories required). :param path: Directory to create ] <ast.Try object at 0x7da1b11bed10>
keyword[def] identifier[_ensure_tree] ( identifier[path] ): literal[string] keyword[try] : identifier[os] . identifier[makedirs] ( identifier[path] ) keyword[except] identifier[OSError] keyword[as] identifier[e] : keyword[if] identifier[e] . identifier[errno] == identifier[errno] . identifier[EEXIST] : keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[path] ): keyword[raise] keyword[else] : keyword[return] keyword[False] keyword[elif] identifier[e] . identifier[errno] == identifier[errno] . identifier[EISDIR] : keyword[return] keyword[False] keyword[else] : keyword[raise] keyword[else] : keyword[return] keyword[True]
def _ensure_tree(path): """Create a directory (and any ancestor directories required). :param path: Directory to create """ try: os.makedirs(path) # depends on [control=['try'], data=[]] except OSError as e: if e.errno == errno.EEXIST: if not os.path.isdir(path): raise # depends on [control=['if'], data=[]] else: return False # depends on [control=['if'], data=[]] elif e.errno == errno.EISDIR: return False # depends on [control=['if'], data=[]] else: raise # depends on [control=['except'], data=['e']] else: return True
def timezone(zone): r''' Return a datetime.tzinfo implementation for the given timezone >>> from datetime import datetime, timedelta >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> eastern.zone 'US/Eastern' >>> timezone(unicode('US/Eastern')) is eastern True >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' >>> (loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' >>> (loc_dt + timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:10:00 EST (-0500)' Raises UnknownTimeZoneError if passed an unknown zone. >>> try: ... timezone('Asia/Shangri-La') ... except UnknownTimeZoneError: ... print('Unknown') Unknown >>> try: ... timezone(unicode('\N{TRADE MARK SIGN}')) ... except UnknownTimeZoneError: ... print('Unknown') Unknown ''' if zone.upper() == 'UTC': return utc try: zone = ascii(zone) except UnicodeEncodeError: # All valid timezones are ASCII raise UnknownTimeZoneError(zone) zone = _unmunge_zone(zone) if zone not in _tzinfo_cache: if zone in all_timezones_set: fp = open_resource(zone) try: _tzinfo_cache[zone] = build_tzinfo(zone, fp) finally: fp.close() else: raise UnknownTimeZoneError(zone) return _tzinfo_cache[zone]
def function[timezone, parameter[zone]]: constant[ Return a datetime.tzinfo implementation for the given timezone >>> from datetime import datetime, timedelta >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> eastern.zone 'US/Eastern' >>> timezone(unicode('US/Eastern')) is eastern True >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' >>> (loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' >>> (loc_dt + timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:10:00 EST (-0500)' Raises UnknownTimeZoneError if passed an unknown zone. >>> try: ... timezone('Asia/Shangri-La') ... except UnknownTimeZoneError: ... print('Unknown') Unknown >>> try: ... timezone(unicode('\N{TRADE MARK SIGN}')) ... except UnknownTimeZoneError: ... print('Unknown') Unknown ] if compare[call[name[zone].upper, parameter[]] equal[==] constant[UTC]] begin[:] return[name[utc]] <ast.Try object at 0x7da20c6c5db0> variable[zone] assign[=] call[name[_unmunge_zone], parameter[name[zone]]] if compare[name[zone] <ast.NotIn object at 0x7da2590d7190> name[_tzinfo_cache]] begin[:] if compare[name[zone] in name[all_timezones_set]] begin[:] variable[fp] assign[=] call[name[open_resource], parameter[name[zone]]] <ast.Try object at 0x7da2047e9c60> return[call[name[_tzinfo_cache]][name[zone]]]
keyword[def] identifier[timezone] ( identifier[zone] ): literal[string] keyword[if] identifier[zone] . identifier[upper] ()== literal[string] : keyword[return] identifier[utc] keyword[try] : identifier[zone] = identifier[ascii] ( identifier[zone] ) keyword[except] identifier[UnicodeEncodeError] : keyword[raise] identifier[UnknownTimeZoneError] ( identifier[zone] ) identifier[zone] = identifier[_unmunge_zone] ( identifier[zone] ) keyword[if] identifier[zone] keyword[not] keyword[in] identifier[_tzinfo_cache] : keyword[if] identifier[zone] keyword[in] identifier[all_timezones_set] : identifier[fp] = identifier[open_resource] ( identifier[zone] ) keyword[try] : identifier[_tzinfo_cache] [ identifier[zone] ]= identifier[build_tzinfo] ( identifier[zone] , identifier[fp] ) keyword[finally] : identifier[fp] . identifier[close] () keyword[else] : keyword[raise] identifier[UnknownTimeZoneError] ( identifier[zone] ) keyword[return] identifier[_tzinfo_cache] [ identifier[zone] ]
def timezone(zone): """ Return a datetime.tzinfo implementation for the given timezone >>> from datetime import datetime, timedelta >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> eastern.zone 'US/Eastern' >>> timezone(unicode('US/Eastern')) is eastern True >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' >>> (loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' >>> (loc_dt + timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:10:00 EST (-0500)' Raises UnknownTimeZoneError if passed an unknown zone. >>> try: ... timezone('Asia/Shangri-La') ... except UnknownTimeZoneError: ... print('Unknown') Unknown >>> try: ... timezone(unicode('\\N{TRADE MARK SIGN}')) ... except UnknownTimeZoneError: ... print('Unknown') Unknown """ if zone.upper() == 'UTC': return utc # depends on [control=['if'], data=[]] try: zone = ascii(zone) # depends on [control=['try'], data=[]] except UnicodeEncodeError: # All valid timezones are ASCII raise UnknownTimeZoneError(zone) # depends on [control=['except'], data=[]] zone = _unmunge_zone(zone) if zone not in _tzinfo_cache: if zone in all_timezones_set: fp = open_resource(zone) try: _tzinfo_cache[zone] = build_tzinfo(zone, fp) # depends on [control=['try'], data=[]] finally: fp.close() # depends on [control=['if'], data=['zone']] else: raise UnknownTimeZoneError(zone) # depends on [control=['if'], data=['zone', '_tzinfo_cache']] return _tzinfo_cache[zone]
def parse_rule(name, rule_text, do_raise=False): """ Parses the given rule text. :param name: The name of the rule. Used when emitting log messages regarding a failure to parse the rule. :param rule_text: The text of the rule to parse. :param do_raise: If ``False`` and the rule fails to parse, a log message is emitted to the "policies" logger at level WARN, and a rule that always evaluates to ``False`` will be returned. If ``True``, a ``pyparsing.ParseException`` will be raised. :returns: An instance of ``policies.instructions.Instructions``, containing the instructions necessary to evaluate the authorization rule. """ try: return rule.parseString(rule_text, parseAll=True)[0] except pyparsing.ParseException as exc: # Allow for debugging if do_raise: raise # Get the logger and emit our log messages log = logging.getLogger('policies') log.warn("Failed to parse rule %r: %s" % (name, exc)) log.warn("Rule line: %s" % exc.line) log.warn("Location : %s^" % (" " * (exc.col - 1))) # Construct and return a fail-closed instruction return Instructions([Constant(False), set_authz])
def function[parse_rule, parameter[name, rule_text, do_raise]]: constant[ Parses the given rule text. :param name: The name of the rule. Used when emitting log messages regarding a failure to parse the rule. :param rule_text: The text of the rule to parse. :param do_raise: If ``False`` and the rule fails to parse, a log message is emitted to the "policies" logger at level WARN, and a rule that always evaluates to ``False`` will be returned. If ``True``, a ``pyparsing.ParseException`` will be raised. :returns: An instance of ``policies.instructions.Instructions``, containing the instructions necessary to evaluate the authorization rule. ] <ast.Try object at 0x7da20e954610>
keyword[def] identifier[parse_rule] ( identifier[name] , identifier[rule_text] , identifier[do_raise] = keyword[False] ): literal[string] keyword[try] : keyword[return] identifier[rule] . identifier[parseString] ( identifier[rule_text] , identifier[parseAll] = keyword[True] )[ literal[int] ] keyword[except] identifier[pyparsing] . identifier[ParseException] keyword[as] identifier[exc] : keyword[if] identifier[do_raise] : keyword[raise] identifier[log] = identifier[logging] . identifier[getLogger] ( literal[string] ) identifier[log] . identifier[warn] ( literal[string] %( identifier[name] , identifier[exc] )) identifier[log] . identifier[warn] ( literal[string] % identifier[exc] . identifier[line] ) identifier[log] . identifier[warn] ( literal[string] %( literal[string] *( identifier[exc] . identifier[col] - literal[int] ))) keyword[return] identifier[Instructions] ([ identifier[Constant] ( keyword[False] ), identifier[set_authz] ])
def parse_rule(name, rule_text, do_raise=False): """ Parses the given rule text. :param name: The name of the rule. Used when emitting log messages regarding a failure to parse the rule. :param rule_text: The text of the rule to parse. :param do_raise: If ``False`` and the rule fails to parse, a log message is emitted to the "policies" logger at level WARN, and a rule that always evaluates to ``False`` will be returned. If ``True``, a ``pyparsing.ParseException`` will be raised. :returns: An instance of ``policies.instructions.Instructions``, containing the instructions necessary to evaluate the authorization rule. """ try: return rule.parseString(rule_text, parseAll=True)[0] # depends on [control=['try'], data=[]] except pyparsing.ParseException as exc: # Allow for debugging if do_raise: raise # depends on [control=['if'], data=[]] # Get the logger and emit our log messages log = logging.getLogger('policies') log.warn('Failed to parse rule %r: %s' % (name, exc)) log.warn('Rule line: %s' % exc.line) log.warn('Location : %s^' % (' ' * (exc.col - 1))) # Construct and return a fail-closed instruction return Instructions([Constant(False), set_authz]) # depends on [control=['except'], data=['exc']]
def get_routing_attributes(obj, modify_doc=False, keys=None): """ Loops through the provided object (using the dir() function) and finds any callables which match the name signature (e.g. get_foo()) AND has a docstring beginning with a path-like char string. This does process things in alphabetical order (rather than than the unpredictable __dict__ attribute) so take this into consideration if certain routes should be checked before others. Unfortunately, this is a problem because the 'all' method will always come before others, so there is no capturing one type followed by a catch-all 'all'. Until a solution is found, just make a router by hand. """ if keys is None: keys = dir(obj) for val, method_str in _find_routeable_attributes(obj, keys): path, *doc = val.__doc__.split(maxsplit=1) or ('', '') if not path: continue if modify_doc: val.__doc__ = ''.join(doc) method = HTTPMethod[method_str] yield method, path, val
def function[get_routing_attributes, parameter[obj, modify_doc, keys]]: constant[ Loops through the provided object (using the dir() function) and finds any callables which match the name signature (e.g. get_foo()) AND has a docstring beginning with a path-like char string. This does process things in alphabetical order (rather than than the unpredictable __dict__ attribute) so take this into consideration if certain routes should be checked before others. Unfortunately, this is a problem because the 'all' method will always come before others, so there is no capturing one type followed by a catch-all 'all'. Until a solution is found, just make a router by hand. ] if compare[name[keys] is constant[None]] begin[:] variable[keys] assign[=] call[name[dir], parameter[name[obj]]] for taget[tuple[[<ast.Name object at 0x7da20c7c8220>, <ast.Name object at 0x7da20c7c9f30>]]] in starred[call[name[_find_routeable_attributes], parameter[name[obj], name[keys]]]] begin[:] <ast.Tuple object at 0x7da20c7c98d0> assign[=] <ast.BoolOp object at 0x7da20c7cb610> if <ast.UnaryOp object at 0x7da2054a5ea0> begin[:] continue if name[modify_doc] begin[:] name[val].__doc__ assign[=] call[constant[].join, parameter[name[doc]]] variable[method] assign[=] call[name[HTTPMethod]][name[method_str]] <ast.Yield object at 0x7da2054a5570>
keyword[def] identifier[get_routing_attributes] ( identifier[obj] , identifier[modify_doc] = keyword[False] , identifier[keys] = keyword[None] ): literal[string] keyword[if] identifier[keys] keyword[is] keyword[None] : identifier[keys] = identifier[dir] ( identifier[obj] ) keyword[for] identifier[val] , identifier[method_str] keyword[in] identifier[_find_routeable_attributes] ( identifier[obj] , identifier[keys] ): identifier[path] ,* identifier[doc] = identifier[val] . identifier[__doc__] . identifier[split] ( identifier[maxsplit] = literal[int] ) keyword[or] ( literal[string] , literal[string] ) keyword[if] keyword[not] identifier[path] : keyword[continue] keyword[if] identifier[modify_doc] : identifier[val] . identifier[__doc__] = literal[string] . identifier[join] ( identifier[doc] ) identifier[method] = identifier[HTTPMethod] [ identifier[method_str] ] keyword[yield] identifier[method] , identifier[path] , identifier[val]
def get_routing_attributes(obj, modify_doc=False, keys=None): """ Loops through the provided object (using the dir() function) and finds any callables which match the name signature (e.g. get_foo()) AND has a docstring beginning with a path-like char string. This does process things in alphabetical order (rather than than the unpredictable __dict__ attribute) so take this into consideration if certain routes should be checked before others. Unfortunately, this is a problem because the 'all' method will always come before others, so there is no capturing one type followed by a catch-all 'all'. Until a solution is found, just make a router by hand. """ if keys is None: keys = dir(obj) # depends on [control=['if'], data=['keys']] for (val, method_str) in _find_routeable_attributes(obj, keys): (path, *doc) = val.__doc__.split(maxsplit=1) or ('', '') if not path: continue # depends on [control=['if'], data=[]] if modify_doc: val.__doc__ = ''.join(doc) # depends on [control=['if'], data=[]] method = HTTPMethod[method_str] yield (method, path, val) # depends on [control=['for'], data=[]]
def create(vm_): ''' Create a VM in Xen The configuration for this function is read from the profile settings. .. code-block:: bash salt-cloud -p some_profile xenvm01 ''' name = vm_['name'] record = {} ret = {} # fire creating event __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(name), args={ 'name': name, 'profile': vm_['profile'], 'provider': vm_['driver'], }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) log.debug('Adding %s to cloud cache.', name) __utils__['cloud.cachedir_index_add']( vm_['name'], vm_['profile'], 'xen', vm_['driver'] ) # connect to xen session = _get_session() # determine resource pool resource_pool = _determine_resource_pool(session, vm_) # determine storage repo storage_repo = _determine_storage_repo(session, resource_pool, vm_) # build VM image = vm_.get('image') clone = vm_.get('clone') if clone is None: clone = True log.debug('Clone: %s ', clone) # fire event to read new vm properties (requesting) __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(name), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) # create by cloning template if clone: _clone_vm(image, name, session) else: _copy_vm(image, name, session, storage_repo) # provision template to vm _provision_vm(name, session) vm = _get_vm(name, session) # start vm start(name, None, session) # get new VM vm = _get_vm(name, session) # wait for vm to report IP via guest tools _wait_for_ip(name, session) # set static IP if configured _set_static_ip(name, session, vm_) # if not deploying salt then exit deploy = vm_.get('deploy', True) log.debug('delopy is set to %s', deploy) if deploy: record = session.xenapi.VM.get_record(vm) if record is not None: _deploy_salt_minion(name, session, vm_) else: log.debug( 'The Salt minion will not be installed, deploy: %s', vm_['deploy'] ) record = session.xenapi.VM.get_record(vm) ret = show_instance(name) ret.update({'extra': 
record}) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(name), args={ 'name': name, 'profile': vm_['profile'], 'provider': vm_['driver'], }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret
def function[create, parameter[vm_]]: constant[ Create a VM in Xen The configuration for this function is read from the profile settings. .. code-block:: bash salt-cloud -p some_profile xenvm01 ] variable[name] assign[=] call[name[vm_]][constant[name]] variable[record] assign[=] dictionary[[], []] variable[ret] assign[=] dictionary[[], []] call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[starting create], call[constant[salt/cloud/{0}/creating].format, parameter[name[name]]]]] call[name[log].debug, parameter[constant[Adding %s to cloud cache.], name[name]]] call[call[name[__utils__]][constant[cloud.cachedir_index_add]], parameter[call[name[vm_]][constant[name]], call[name[vm_]][constant[profile]], constant[xen], call[name[vm_]][constant[driver]]]] variable[session] assign[=] call[name[_get_session], parameter[]] variable[resource_pool] assign[=] call[name[_determine_resource_pool], parameter[name[session], name[vm_]]] variable[storage_repo] assign[=] call[name[_determine_storage_repo], parameter[name[session], name[resource_pool], name[vm_]]] variable[image] assign[=] call[name[vm_].get, parameter[constant[image]]] variable[clone] assign[=] call[name[vm_].get, parameter[constant[clone]]] if compare[name[clone] is constant[None]] begin[:] variable[clone] assign[=] constant[True] call[name[log].debug, parameter[constant[Clone: %s ], name[clone]]] call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[requesting instance], call[constant[salt/cloud/{0}/requesting].format, parameter[name[name]]]]] if name[clone] begin[:] call[name[_clone_vm], parameter[name[image], name[name], name[session]]] call[name[_provision_vm], parameter[name[name], name[session]]] variable[vm] assign[=] call[name[_get_vm], parameter[name[name], name[session]]] call[name[start], parameter[name[name], constant[None], name[session]]] variable[vm] assign[=] call[name[_get_vm], parameter[name[name], name[session]]] 
call[name[_wait_for_ip], parameter[name[name], name[session]]] call[name[_set_static_ip], parameter[name[name], name[session], name[vm_]]] variable[deploy] assign[=] call[name[vm_].get, parameter[constant[deploy], constant[True]]] call[name[log].debug, parameter[constant[delopy is set to %s], name[deploy]]] if name[deploy] begin[:] variable[record] assign[=] call[name[session].xenapi.VM.get_record, parameter[name[vm]]] if compare[name[record] is_not constant[None]] begin[:] call[name[_deploy_salt_minion], parameter[name[name], name[session], name[vm_]]] variable[record] assign[=] call[name[session].xenapi.VM.get_record, parameter[name[vm]]] variable[ret] assign[=] call[name[show_instance], parameter[name[name]]] call[name[ret].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1c80490>], [<ast.Name object at 0x7da1b1c80040>]]]] call[call[name[__utils__]][constant[cloud.fire_event]], parameter[constant[event], constant[created instance], call[constant[salt/cloud/{0}/created].format, parameter[name[name]]]]] return[name[ret]]
keyword[def] identifier[create] ( identifier[vm_] ): literal[string] identifier[name] = identifier[vm_] [ literal[string] ] identifier[record] ={} identifier[ret] ={} identifier[__utils__] [ literal[string] ]( literal[string] , literal[string] , literal[string] . identifier[format] ( identifier[name] ), identifier[args] ={ literal[string] : identifier[name] , literal[string] : identifier[vm_] [ literal[string] ], literal[string] : identifier[vm_] [ literal[string] ], }, identifier[sock_dir] = identifier[__opts__] [ literal[string] ], identifier[transport] = identifier[__opts__] [ literal[string] ] ) identifier[log] . identifier[debug] ( literal[string] , identifier[name] ) identifier[__utils__] [ literal[string] ]( identifier[vm_] [ literal[string] ], identifier[vm_] [ literal[string] ], literal[string] , identifier[vm_] [ literal[string] ] ) identifier[session] = identifier[_get_session] () identifier[resource_pool] = identifier[_determine_resource_pool] ( identifier[session] , identifier[vm_] ) identifier[storage_repo] = identifier[_determine_storage_repo] ( identifier[session] , identifier[resource_pool] , identifier[vm_] ) identifier[image] = identifier[vm_] . identifier[get] ( literal[string] ) identifier[clone] = identifier[vm_] . identifier[get] ( literal[string] ) keyword[if] identifier[clone] keyword[is] keyword[None] : identifier[clone] = keyword[True] identifier[log] . identifier[debug] ( literal[string] , identifier[clone] ) identifier[__utils__] [ literal[string] ]( literal[string] , literal[string] , literal[string] . 
identifier[format] ( identifier[name] ), identifier[sock_dir] = identifier[__opts__] [ literal[string] ], identifier[transport] = identifier[__opts__] [ literal[string] ] ) keyword[if] identifier[clone] : identifier[_clone_vm] ( identifier[image] , identifier[name] , identifier[session] ) keyword[else] : identifier[_copy_vm] ( identifier[image] , identifier[name] , identifier[session] , identifier[storage_repo] ) identifier[_provision_vm] ( identifier[name] , identifier[session] ) identifier[vm] = identifier[_get_vm] ( identifier[name] , identifier[session] ) identifier[start] ( identifier[name] , keyword[None] , identifier[session] ) identifier[vm] = identifier[_get_vm] ( identifier[name] , identifier[session] ) identifier[_wait_for_ip] ( identifier[name] , identifier[session] ) identifier[_set_static_ip] ( identifier[name] , identifier[session] , identifier[vm_] ) identifier[deploy] = identifier[vm_] . identifier[get] ( literal[string] , keyword[True] ) identifier[log] . identifier[debug] ( literal[string] , identifier[deploy] ) keyword[if] identifier[deploy] : identifier[record] = identifier[session] . identifier[xenapi] . identifier[VM] . identifier[get_record] ( identifier[vm] ) keyword[if] identifier[record] keyword[is] keyword[not] keyword[None] : identifier[_deploy_salt_minion] ( identifier[name] , identifier[session] , identifier[vm_] ) keyword[else] : identifier[log] . identifier[debug] ( literal[string] , identifier[vm_] [ literal[string] ] ) identifier[record] = identifier[session] . identifier[xenapi] . identifier[VM] . identifier[get_record] ( identifier[vm] ) identifier[ret] = identifier[show_instance] ( identifier[name] ) identifier[ret] . identifier[update] ({ literal[string] : identifier[record] }) identifier[__utils__] [ literal[string] ]( literal[string] , literal[string] , literal[string] . 
identifier[format] ( identifier[name] ), identifier[args] ={ literal[string] : identifier[name] , literal[string] : identifier[vm_] [ literal[string] ], literal[string] : identifier[vm_] [ literal[string] ], }, identifier[sock_dir] = identifier[__opts__] [ literal[string] ], identifier[transport] = identifier[__opts__] [ literal[string] ] ) keyword[return] identifier[ret]
def create(vm_): """ Create a VM in Xen The configuration for this function is read from the profile settings. .. code-block:: bash salt-cloud -p some_profile xenvm01 """ name = vm_['name'] record = {} ret = {} # fire creating event __utils__['cloud.fire_event']('event', 'starting create', 'salt/cloud/{0}/creating'.format(name), args={'name': name, 'profile': vm_['profile'], 'provider': vm_['driver']}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) log.debug('Adding %s to cloud cache.', name) __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'xen', vm_['driver']) # connect to xen session = _get_session() # determine resource pool resource_pool = _determine_resource_pool(session, vm_) # determine storage repo storage_repo = _determine_storage_repo(session, resource_pool, vm_) # build VM image = vm_.get('image') clone = vm_.get('clone') if clone is None: clone = True # depends on [control=['if'], data=['clone']] log.debug('Clone: %s ', clone) # fire event to read new vm properties (requesting) __utils__['cloud.fire_event']('event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(name), sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) # create by cloning template if clone: _clone_vm(image, name, session) # depends on [control=['if'], data=[]] else: _copy_vm(image, name, session, storage_repo) # provision template to vm _provision_vm(name, session) vm = _get_vm(name, session) # start vm start(name, None, session) # get new VM vm = _get_vm(name, session) # wait for vm to report IP via guest tools _wait_for_ip(name, session) # set static IP if configured _set_static_ip(name, session, vm_) # if not deploying salt then exit deploy = vm_.get('deploy', True) log.debug('delopy is set to %s', deploy) if deploy: record = session.xenapi.VM.get_record(vm) if record is not None: _deploy_salt_minion(name, session, vm_) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: log.debug('The Salt 
minion will not be installed, deploy: %s', vm_['deploy']) record = session.xenapi.VM.get_record(vm) ret = show_instance(name) ret.update({'extra': record}) __utils__['cloud.fire_event']('event', 'created instance', 'salt/cloud/{0}/created'.format(name), args={'name': name, 'profile': vm_['profile'], 'provider': vm_['driver']}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport']) return ret
def draw_image(data, obj): """Returns the PGFPlots code for an image environment. """ content = [] filename, rel_filepath = files.new_filename(data, "img", ".png") # store the image as in a file img_array = obj.get_array() dims = img_array.shape if len(dims) == 2: # the values are given as one real number: look at cmap clims = obj.get_clim() mpl.pyplot.imsave( fname=filename, arr=img_array, cmap=obj.get_cmap(), vmin=clims[0], vmax=clims[1], origin=obj.origin, ) else: # RGB (+alpha) information at each point assert len(dims) == 3 and dims[2] in [3, 4] # convert to PIL image if obj.origin == "lower": img_array = numpy.flipud(img_array) # Convert mpl image to PIL image = PIL.Image.fromarray(numpy.uint8(img_array * 255)) # If the input image is PIL: # image = PIL.Image.fromarray(img_array) image.save(filename, origin=obj.origin) # write the corresponding information to the TikZ file extent = obj.get_extent() # the format specification will only accept tuples if not isinstance(extent, tuple): extent = tuple(extent) # Explicitly use \pgfimage as includegrapics command, as the default # \includegraphics fails unexpectedly in some cases ff = data["float format"] content.append( ( "\\addplot graphics [includegraphics cmd=\\pgfimage," "xmin=" + ff + ", xmax=" + ff + ", " "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n" ).format(*(extent + (rel_filepath,))) ) return data, content
def function[draw_image, parameter[data, obj]]: constant[Returns the PGFPlots code for an image environment. ] variable[content] assign[=] list[[]] <ast.Tuple object at 0x7da1b16be8c0> assign[=] call[name[files].new_filename, parameter[name[data], constant[img], constant[.png]]] variable[img_array] assign[=] call[name[obj].get_array, parameter[]] variable[dims] assign[=] name[img_array].shape if compare[call[name[len], parameter[name[dims]]] equal[==] constant[2]] begin[:] variable[clims] assign[=] call[name[obj].get_clim, parameter[]] call[name[mpl].pyplot.imsave, parameter[]] variable[extent] assign[=] call[name[obj].get_extent, parameter[]] if <ast.UnaryOp object at 0x7da1b16bf790> begin[:] variable[extent] assign[=] call[name[tuple], parameter[name[extent]]] variable[ff] assign[=] call[name[data]][constant[float format]] call[name[content].append, parameter[call[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[\addplot graphics [includegraphics cmd=\pgfimage,xmin=] + name[ff]] + constant[, xmax=]] + name[ff]] + constant[, ymin=]] + name[ff]] + constant[, ymax=]] + name[ff]] + constant[] {{{}}}; ]].format, parameter[<ast.Starred object at 0x7da1b16bd960>]]]] return[tuple[[<ast.Name object at 0x7da1b16beec0>, <ast.Name object at 0x7da1b16bd0c0>]]]
keyword[def] identifier[draw_image] ( identifier[data] , identifier[obj] ): literal[string] identifier[content] =[] identifier[filename] , identifier[rel_filepath] = identifier[files] . identifier[new_filename] ( identifier[data] , literal[string] , literal[string] ) identifier[img_array] = identifier[obj] . identifier[get_array] () identifier[dims] = identifier[img_array] . identifier[shape] keyword[if] identifier[len] ( identifier[dims] )== literal[int] : identifier[clims] = identifier[obj] . identifier[get_clim] () identifier[mpl] . identifier[pyplot] . identifier[imsave] ( identifier[fname] = identifier[filename] , identifier[arr] = identifier[img_array] , identifier[cmap] = identifier[obj] . identifier[get_cmap] (), identifier[vmin] = identifier[clims] [ literal[int] ], identifier[vmax] = identifier[clims] [ literal[int] ], identifier[origin] = identifier[obj] . identifier[origin] , ) keyword[else] : keyword[assert] identifier[len] ( identifier[dims] )== literal[int] keyword[and] identifier[dims] [ literal[int] ] keyword[in] [ literal[int] , literal[int] ] keyword[if] identifier[obj] . identifier[origin] == literal[string] : identifier[img_array] = identifier[numpy] . identifier[flipud] ( identifier[img_array] ) identifier[image] = identifier[PIL] . identifier[Image] . identifier[fromarray] ( identifier[numpy] . identifier[uint8] ( identifier[img_array] * literal[int] )) identifier[image] . identifier[save] ( identifier[filename] , identifier[origin] = identifier[obj] . identifier[origin] ) identifier[extent] = identifier[obj] . identifier[get_extent] () keyword[if] keyword[not] identifier[isinstance] ( identifier[extent] , identifier[tuple] ): identifier[extent] = identifier[tuple] ( identifier[extent] ) identifier[ff] = identifier[data] [ literal[string] ] identifier[content] . 
identifier[append] ( ( literal[string] literal[string] + identifier[ff] + literal[string] + identifier[ff] + literal[string] literal[string] + identifier[ff] + literal[string] + identifier[ff] + literal[string] ). identifier[format] (*( identifier[extent] +( identifier[rel_filepath] ,))) ) keyword[return] identifier[data] , identifier[content]
def draw_image(data, obj): """Returns the PGFPlots code for an image environment. """ content = [] (filename, rel_filepath) = files.new_filename(data, 'img', '.png') # store the image as in a file img_array = obj.get_array() dims = img_array.shape if len(dims) == 2: # the values are given as one real number: look at cmap clims = obj.get_clim() mpl.pyplot.imsave(fname=filename, arr=img_array, cmap=obj.get_cmap(), vmin=clims[0], vmax=clims[1], origin=obj.origin) # depends on [control=['if'], data=[]] else: # RGB (+alpha) information at each point assert len(dims) == 3 and dims[2] in [3, 4] # convert to PIL image if obj.origin == 'lower': img_array = numpy.flipud(img_array) # depends on [control=['if'], data=[]] # Convert mpl image to PIL image = PIL.Image.fromarray(numpy.uint8(img_array * 255)) # If the input image is PIL: # image = PIL.Image.fromarray(img_array) image.save(filename, origin=obj.origin) # write the corresponding information to the TikZ file extent = obj.get_extent() # the format specification will only accept tuples if not isinstance(extent, tuple): extent = tuple(extent) # depends on [control=['if'], data=[]] # Explicitly use \pgfimage as includegrapics command, as the default # \includegraphics fails unexpectedly in some cases ff = data['float format'] content.append(('\\addplot graphics [includegraphics cmd=\\pgfimage,xmin=' + ff + ', xmax=' + ff + ', ymin=' + ff + ', ymax=' + ff + '] {{{}}};\n').format(*extent + (rel_filepath,))) return (data, content)
def parse_rectlabel_app_output(self): """ Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the column headers. Could be useful for subsequent string manipulation therefore not prefixed with an underscore RectLabel info: https://rectlabel.com/ """ # get json files only files = [] files = [f for f in os.listdir() if f[-5:] == '.json'] if len(files) == 0: print('No json files found in this directory') return None max_boxes = 0 rows = [] for each_file in files: f = open(each_file, 'r') j = f.read() j = json.loads(j) f.close() # running count of the # of boxes. if len(j['objects']) > max_boxes: max_boxes = len(j['objects']) # Each json file will end up being a row # set labels row = [] for o in j['objects']: labels = {} labels['label'] = o['label'] labels['x'] = o['x_y_w_h'][0] labels['y'] = o['x_y_w_h'][1] labels['width'] = o['x_y_w_h'][2] labels['height'] = o['x_y_w_h'][3] # String manipulation for csv labels_right_format = '\"' + json.dumps(labels).replace('"', '\"\"') + '\"' row.append(labels_right_format) row.insert(0, '\"' + j['filename'] + '\"') rows.append(row) # one array element per row rows = [','.join(i) for i in rows] header = '\"image\"' for box_num in range(0, max_boxes): header += ', \"box\"' + str(box_num) rows.insert(0, header) return rows
def function[parse_rectlabel_app_output, parameter[self]]: constant[ Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the column headers. Could be useful for subsequent string manipulation therefore not prefixed with an underscore RectLabel info: https://rectlabel.com/ ] variable[files] assign[=] list[[]] variable[files] assign[=] <ast.ListComp object at 0x7da20e748700> if compare[call[name[len], parameter[name[files]]] equal[==] constant[0]] begin[:] call[name[print], parameter[constant[No json files found in this directory]]] return[constant[None]] variable[max_boxes] assign[=] constant[0] variable[rows] assign[=] list[[]] for taget[name[each_file]] in starred[name[files]] begin[:] variable[f] assign[=] call[name[open], parameter[name[each_file], constant[r]]] variable[j] assign[=] call[name[f].read, parameter[]] variable[j] assign[=] call[name[json].loads, parameter[name[j]]] call[name[f].close, parameter[]] if compare[call[name[len], parameter[call[name[j]][constant[objects]]]] greater[>] name[max_boxes]] begin[:] variable[max_boxes] assign[=] call[name[len], parameter[call[name[j]][constant[objects]]]] variable[row] assign[=] list[[]] for taget[name[o]] in starred[call[name[j]][constant[objects]]] begin[:] variable[labels] assign[=] dictionary[[], []] call[name[labels]][constant[label]] assign[=] call[name[o]][constant[label]] call[name[labels]][constant[x]] assign[=] call[call[name[o]][constant[x_y_w_h]]][constant[0]] call[name[labels]][constant[y]] assign[=] call[call[name[o]][constant[x_y_w_h]]][constant[1]] call[name[labels]][constant[width]] assign[=] call[call[name[o]][constant[x_y_w_h]]][constant[2]] call[name[labels]][constant[height]] assign[=] call[call[name[o]][constant[x_y_w_h]]][constant[3]] variable[labels_right_format] assign[=] 
binary_operation[binary_operation[constant["] + call[call[name[json].dumps, parameter[name[labels]]].replace, parameter[constant["], constant[""]]]] + constant["]] call[name[row].append, parameter[name[labels_right_format]]] call[name[row].insert, parameter[constant[0], binary_operation[binary_operation[constant["] + call[name[j]][constant[filename]]] + constant["]]]] call[name[rows].append, parameter[name[row]]] variable[rows] assign[=] <ast.ListComp object at 0x7da18fe92da0> variable[header] assign[=] constant["image"] for taget[name[box_num]] in starred[call[name[range], parameter[constant[0], name[max_boxes]]]] begin[:] <ast.AugAssign object at 0x7da20eb2ada0> call[name[rows].insert, parameter[constant[0], name[header]]] return[name[rows]]
keyword[def] identifier[parse_rectlabel_app_output] ( identifier[self] ): literal[string] identifier[files] =[] identifier[files] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] () keyword[if] identifier[f] [- literal[int] :]== literal[string] ] keyword[if] identifier[len] ( identifier[files] )== literal[int] : identifier[print] ( literal[string] ) keyword[return] keyword[None] identifier[max_boxes] = literal[int] identifier[rows] =[] keyword[for] identifier[each_file] keyword[in] identifier[files] : identifier[f] = identifier[open] ( identifier[each_file] , literal[string] ) identifier[j] = identifier[f] . identifier[read] () identifier[j] = identifier[json] . identifier[loads] ( identifier[j] ) identifier[f] . identifier[close] () keyword[if] identifier[len] ( identifier[j] [ literal[string] ])> identifier[max_boxes] : identifier[max_boxes] = identifier[len] ( identifier[j] [ literal[string] ]) identifier[row] =[] keyword[for] identifier[o] keyword[in] identifier[j] [ literal[string] ]: identifier[labels] ={} identifier[labels] [ literal[string] ]= identifier[o] [ literal[string] ] identifier[labels] [ literal[string] ]= identifier[o] [ literal[string] ][ literal[int] ] identifier[labels] [ literal[string] ]= identifier[o] [ literal[string] ][ literal[int] ] identifier[labels] [ literal[string] ]= identifier[o] [ literal[string] ][ literal[int] ] identifier[labels] [ literal[string] ]= identifier[o] [ literal[string] ][ literal[int] ] identifier[labels_right_format] = literal[string] + identifier[json] . identifier[dumps] ( identifier[labels] ). identifier[replace] ( literal[string] , literal[string] )+ literal[string] identifier[row] . identifier[append] ( identifier[labels_right_format] ) identifier[row] . identifier[insert] ( literal[int] , literal[string] + identifier[j] [ literal[string] ]+ literal[string] ) identifier[rows] . identifier[append] ( identifier[row] ) identifier[rows] =[ literal[string] . 
identifier[join] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[rows] ] identifier[header] = literal[string] keyword[for] identifier[box_num] keyword[in] identifier[range] ( literal[int] , identifier[max_boxes] ): identifier[header] += literal[string] + identifier[str] ( identifier[box_num] ) identifier[rows] . identifier[insert] ( literal[int] , identifier[header] ) keyword[return] identifier[rows]
def parse_rectlabel_app_output(self): """ Internal use mostly, finds all .json files in the current folder expecting them to all have been outputted by the RectLabel app parses each file returning finally an array representing a csv file where each element is a row and the 1st element [0] is the column headers. Could be useful for subsequent string manipulation therefore not prefixed with an underscore RectLabel info: https://rectlabel.com/ """ # get json files only files = [] files = [f for f in os.listdir() if f[-5:] == '.json'] if len(files) == 0: print('No json files found in this directory') return None # depends on [control=['if'], data=[]] max_boxes = 0 rows = [] for each_file in files: f = open(each_file, 'r') j = f.read() j = json.loads(j) f.close() # running count of the # of boxes. if len(j['objects']) > max_boxes: max_boxes = len(j['objects']) # depends on [control=['if'], data=['max_boxes']] # Each json file will end up being a row # set labels row = [] for o in j['objects']: labels = {} labels['label'] = o['label'] labels['x'] = o['x_y_w_h'][0] labels['y'] = o['x_y_w_h'][1] labels['width'] = o['x_y_w_h'][2] labels['height'] = o['x_y_w_h'][3] # String manipulation for csv labels_right_format = '"' + json.dumps(labels).replace('"', '""') + '"' row.append(labels_right_format) # depends on [control=['for'], data=['o']] row.insert(0, '"' + j['filename'] + '"') rows.append(row) # depends on [control=['for'], data=['each_file']] # one array element per row rows = [','.join(i) for i in rows] header = '"image"' for box_num in range(0, max_boxes): header += ', "box"' + str(box_num) # depends on [control=['for'], data=['box_num']] rows.insert(0, header) return rows
def requirements(): """Build the requirements list for this project""" requirements_list = [] with open('requirements.txt') as requirements: for install in requirements: requirements_list.append(install.strip()) return requirements_list
def function[requirements, parameter[]]: constant[Build the requirements list for this project] variable[requirements_list] assign[=] list[[]] with call[name[open], parameter[constant[requirements.txt]]] begin[:] for taget[name[install]] in starred[name[requirements]] begin[:] call[name[requirements_list].append, parameter[call[name[install].strip, parameter[]]]] return[name[requirements_list]]
keyword[def] identifier[requirements] (): literal[string] identifier[requirements_list] =[] keyword[with] identifier[open] ( literal[string] ) keyword[as] identifier[requirements] : keyword[for] identifier[install] keyword[in] identifier[requirements] : identifier[requirements_list] . identifier[append] ( identifier[install] . identifier[strip] ()) keyword[return] identifier[requirements_list]
def requirements(): """Build the requirements list for this project""" requirements_list = [] with open('requirements.txt') as requirements: for install in requirements: requirements_list.append(install.strip()) # depends on [control=['for'], data=['install']] # depends on [control=['with'], data=['requirements']] return requirements_list
def __process_by_ccore(self): """! @brief Performs cluster analysis using C++ implementation of CLIQUE algorithm that is used by default if user's target platform is supported. """ (self.__clusters, self.__noise, block_logical_locations, block_max_corners, block_min_corners, block_points) = \ wrapper.clique(self.__data, self.__amount_intervals, self.__density_threshold) amount_cells = len(block_logical_locations) for i in range(amount_cells): self.__cells.append(clique_block(block_logical_locations[i], spatial_block(block_max_corners[i], block_min_corners[i]), block_points[i], True))
def function[__process_by_ccore, parameter[self]]: constant[! @brief Performs cluster analysis using C++ implementation of CLIQUE algorithm that is used by default if user's target platform is supported. ] <ast.Tuple object at 0x7da1b013e050> assign[=] call[name[wrapper].clique, parameter[name[self].__data, name[self].__amount_intervals, name[self].__density_threshold]] variable[amount_cells] assign[=] call[name[len], parameter[name[block_logical_locations]]] for taget[name[i]] in starred[call[name[range], parameter[name[amount_cells]]]] begin[:] call[name[self].__cells.append, parameter[call[name[clique_block], parameter[call[name[block_logical_locations]][name[i]], call[name[spatial_block], parameter[call[name[block_max_corners]][name[i]], call[name[block_min_corners]][name[i]]]], call[name[block_points]][name[i]], constant[True]]]]]
keyword[def] identifier[__process_by_ccore] ( identifier[self] ): literal[string] ( identifier[self] . identifier[__clusters] , identifier[self] . identifier[__noise] , identifier[block_logical_locations] , identifier[block_max_corners] , identifier[block_min_corners] , identifier[block_points] )= identifier[wrapper] . identifier[clique] ( identifier[self] . identifier[__data] , identifier[self] . identifier[__amount_intervals] , identifier[self] . identifier[__density_threshold] ) identifier[amount_cells] = identifier[len] ( identifier[block_logical_locations] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[amount_cells] ): identifier[self] . identifier[__cells] . identifier[append] ( identifier[clique_block] ( identifier[block_logical_locations] [ identifier[i] ], identifier[spatial_block] ( identifier[block_max_corners] [ identifier[i] ], identifier[block_min_corners] [ identifier[i] ]), identifier[block_points] [ identifier[i] ], keyword[True] ))
def __process_by_ccore(self): """! @brief Performs cluster analysis using C++ implementation of CLIQUE algorithm that is used by default if user's target platform is supported. """ (self.__clusters, self.__noise, block_logical_locations, block_max_corners, block_min_corners, block_points) = wrapper.clique(self.__data, self.__amount_intervals, self.__density_threshold) amount_cells = len(block_logical_locations) for i in range(amount_cells): self.__cells.append(clique_block(block_logical_locations[i], spatial_block(block_max_corners[i], block_min_corners[i]), block_points[i], True)) # depends on [control=['for'], data=['i']]