Dataset columns (string min/max lengths per column):

    code              string    75  .. 104k
    code_sememe       string    47  .. 309k
    token_type        string    215 .. 214k
    code_dependency   string    75  .. 155k
def __render(self, context, **kwargs):
    """
    Render template.

    :param context: A dict or dict-like object to instantiate given
        template file
    :param kwargs: Keyword arguments passed to the template engine to
        render templates with specific features enabled.
    :return: Rendered string
    """
    # Do not pass both searchList and namespaces.
    kwargs["namespaces"] = [context, ] + kwargs.get("namespaces", []) \
        + kwargs.get("searchList", [])
    kwargs["searchList"] = None

    # TODO:
    # if at_paths is not None:
    #     paths = at_paths + self._engine_valid_opts.get(..., [])
    #     ...
    kwargs = self.filter_options(kwargs, self.engine_valid_options())
    self.engine_options.update(kwargs)

    return render_impl(**self.engine_options)
def function[__render, parameter[self, context]]: constant[ Render template. :param context: A dict or dict-like object to instantiate given template file :param kwargs: Keyword arguments passed to the template engine to render templates with specific features enabled. :return: Rendered string ] call[name[kwargs]][constant[namespaces]] assign[=] binary_operation[binary_operation[list[[<ast.Name object at 0x7da2047e8130>]] + call[name[kwargs].get, parameter[constant[namespaces], list[[]]]]] + call[name[kwargs].get, parameter[constant[searchList], list[[]]]]] call[name[kwargs]][constant[searchList]] assign[=] constant[None] variable[kwargs] assign[=] call[name[self].filter_options, parameter[name[kwargs], call[name[self].engine_valid_options, parameter[]]]] call[name[self].engine_options.update, parameter[name[kwargs]]] return[call[name[render_impl], parameter[]]]
keyword[def] identifier[__render] ( identifier[self] , identifier[context] ,** identifier[kwargs] ): literal[string] identifier[kwargs] [ literal[string] ]=[ identifier[context] ,]+ identifier[kwargs] . identifier[get] ( literal[string] ,[])+ identifier[kwargs] . identifier[get] ( literal[string] ,[]) identifier[kwargs] [ literal[string] ]= keyword[None] identifier[kwargs] = identifier[self] . identifier[filter_options] ( identifier[kwargs] , identifier[self] . identifier[engine_valid_options] ()) identifier[self] . identifier[engine_options] . identifier[update] ( identifier[kwargs] ) keyword[return] identifier[render_impl] (** identifier[self] . identifier[engine_options] )
def __render(self, context, **kwargs):
    """
    Render template.

    :param context: A dict or dict-like object to instantiate given
        template file
    :param kwargs: Keyword arguments passed to the template engine to
        render templates with specific features enabled.
    :return: Rendered string
    """
    # Do not pass both searchList and namespaces.
    kwargs['namespaces'] = [context] + kwargs.get('namespaces', []) + kwargs.get('searchList', [])
    kwargs['searchList'] = None
    # TODO:
    # if at_paths is not None:
    #     paths = at_paths + self._engine_valid_opts.get(..., [])
    #     ...
    kwargs = self.filter_options(kwargs, self.engine_valid_options())
    self.engine_options.update(kwargs)
    return render_impl(**self.engine_options)
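The searchList/namespaces merge is the subtle step in __render above, so here it is pulled out into a hypothetical standalone helper with a worked call; the helper name and the toy dicts are illustrative, not part of the original module.

def merge_namespaces(context, **kwargs):
    # Fold the context plus any caller-supplied lookup lists into a single
    # "namespaces" chain, then blank out "searchList" so both are never passed.
    kwargs["namespaces"] = ([context] + kwargs.get("namespaces", [])
                            + kwargs.get("searchList", []))
    kwargs["searchList"] = None
    return kwargs

merged = merge_namespaces({"name": "world"}, searchList=[{"greeting": "hello"}])
assert merged["namespaces"] == [{"name": "world"}, {"greeting": "hello"}]
assert merged["searchList"] is None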
def set_project_avatar(self, avatar_blob, project_id): """SetProjectAvatar. [Preview API] Sets the avatar for the project. :param :class:`<ProjectAvatar> <azure.devops.v5_1.core.models.ProjectAvatar>` avatar_blob: The avatar blob data object to upload. :param str project_id: The ID or name of the project. """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') content = self._serialize.body(avatar_blob, 'ProjectAvatar') self._send(http_method='PUT', location_id='54b2a2a0-859b-4d05-827c-ec4c862f641a', version='5.1-preview.1', route_values=route_values, content=content)
def function[set_project_avatar, parameter[self, avatar_blob, project_id]]: constant[SetProjectAvatar. [Preview API] Sets the avatar for the project. :param :class:`<ProjectAvatar> <azure.devops.v5_1.core.models.ProjectAvatar>` avatar_blob: The avatar blob data object to upload. :param str project_id: The ID or name of the project. ] variable[route_values] assign[=] dictionary[[], []] if compare[name[project_id] is_not constant[None]] begin[:] call[name[route_values]][constant[projectId]] assign[=] call[name[self]._serialize.url, parameter[constant[project_id], name[project_id], constant[str]]] variable[content] assign[=] call[name[self]._serialize.body, parameter[name[avatar_blob], constant[ProjectAvatar]]] call[name[self]._send, parameter[]]
keyword[def] identifier[set_project_avatar] ( identifier[self] , identifier[avatar_blob] , identifier[project_id] ): literal[string] identifier[route_values] ={} keyword[if] identifier[project_id] keyword[is] keyword[not] keyword[None] : identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project_id] , literal[string] ) identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[avatar_blob] , literal[string] ) identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] , identifier[location_id] = literal[string] , identifier[version] = literal[string] , identifier[route_values] = identifier[route_values] , identifier[content] = identifier[content] )
def set_project_avatar(self, avatar_blob, project_id): """SetProjectAvatar. [Preview API] Sets the avatar for the project. :param :class:`<ProjectAvatar> <azure.devops.v5_1.core.models.ProjectAvatar>` avatar_blob: The avatar blob data object to upload. :param str project_id: The ID or name of the project. """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') # depends on [control=['if'], data=['project_id']] content = self._serialize.body(avatar_blob, 'ProjectAvatar') self._send(http_method='PUT', location_id='54b2a2a0-859b-4d05-827c-ec4c862f641a', version='5.1-preview.1', route_values=route_values, content=content)
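For orientation, a hedged usage sketch of the client method above, following the azure-devops SDK's usual connection boilerplate. The organization URL, token, project id, and the dict standing in for the ProjectAvatar model are illustrative assumptions; set_project_avatar is a 5.1 preview API, so the client factory in use must expose that version.

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(
    base_url="https://dev.azure.com/<organization>",           # placeholder
    creds=BasicAuthentication("", "<personal-access-token>"),  # placeholder
)
core_client = connection.clients.get_core_client()

with open("avatar.png", "rb") as f:
    avatar_blob = {"image": list(f.read())}  # assumed ProjectAvatar-shaped payload
core_client.set_project_avatar(avatar_blob, "<project-id-or-name>")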
def start_runtime(self): ''' Start the system! ''' while True: try: self.call_runtime() except Exception: log.error('Exception in Thorium: ', exc_info=True) time.sleep(self.opts['thorium_interval'])
def function[start_runtime, parameter[self]]: constant[ Start the system! ] while constant[True] begin[:] <ast.Try object at 0x7da2054a4f40>
keyword[def] identifier[start_runtime] ( identifier[self] ): literal[string] keyword[while] keyword[True] : keyword[try] : identifier[self] . identifier[call_runtime] () keyword[except] identifier[Exception] : identifier[log] . identifier[error] ( literal[string] , identifier[exc_info] = keyword[True] ) identifier[time] . identifier[sleep] ( identifier[self] . identifier[opts] [ literal[string] ])
def start_runtime(self): """ Start the system! """ while True: try: self.call_runtime() # depends on [control=['try'], data=[]] except Exception: log.error('Exception in Thorium: ', exc_info=True) time.sleep(self.opts['thorium_interval']) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
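The same supervise-and-retry shape as start_runtime, stripped of the Thorium specifics into a standalone sketch; the logger name and default interval are placeholders.

import logging
import time

log = logging.getLogger(__name__)

def run_forever(work, interval=0.5):
    """Call `work` in a loop; log any exception with its traceback, then retry."""
    while True:
        try:
            work()
        except Exception:
            log.error("Exception in worker: ", exc_info=True)
        time.sleep(interval)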
def process(self): # pragma: no cover """ Process the logic and structuration of the mining database. """ if PyFunceble.CONFIGURATION["mining"]: # The mining is activated. # We load the mining logic. mined = self.mine() if mined: # The mined data is not empty or None. # We add the mined data to the global database. self._add(mined) # And we finally backup everything. self._backup()
def function[process, parameter[self]]: constant[ Process the logic and structuration of the mining database. ] if call[name[PyFunceble].CONFIGURATION][constant[mining]] begin[:] variable[mined] assign[=] call[name[self].mine, parameter[]] if name[mined] begin[:] call[name[self]._add, parameter[name[mined]]] call[name[self]._backup, parameter[]]
keyword[def] identifier[process] ( identifier[self] ): literal[string] keyword[if] identifier[PyFunceble] . identifier[CONFIGURATION] [ literal[string] ]: identifier[mined] = identifier[self] . identifier[mine] () keyword[if] identifier[mined] : identifier[self] . identifier[_add] ( identifier[mined] ) identifier[self] . identifier[_backup] ()
def process(self): # pragma: no cover '\n Process the logic and structuration of the mining database.\n ' if PyFunceble.CONFIGURATION['mining']: # The mining is activated. # We load the mining logic. mined = self.mine() if mined: # The mined data is not empty or None. # We add the mined data to the global database. self._add(mined) # And we finally backup everything. self._backup() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def run_one(self, name, migrator, fake=True, downgrade=False, force=False): """Run/emulate a migration with given name.""" try: migrate, rollback = self.read(name) if fake: with mock.patch('peewee.Model.select'): with mock.patch('peewee.Query._execute'): migrate(migrator, self.database, fake=fake) if force: self.model.create(name=name) self.logger.info('Done %s', name) migrator.clean() return migrator with self.database.transaction(): if not downgrade: self.logger.info('Migrate "%s"', name) migrate(migrator, self.database, fake=fake) migrator.run() self.model.create(name=name) else: self.logger.info('Rolling back %s', name) rollback(migrator, self.database, fake=fake) migrator.run() self.model.delete().where(self.model.name == name).execute() self.logger.info('Done %s', name) except Exception: self.database.rollback() operation = 'Migration' if not downgrade else 'Rollback' self.logger.exception('%s failed: %s', operation, name) raise
def function[run_one, parameter[self, name, migrator, fake, downgrade, force]]: constant[Run/emulate a migration with given name.] <ast.Try object at 0x7da1b20a8640>
keyword[def] identifier[run_one] ( identifier[self] , identifier[name] , identifier[migrator] , identifier[fake] = keyword[True] , identifier[downgrade] = keyword[False] , identifier[force] = keyword[False] ): literal[string] keyword[try] : identifier[migrate] , identifier[rollback] = identifier[self] . identifier[read] ( identifier[name] ) keyword[if] identifier[fake] : keyword[with] identifier[mock] . identifier[patch] ( literal[string] ): keyword[with] identifier[mock] . identifier[patch] ( literal[string] ): identifier[migrate] ( identifier[migrator] , identifier[self] . identifier[database] , identifier[fake] = identifier[fake] ) keyword[if] identifier[force] : identifier[self] . identifier[model] . identifier[create] ( identifier[name] = identifier[name] ) identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[name] ) identifier[migrator] . identifier[clean] () keyword[return] identifier[migrator] keyword[with] identifier[self] . identifier[database] . identifier[transaction] (): keyword[if] keyword[not] identifier[downgrade] : identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[name] ) identifier[migrate] ( identifier[migrator] , identifier[self] . identifier[database] , identifier[fake] = identifier[fake] ) identifier[migrator] . identifier[run] () identifier[self] . identifier[model] . identifier[create] ( identifier[name] = identifier[name] ) keyword[else] : identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[name] ) identifier[rollback] ( identifier[migrator] , identifier[self] . identifier[database] , identifier[fake] = identifier[fake] ) identifier[migrator] . identifier[run] () identifier[self] . identifier[model] . identifier[delete] (). identifier[where] ( identifier[self] . identifier[model] . identifier[name] == identifier[name] ). identifier[execute] () identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[name] ) keyword[except] identifier[Exception] : identifier[self] . identifier[database] . identifier[rollback] () identifier[operation] = literal[string] keyword[if] keyword[not] identifier[downgrade] keyword[else] literal[string] identifier[self] . identifier[logger] . identifier[exception] ( literal[string] , identifier[operation] , identifier[name] ) keyword[raise]
def run_one(self, name, migrator, fake=True, downgrade=False, force=False): """Run/emulate a migration with given name.""" try: (migrate, rollback) = self.read(name) if fake: with mock.patch('peewee.Model.select'): with mock.patch('peewee.Query._execute'): migrate(migrator, self.database, fake=fake) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] if force: self.model.create(name=name) self.logger.info('Done %s', name) # depends on [control=['if'], data=[]] migrator.clean() return migrator # depends on [control=['if'], data=[]] with self.database.transaction(): if not downgrade: self.logger.info('Migrate "%s"', name) migrate(migrator, self.database, fake=fake) migrator.run() self.model.create(name=name) # depends on [control=['if'], data=[]] else: self.logger.info('Rolling back %s', name) rollback(migrator, self.database, fake=fake) migrator.run() self.model.delete().where(self.model.name == name).execute() self.logger.info('Done %s', name) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]] except Exception: self.database.rollback() operation = 'Migration' if not downgrade else 'Rollback' self.logger.exception('%s failed: %s', operation, name) raise # depends on [control=['except'], data=[]]
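The fake=True branch above relies on nested mock.patch calls so the migration body executes without touching the database. A minimal self-contained illustration of that trick, patching os.remove instead of the peewee internals:

import os
from unittest import mock

with mock.patch("os.remove") as fake_remove:
    os.remove("/tmp/nothing-is-deleted")  # intercepted by the mock; no real call
    fake_remove.assert_called_once_with("/tmp/nothing-is-deleted")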
def abfGroups(abfFolder): """ Given a folder path or list of files, return groups (dict) by cell. Rules which define parents (cells): * assume each cell has one or several ABFs * that cell can be labeled by its "ID" or "parent" ABF (first abf) * the ID is just the filename of the first abf without .abf * if any file starts with an "ID", that ID becomes a parent. * examples could be 16o14044.TIF or 16o14044-cell1-stuff.jpg * usually this is done by saving a pic of the cell with same filename Returns a dict of "parent IDs" representing the "children" groups["16o14041"] = ["16o14041","16o14042","16o14043"] From there, getting children files is trivial. Just find all files in the same folder whose filenames begin with one of the children. """ # prepare the list of files, filenames, and IDs files=False if type(abfFolder) is str and os.path.isdir(abfFolder): files=abfSort(os.listdir(abfFolder)) elif type(abfFolder) is list: files=abfSort(abfFolder) assert type(files) is list files=list_to_lowercase(files) # group every filename in a different list, and determine parents abfs, IDs, others, parents, days = [],[],[],[],[] for fname in files: if fname.endswith(".abf"): abfs.append(fname) IDs.append(fname[:-4]) days.append(fname[:5]) else: others.append(fname) for ID in IDs: for fname in others: if fname.startswith(ID): parents.append(ID) parents=abfSort(set(parents)) # allow only one copy each days=abfSort(set(days)) # allow only one copy each # match up children with parents, respecting daily orphans. groups={} for day in days: parent=None for fname in [x for x in abfs if x.startswith(day)]: ID=fname[:-4] if ID in parents: parent=ID if not parent in groups.keys(): groups[parent]=[] groups[parent].extend([ID]) return groups
def function[abfGroups, parameter[abfFolder]]: constant[ Given a folder path or list of files, return groups (dict) by cell. Rules which define parents (cells): * assume each cell has one or several ABFs * that cell can be labeled by its "ID" or "parent" ABF (first abf) * the ID is just the filename of the first abf without .abf * if any file starts with an "ID", that ID becomes a parent. * examples could be 16o14044.TIF or 16o14044-cell1-stuff.jpg * usually this is done by saving a pic of the cell with same filename Returns a dict of "parent IDs" representing the "children" groups["16o14041"] = ["16o14041","16o14042","16o14043"] From there, getting children files is trivial. Just find all files in the same folder whose filenames begin with one of the children. ] variable[files] assign[=] constant[False] if <ast.BoolOp object at 0x7da1afef1de0> begin[:] variable[files] assign[=] call[name[abfSort], parameter[call[name[os].listdir, parameter[name[abfFolder]]]]] assert[compare[call[name[type], parameter[name[files]]] is name[list]]] variable[files] assign[=] call[name[list_to_lowercase], parameter[name[files]]] <ast.Tuple object at 0x7da1afef2c50> assign[=] tuple[[<ast.List object at 0x7da1afef2d10>, <ast.List object at 0x7da1afef2d40>, <ast.List object at 0x7da1afef0b80>, <ast.List object at 0x7da1afef0b50>, <ast.List object at 0x7da1afef0940>]] for taget[name[fname]] in starred[name[files]] begin[:] if call[name[fname].endswith, parameter[constant[.abf]]] begin[:] call[name[abfs].append, parameter[name[fname]]] call[name[IDs].append, parameter[call[name[fname]][<ast.Slice object at 0x7da1afef12a0>]]] call[name[days].append, parameter[call[name[fname]][<ast.Slice object at 0x7da1afef1ae0>]]] for taget[name[ID]] in starred[name[IDs]] begin[:] for taget[name[fname]] in starred[name[others]] begin[:] if call[name[fname].startswith, parameter[name[ID]]] begin[:] call[name[parents].append, parameter[name[ID]]] variable[parents] assign[=] call[name[abfSort], parameter[call[name[set], parameter[name[parents]]]]] variable[days] assign[=] call[name[abfSort], parameter[call[name[set], parameter[name[days]]]]] variable[groups] assign[=] dictionary[[], []] for taget[name[day]] in starred[name[days]] begin[:] variable[parent] assign[=] constant[None] for taget[name[fname]] in starred[<ast.ListComp object at 0x7da1afef04c0>] begin[:] variable[ID] assign[=] call[name[fname]][<ast.Slice object at 0x7da1afef17e0>] if compare[name[ID] in name[parents]] begin[:] variable[parent] assign[=] name[ID] if <ast.UnaryOp object at 0x7da1afef1d20> begin[:] call[name[groups]][name[parent]] assign[=] list[[]] call[call[name[groups]][name[parent]].extend, parameter[list[[<ast.Name object at 0x7da1afe05ae0>]]]] return[name[groups]]
keyword[def] identifier[abfGroups] ( identifier[abfFolder] ): literal[string] identifier[files] = keyword[False] keyword[if] identifier[type] ( identifier[abfFolder] ) keyword[is] identifier[str] keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[abfFolder] ): identifier[files] = identifier[abfSort] ( identifier[os] . identifier[listdir] ( identifier[abfFolder] )) keyword[elif] identifier[type] ( identifier[abfFolder] ) keyword[is] identifier[list] : identifier[files] = identifier[abfSort] ( identifier[abfFolder] ) keyword[assert] identifier[type] ( identifier[files] ) keyword[is] identifier[list] identifier[files] = identifier[list_to_lowercase] ( identifier[files] ) identifier[abfs] , identifier[IDs] , identifier[others] , identifier[parents] , identifier[days] =[],[],[],[],[] keyword[for] identifier[fname] keyword[in] identifier[files] : keyword[if] identifier[fname] . identifier[endswith] ( literal[string] ): identifier[abfs] . identifier[append] ( identifier[fname] ) identifier[IDs] . identifier[append] ( identifier[fname] [:- literal[int] ]) identifier[days] . identifier[append] ( identifier[fname] [: literal[int] ]) keyword[else] : identifier[others] . identifier[append] ( identifier[fname] ) keyword[for] identifier[ID] keyword[in] identifier[IDs] : keyword[for] identifier[fname] keyword[in] identifier[others] : keyword[if] identifier[fname] . identifier[startswith] ( identifier[ID] ): identifier[parents] . identifier[append] ( identifier[ID] ) identifier[parents] = identifier[abfSort] ( identifier[set] ( identifier[parents] )) identifier[days] = identifier[abfSort] ( identifier[set] ( identifier[days] )) identifier[groups] ={} keyword[for] identifier[day] keyword[in] identifier[days] : identifier[parent] = keyword[None] keyword[for] identifier[fname] keyword[in] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[abfs] keyword[if] identifier[x] . identifier[startswith] ( identifier[day] )]: identifier[ID] = identifier[fname] [:- literal[int] ] keyword[if] identifier[ID] keyword[in] identifier[parents] : identifier[parent] = identifier[ID] keyword[if] keyword[not] identifier[parent] keyword[in] identifier[groups] . identifier[keys] (): identifier[groups] [ identifier[parent] ]=[] identifier[groups] [ identifier[parent] ]. identifier[extend] ([ identifier[ID] ]) keyword[return] identifier[groups]
def abfGroups(abfFolder): """ Given a folder path or list of files, return groups (dict) by cell. Rules which define parents (cells): * assume each cell has one or several ABFs * that cell can be labeled by its "ID" or "parent" ABF (first abf) * the ID is just the filename of the first abf without .abf * if any file starts with an "ID", that ID becomes a parent. * examples could be 16o14044.TIF or 16o14044-cell1-stuff.jpg * usually this is done by saving a pic of the cell with same filename Returns a dict of "parent IDs" representing the "children" groups["16o14041"] = ["16o14041","16o14042","16o14043"] From there, getting children files is trivial. Just find all files in the same folder whose filenames begin with one of the children. """ # prepare the list of files, filenames, and IDs files = False if type(abfFolder) is str and os.path.isdir(abfFolder): files = abfSort(os.listdir(abfFolder)) # depends on [control=['if'], data=[]] elif type(abfFolder) is list: files = abfSort(abfFolder) # depends on [control=['if'], data=[]] assert type(files) is list files = list_to_lowercase(files) # group every filename in a different list, and determine parents (abfs, IDs, others, parents, days) = ([], [], [], [], []) for fname in files: if fname.endswith('.abf'): abfs.append(fname) IDs.append(fname[:-4]) days.append(fname[:5]) # depends on [control=['if'], data=[]] else: others.append(fname) # depends on [control=['for'], data=['fname']] for ID in IDs: for fname in others: if fname.startswith(ID): parents.append(ID) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']] # depends on [control=['for'], data=['ID']] parents = abfSort(set(parents)) # allow only one copy each days = abfSort(set(days)) # allow only one copy each # match up children with parents, respecting daily orphans. groups = {} for day in days: parent = None for fname in [x for x in abfs if x.startswith(day)]: ID = fname[:-4] if ID in parents: parent = ID # depends on [control=['if'], data=['ID']] if not parent in groups.keys(): groups[parent] = [] # depends on [control=['if'], data=[]] groups[parent].extend([ID]) # depends on [control=['for'], data=['fname']] # depends on [control=['for'], data=['day']] return groups
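The grouping rule in abfGroups is easier to follow on toy data. Below is a self-contained sketch of the same parent/child logic, using sorted() in place of the module's abfSort helper and collapsing the per-day loop into one ordered pass; the filenames are made up.

files = ["16o14041.abf", "16o14041.tif", "16o14042.abf", "16o14043.abf",
         "16o15050.abf", "16o15050-cell1.jpg"]
files = sorted(f.lower() for f in files)
abfs = [f for f in files if f.endswith(".abf")]
others = [f for f in files if not f.endswith(".abf")]
# an ID is a parent if any non-ABF file starts with it
parents = {f[:-4] for f in abfs if any(o.startswith(f[:-4]) for o in others)}
groups, parent = {}, None
for fname in abfs:
    ID = fname[:-4]
    if ID in parents:
        parent = ID  # a new cell begins here
    groups.setdefault(parent, []).append(ID)
print(groups)  # {'16o14041': ['16o14041', '16o14042', '16o14043'], '16o15050': ['16o15050']}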
def handle_exec(args): """usage: cosmic-ray exec <session-file> Perform the remaining work to be done in the specified session. This requires that the rest of your mutation testing infrastructure (e.g. worker processes) are already running. """ session_file = get_db_name(args.get('<session-file>')) cosmic_ray.commands.execute(session_file) return ExitCode.OK
def function[handle_exec, parameter[args]]: constant[usage: cosmic-ray exec <session-file> Perform the remaining work to be done in the specified session. This requires that the rest of your mutation testing infrastructure (e.g. worker processes) are already running. ] variable[session_file] assign[=] call[name[get_db_name], parameter[call[name[args].get, parameter[constant[<session-file>]]]]] call[name[cosmic_ray].commands.execute, parameter[name[session_file]]] return[name[ExitCode].OK]
keyword[def] identifier[handle_exec] ( identifier[args] ): literal[string] identifier[session_file] = identifier[get_db_name] ( identifier[args] . identifier[get] ( literal[string] )) identifier[cosmic_ray] . identifier[commands] . identifier[execute] ( identifier[session_file] ) keyword[return] identifier[ExitCode] . identifier[OK]
def handle_exec(args): """usage: cosmic-ray exec <session-file> Perform the remaining work to be done in the specified session. This requires that the rest of your mutation testing infrastructure (e.g. worker processes) are already running. """ session_file = get_db_name(args.get('<session-file>')) cosmic_ray.commands.execute(session_file) return ExitCode.OK
def process_added_port(self, device_details):
    """Process the new ports.

    Wraps _process_added_port, and treats the successful and exception
    cases.
    """
    device = device_details['device']
    port_id = device_details['port_id']
    reprocess = True
    try:
        self._process_added_port(device_details)
        LOG.debug("Updating cached port %s status as UP.", port_id)
        self._update_port_status_cache(device, device_bound=True)
        LOG.info("Port %s processed.", port_id)
    except os_win_exc.HyperVvNicNotFound:
        LOG.debug('vNIC %s not found. This can happen if the VM was '
                  'destroyed.', port_id)
        reprocess = False
    except os_win_exc.HyperVPortNotFoundException:
        LOG.debug('vSwitch port %s not found. This can happen if the VM '
                  'was destroyed.', port_id)
        # NOTE(claudiub): just to be on the safe side, in case Hyper-V said
        # that the port was added, but it hasn't really, we're leaving
        # reprocess = True. If the VM / vNIC was removed, on the next
        # reprocess, a HyperVvNicNotFound will be raised.
    except Exception as ex:
        # NOTE(claudiub): in case of a non-transient error, the port will
        # be processed over and over again, and will not be reported as
        # bound (e.g.: InvalidParameterValue when setting QoS), until the
        # port is deleted. These issues have to be investigated and solved
        LOG.exception("Exception encountered while processing "
                      "port %(port_id)s. Exception: %(ex)s",
                      dict(port_id=port_id, ex=ex))
    else:
        # no exception encountered, no need to reprocess.
        reprocess = False

    if reprocess:
        # Readd the port as "added", so it can be reprocessed.
        self._added_ports.add(device)
        # Force cache refresh.
        self._refresh_cache = True
        return False

    return True
def function[process_added_port, parameter[self, device_details]]: constant[Process the new ports. Wraps _process_added_port, and treats the successful and exception cases. ] variable[device] assign[=] call[name[device_details]][constant[device]] variable[port_id] assign[=] call[name[device_details]][constant[port_id]] variable[reprocess] assign[=] constant[True] <ast.Try object at 0x7da1b196a830> if name[reprocess] begin[:] call[name[self]._added_ports.add, parameter[name[device]]] name[self]._refresh_cache assign[=] constant[True] return[constant[False]] return[constant[True]]
keyword[def] identifier[process_added_port] ( identifier[self] , identifier[device_details] ): literal[string] identifier[device] = identifier[device_details] [ literal[string] ] identifier[port_id] = identifier[device_details] [ literal[string] ] identifier[reprocess] = keyword[True] keyword[try] : identifier[self] . identifier[_process_added_port] ( identifier[device_details] ) identifier[LOG] . identifier[debug] ( literal[string] , identifier[port_id] ) identifier[self] . identifier[_update_port_status_cache] ( identifier[device] , identifier[device_bound] = keyword[True] ) identifier[LOG] . identifier[info] ( literal[string] , identifier[port_id] ) keyword[except] identifier[os_win_exc] . identifier[HyperVvNicNotFound] : identifier[LOG] . identifier[debug] ( literal[string] literal[string] , identifier[port_id] ) identifier[reprocess] = keyword[False] keyword[except] identifier[os_win_exc] . identifier[HyperVPortNotFoundException] : identifier[LOG] . identifier[debug] ( literal[string] literal[string] , identifier[port_id] ) keyword[except] identifier[Exception] keyword[as] identifier[ex] : identifier[LOG] . identifier[exception] ( literal[string] literal[string] , identifier[dict] ( identifier[port_id] = identifier[port_id] , identifier[ex] = identifier[ex] )) keyword[else] : identifier[reprocess] = keyword[False] keyword[if] identifier[reprocess] : identifier[self] . identifier[_added_ports] . identifier[add] ( identifier[device] ) identifier[self] . identifier[_refresh_cache] = keyword[True] keyword[return] keyword[False] keyword[return] keyword[True]
def process_added_port(self, device_details):
    """Process the new ports.

    Wraps _process_added_port, and treats the successful and exception
    cases.
    """
    device = device_details['device']
    port_id = device_details['port_id']
    reprocess = True
    try:
        self._process_added_port(device_details)
        LOG.debug('Updating cached port %s status as UP.', port_id)
        self._update_port_status_cache(device, device_bound=True)
        LOG.info('Port %s processed.', port_id) # depends on [control=['try'], data=[]]
    except os_win_exc.HyperVvNicNotFound:
        LOG.debug('vNIC %s not found. This can happen if the VM was destroyed.', port_id)
        reprocess = False # depends on [control=['except'], data=[]]
    except os_win_exc.HyperVPortNotFoundException:
        LOG.debug('vSwitch port %s not found. This can happen if the VM was destroyed.', port_id) # depends on [control=['except'], data=[]]
    # NOTE(claudiub): just to be on the safe side, in case Hyper-V said
    # that the port was added, but it hasn't really, we're leaving
    # reprocess = True. If the VM / vNIC was removed, on the next
    # reprocess, a HyperVvNicNotFound will be raised.
    except Exception as ex:
        # NOTE(claudiub): in case of a non-transient error, the port will
        # be processed over and over again, and will not be reported as
        # bound (e.g.: InvalidParameterValue when setting QoS), until the
        # port is deleted. These issues have to be investigated and solved
        LOG.exception('Exception encountered while processing port %(port_id)s. Exception: %(ex)s', dict(port_id=port_id, ex=ex)) # depends on [control=['except'], data=['ex']]
    else:
        # no exception encountered, no need to reprocess.
        reprocess = False
    if reprocess:
        # Readd the port as "added", so it can be reprocessed.
        self._added_ports.add(device)
        # Force cache refresh.
        self._refresh_cache = True
        return False # depends on [control=['if'], data=[]]
    return True
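The try/except/else bookkeeping in process_added_port follows a reusable shape: only a recognized "target is gone" exception or a clean run clears the retry flag, while unexpected errors leave it set for the next pass. A generic sketch with placeholder exception classes:

def process_item(item, handler, gone_exceptions=(LookupError,)):
    reprocess = True
    try:
        handler(item)
    except gone_exceptions:
        reprocess = False   # the target vanished; nothing left to retry
    except Exception:
        pass                # unknown/transient failure: keep it for the next pass
    else:
        reprocess = False   # success: done with this item
    return reprocess

assert process_item("a", lambda i: None) is False   # success
assert process_item("a", lambda i: 1 / 0) is True   # transient error -> retry
assert process_item("a", lambda i: {}[i]) is False  # KeyError -> "gone", drop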
def pre_release(self): """ Return true if version is a pre-release. """ label = self.version_info.get('label', None) pre = self.version_info.get('pre', None) return True if (label is not None and pre is not None) else False
def function[pre_release, parameter[self]]: constant[ Return true if version is a pre-release. ] variable[label] assign[=] call[name[self].version_info.get, parameter[constant[label], constant[None]]] variable[pre] assign[=] call[name[self].version_info.get, parameter[constant[pre], constant[None]]] return[<ast.IfExp object at 0x7da2041da9e0>]
keyword[def] identifier[pre_release] ( identifier[self] ): literal[string] identifier[label] = identifier[self] . identifier[version_info] . identifier[get] ( literal[string] , keyword[None] ) identifier[pre] = identifier[self] . identifier[version_info] . identifier[get] ( literal[string] , keyword[None] ) keyword[return] keyword[True] keyword[if] ( identifier[label] keyword[is] keyword[not] keyword[None] keyword[and] identifier[pre] keyword[is] keyword[not] keyword[None] ) keyword[else] keyword[False]
def pre_release(self): """ Return true if version is a pre-release. """ label = self.version_info.get('label', None) pre = self.version_info.get('pre', None) return True if label is not None and pre is not None else False
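Since both comparisons in pre_release already yield booleans, the `True if ... else False` wrapper is redundant; an equivalent, terser form (shown as a free function for illustration):

def pre_release(version_info):
    return version_info.get("label") is not None and version_info.get("pre") is not None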
def split_input(cls, job_config): """Inherit docs.""" params = job_config.input_reader_params shard_count = job_config.shard_count query_spec = cls._get_query_spec(params) if not property_range.should_shard_by_property_range(query_spec.filters): return super(ModelDatastoreInputReader, cls).split_input(job_config) p_range = property_range.PropertyRange(query_spec.filters, query_spec.model_class_path) p_ranges = p_range.split(shard_count) # User specified a namespace. if query_spec.ns: ns_range = namespace_range.NamespaceRange( namespace_start=query_spec.ns, namespace_end=query_spec.ns, _app=query_spec.app) ns_ranges = [copy.copy(ns_range) for _ in p_ranges] else: ns_keys = namespace_range.get_namespace_keys( query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD+1) if not ns_keys: return # User doesn't specify ns but the number of ns is small. # We still split by property range. if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD: ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app) for _ in p_ranges] # Lots of namespaces. Split by ns. else: ns_ranges = namespace_range.NamespaceRange.split(n=shard_count, contiguous=False, can_query=lambda: True, _app=query_spec.app) p_ranges = [copy.copy(p_range) for _ in ns_ranges] assert len(p_ranges) == len(ns_ranges) iters = [ db_iters.RangeIteratorFactory.create_property_range_iterator( p, ns, query_spec) for p, ns in zip(p_ranges, ns_ranges)] return [cls(i) for i in iters]
def function[split_input, parameter[cls, job_config]]: constant[Inherit docs.] variable[params] assign[=] name[job_config].input_reader_params variable[shard_count] assign[=] name[job_config].shard_count variable[query_spec] assign[=] call[name[cls]._get_query_spec, parameter[name[params]]] if <ast.UnaryOp object at 0x7da2044c1210> begin[:] return[call[call[name[super], parameter[name[ModelDatastoreInputReader], name[cls]]].split_input, parameter[name[job_config]]]] variable[p_range] assign[=] call[name[property_range].PropertyRange, parameter[name[query_spec].filters, name[query_spec].model_class_path]] variable[p_ranges] assign[=] call[name[p_range].split, parameter[name[shard_count]]] if name[query_spec].ns begin[:] variable[ns_range] assign[=] call[name[namespace_range].NamespaceRange, parameter[]] variable[ns_ranges] assign[=] <ast.ListComp object at 0x7da2054a6b60> assert[compare[call[name[len], parameter[name[p_ranges]]] equal[==] call[name[len], parameter[name[ns_ranges]]]]] variable[iters] assign[=] <ast.ListComp object at 0x7da20e956320> return[<ast.ListComp object at 0x7da20e9566b0>]
keyword[def] identifier[split_input] ( identifier[cls] , identifier[job_config] ): literal[string] identifier[params] = identifier[job_config] . identifier[input_reader_params] identifier[shard_count] = identifier[job_config] . identifier[shard_count] identifier[query_spec] = identifier[cls] . identifier[_get_query_spec] ( identifier[params] ) keyword[if] keyword[not] identifier[property_range] . identifier[should_shard_by_property_range] ( identifier[query_spec] . identifier[filters] ): keyword[return] identifier[super] ( identifier[ModelDatastoreInputReader] , identifier[cls] ). identifier[split_input] ( identifier[job_config] ) identifier[p_range] = identifier[property_range] . identifier[PropertyRange] ( identifier[query_spec] . identifier[filters] , identifier[query_spec] . identifier[model_class_path] ) identifier[p_ranges] = identifier[p_range] . identifier[split] ( identifier[shard_count] ) keyword[if] identifier[query_spec] . identifier[ns] : identifier[ns_range] = identifier[namespace_range] . identifier[NamespaceRange] ( identifier[namespace_start] = identifier[query_spec] . identifier[ns] , identifier[namespace_end] = identifier[query_spec] . identifier[ns] , identifier[_app] = identifier[query_spec] . identifier[app] ) identifier[ns_ranges] =[ identifier[copy] . identifier[copy] ( identifier[ns_range] ) keyword[for] identifier[_] keyword[in] identifier[p_ranges] ] keyword[else] : identifier[ns_keys] = identifier[namespace_range] . identifier[get_namespace_keys] ( identifier[query_spec] . identifier[app] , identifier[cls] . identifier[MAX_NAMESPACES_FOR_KEY_SHARD] + literal[int] ) keyword[if] keyword[not] identifier[ns_keys] : keyword[return] keyword[if] identifier[len] ( identifier[ns_keys] )<= identifier[cls] . identifier[MAX_NAMESPACES_FOR_KEY_SHARD] : identifier[ns_ranges] =[ identifier[namespace_range] . identifier[NamespaceRange] ( identifier[_app] = identifier[query_spec] . identifier[app] ) keyword[for] identifier[_] keyword[in] identifier[p_ranges] ] keyword[else] : identifier[ns_ranges] = identifier[namespace_range] . identifier[NamespaceRange] . identifier[split] ( identifier[n] = identifier[shard_count] , identifier[contiguous] = keyword[False] , identifier[can_query] = keyword[lambda] : keyword[True] , identifier[_app] = identifier[query_spec] . identifier[app] ) identifier[p_ranges] =[ identifier[copy] . identifier[copy] ( identifier[p_range] ) keyword[for] identifier[_] keyword[in] identifier[ns_ranges] ] keyword[assert] identifier[len] ( identifier[p_ranges] )== identifier[len] ( identifier[ns_ranges] ) identifier[iters] =[ identifier[db_iters] . identifier[RangeIteratorFactory] . identifier[create_property_range_iterator] ( identifier[p] , identifier[ns] , identifier[query_spec] ) keyword[for] identifier[p] , identifier[ns] keyword[in] identifier[zip] ( identifier[p_ranges] , identifier[ns_ranges] )] keyword[return] [ identifier[cls] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[iters] ]
def split_input(cls, job_config): """Inherit docs.""" params = job_config.input_reader_params shard_count = job_config.shard_count query_spec = cls._get_query_spec(params) if not property_range.should_shard_by_property_range(query_spec.filters): return super(ModelDatastoreInputReader, cls).split_input(job_config) # depends on [control=['if'], data=[]] p_range = property_range.PropertyRange(query_spec.filters, query_spec.model_class_path) p_ranges = p_range.split(shard_count) # User specified a namespace. if query_spec.ns: ns_range = namespace_range.NamespaceRange(namespace_start=query_spec.ns, namespace_end=query_spec.ns, _app=query_spec.app) ns_ranges = [copy.copy(ns_range) for _ in p_ranges] # depends on [control=['if'], data=[]] else: ns_keys = namespace_range.get_namespace_keys(query_spec.app, cls.MAX_NAMESPACES_FOR_KEY_SHARD + 1) if not ns_keys: return # depends on [control=['if'], data=[]] # User doesn't specify ns but the number of ns is small. # We still split by property range. if len(ns_keys) <= cls.MAX_NAMESPACES_FOR_KEY_SHARD: ns_ranges = [namespace_range.NamespaceRange(_app=query_spec.app) for _ in p_ranges] # depends on [control=['if'], data=[]] else: # Lots of namespaces. Split by ns. ns_ranges = namespace_range.NamespaceRange.split(n=shard_count, contiguous=False, can_query=lambda : True, _app=query_spec.app) p_ranges = [copy.copy(p_range) for _ in ns_ranges] assert len(p_ranges) == len(ns_ranges) iters = [db_iters.RangeIteratorFactory.create_property_range_iterator(p, ns, query_spec) for (p, ns) in zip(p_ranges, ns_ranges)] return [cls(i) for i in iters]
def add_partitioning_indexes(portal): """Adds the indexes for partitioning """ logger.info("Adding partitioning indexes") add_index(portal, catalog_id=CATALOG_ANALYSIS_LISTING, index_name="getAncestorsUIDs", index_attribute="getAncestorsUIDs", index_metatype="KeywordIndex") add_index(portal, catalog_id=CATALOG_ANALYSIS_REQUEST_LISTING, index_name="isRootAncestor", index_attribute="isRootAncestor", index_metatype="BooleanIndex")
def function[add_partitioning_indexes, parameter[portal]]: constant[Adds the indexes for partitioning ] call[name[logger].info, parameter[constant[Adding partitioning indexes]]] call[name[add_index], parameter[name[portal]]] call[name[add_index], parameter[name[portal]]]
keyword[def] identifier[add_partitioning_indexes] ( identifier[portal] ): literal[string] identifier[logger] . identifier[info] ( literal[string] ) identifier[add_index] ( identifier[portal] , identifier[catalog_id] = identifier[CATALOG_ANALYSIS_LISTING] , identifier[index_name] = literal[string] , identifier[index_attribute] = literal[string] , identifier[index_metatype] = literal[string] ) identifier[add_index] ( identifier[portal] , identifier[catalog_id] = identifier[CATALOG_ANALYSIS_REQUEST_LISTING] , identifier[index_name] = literal[string] , identifier[index_attribute] = literal[string] , identifier[index_metatype] = literal[string] )
def add_partitioning_indexes(portal): """Adds the indexes for partitioning """ logger.info('Adding partitioning indexes') add_index(portal, catalog_id=CATALOG_ANALYSIS_LISTING, index_name='getAncestorsUIDs', index_attribute='getAncestorsUIDs', index_metatype='KeywordIndex') add_index(portal, catalog_id=CATALOG_ANALYSIS_REQUEST_LISTING, index_name='isRootAncestor', index_attribute='isRootAncestor', index_metatype='BooleanIndex')
def copy_no_data(self): """ Returns a copy of the object without any data. """ return type(self)( [], order=list(self.header_modes), types=self.header_types.copy(), modes=self.header_modes.copy())
def function[copy_no_data, parameter[self]]: constant[ Returns a copy of the object without any data. ] return[call[call[name[type], parameter[name[self]]], parameter[list[[]]]]]
keyword[def] identifier[copy_no_data] ( identifier[self] ): literal[string] keyword[return] identifier[type] ( identifier[self] )( [], identifier[order] = identifier[list] ( identifier[self] . identifier[header_modes] ), identifier[types] = identifier[self] . identifier[header_types] . identifier[copy] (), identifier[modes] = identifier[self] . identifier[header_modes] . identifier[copy] ())
def copy_no_data(self): """ Returns a copy of the object without any data. """ return type(self)([], order=list(self.header_modes), types=self.header_types.copy(), modes=self.header_modes.copy())
def run(url, cmd, log_path, log_level, log_session, force_discovery, print_info):
    """Run the main function."""
    log_level = log_levels[log_level]
    conn = condoor.Connection("host", list(url), log_session=log_session, log_level=log_level, log_dir=log_path)
    try:
        conn.connect(force_discovery=force_discovery)
        if print_info:
            echo_info(conn)
        for command in cmd:
            result = conn.send(command)
            print("\nCommand: {}".format(command))
            print("Result: \n{}".format(result))
    except (condoor.ConnectionError, condoor.ConnectionAuthenticationError,
            condoor.ConnectionTimeoutError, condoor.InvalidHopInfoError,
            condoor.CommandSyntaxError, condoor.CommandTimeoutError,
            condoor.CommandError) as excpt:
        click.echo(excpt)
    finally:
        conn.disconnect()
    return
def function[run, parameter[url, cmd, log_path, log_level, log_session, force_discovery, print_info]]: constant[Run the main function.] variable[log_level] assign[=] call[name[log_levels]][name[log_level]] variable[conn] assign[=] call[name[condoor].Connection, parameter[constant[host], call[name[list], parameter[name[url]]]]] <ast.Try object at 0x7da1b2536a10> return[None]
keyword[def] identifier[run] ( identifier[url] , identifier[cmd] , identifier[log_path] , identifier[log_level] , identifier[log_session] , identifier[force_discovery] , identifier[print_info] ): literal[string] identifier[log_level] = identifier[log_levels] [ identifier[log_level] ] identifier[conn] = identifier[condoor] . identifier[Connection] ( literal[string] , identifier[list] ( identifier[url] ), identifier[log_session] = identifier[log_session] , identifier[log_level] = identifier[log_level] , identifier[log_dir] = identifier[log_path] ) keyword[try] : identifier[conn] . identifier[connect] ( identifier[force_discovery] = identifier[force_discovery] ) keyword[if] identifier[print_info] : identifier[echo_info] ( identifier[conn] ) keyword[for] identifier[command] keyword[in] identifier[cmd] : identifier[result] = identifier[conn] . identifier[send] ( identifier[command] ) identifier[print] ( literal[string] . identifier[format] ( identifier[command] )) identifier[print] ( literal[string] . identifier[format] ( identifier[result] )) keyword[except] ( identifier[condoor] . identifier[ConnectionError] , identifier[condoor] . identifier[ConnectionAuthenticationError] , identifier[condoor] . identifier[ConnectionTimeoutError] , identifier[condoor] . identifier[InvalidHopInfoError] , identifier[condoor] . identifier[CommandSyntaxError] , identifier[condoor] . identifier[CommandTimeoutError] , identifier[condoor] . identifier[CommandError] ) keyword[as] identifier[excpt] : identifier[click] . identifier[echo] ( identifier[excpt] ) keyword[finally] : identifier[conn] . identifier[disconnect] () keyword[return]
def run(url, cmd, log_path, log_level, log_session, force_discovery, print_info):
    """Run the main function.""" 
    log_level = log_levels[log_level]
    conn = condoor.Connection('host', list(url), log_session=log_session, log_level=log_level, log_dir=log_path)
    try:
        conn.connect(force_discovery=force_discovery)
        if print_info:
            echo_info(conn) # depends on [control=['if'], data=[]]
        for command in cmd:
            result = conn.send(command)
            print('\nCommand: {}'.format(command))
            print('Result: \n{}'.format(result)) # depends on [control=['for'], data=['command']] # depends on [control=['try'], data=[]]
    except (condoor.ConnectionError, condoor.ConnectionAuthenticationError, condoor.ConnectionTimeoutError, condoor.InvalidHopInfoError, condoor.CommandSyntaxError, condoor.CommandTimeoutError, condoor.CommandError) as excpt:
        click.echo(excpt) # depends on [control=['except'], data=['excpt']]
    finally:
        conn.disconnect()
    return
def scale_to_zero_one(x): """Take some 1d data and scale it so that min matches 0 and max 1. """ xscaled = x - np.min(x) xscaled /= np.max(xscaled) return xscaled
def function[scale_to_zero_one, parameter[x]]: constant[Take some 1d data and scale it so that min matches 0 and max 1. ] variable[xscaled] assign[=] binary_operation[name[x] - call[name[np].min, parameter[name[x]]]] <ast.AugAssign object at 0x7da204620df0> return[name[xscaled]]
keyword[def] identifier[scale_to_zero_one] ( identifier[x] ): literal[string] identifier[xscaled] = identifier[x] - identifier[np] . identifier[min] ( identifier[x] ) identifier[xscaled] /= identifier[np] . identifier[max] ( identifier[xscaled] ) keyword[return] identifier[xscaled]
def scale_to_zero_one(x): """Take some 1d data and scale it so that min matches 0 and max 1. """ xscaled = x - np.min(x) xscaled /= np.max(xscaled) return xscaled
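A quick numeric check of the min-max scaling above. One caveat worth knowing: a constant input makes np.max(xscaled) zero, so the in-place division would produce NaNs; callers may want to guard that case.

import numpy as np

x = np.array([2.0, 4.0, 6.0])
xscaled = x - np.min(x)      # [0., 2., 4.]
xscaled /= np.max(xscaled)   # [0., 0.5, 1.]
print(xscaled)               # -> [0.  0.5 1. ]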
def special_key_assignment(self): """ Determines the keycodes for common special keys on the keyboard. These are integer values and can be passed to the other key methods. Generally speaking, these are non-printable codes. """ #This set of keys compiled using the X11 keysymdef.h file as reference #They comprise a relatively universal set of keys, though there may be #exceptions which may come up for other OSes and vendors. Countless #special cases exist which are not handled here, but may be extended. #TTY Function Keys self.backspace_key = self.lookup_character_keycode('BackSpace') self.tab_key = self.lookup_character_keycode('Tab') self.linefeed_key = self.lookup_character_keycode('Linefeed') self.clear_key = self.lookup_character_keycode('Clear') self.return_key = self.lookup_character_keycode('Return') self.enter_key = self.return_key # Because many keyboards call it "Enter" self.pause_key = self.lookup_character_keycode('Pause') self.scroll_lock_key = self.lookup_character_keycode('Scroll_Lock') self.sys_req_key = self.lookup_character_keycode('Sys_Req') self.escape_key = self.lookup_character_keycode('Escape') self.delete_key = self.lookup_character_keycode('Delete') #Modifier Keys self.shift_l_key = self.lookup_character_keycode('Shift_L') self.shift_r_key = self.lookup_character_keycode('Shift_R') self.shift_key = self.shift_l_key # Default Shift is left Shift self.alt_l_key = self.lookup_character_keycode('Alt_L') self.alt_r_key = self.lookup_character_keycode('Alt_R') self.altgr_key = self.lookup_character_keycode('ISO_Level3_Shift') self.alt_key = self.alt_l_key # Default Alt is left Alt self.control_l_key = self.lookup_character_keycode('Control_L') self.control_r_key = self.lookup_character_keycode('Control_R') self.control_key = self.control_l_key # Default Ctrl is left Ctrl self.caps_lock_key = self.lookup_character_keycode('Caps_Lock') self.capital_key = self.caps_lock_key # Some may know it as Capital self.shift_lock_key = self.lookup_character_keycode('Shift_Lock') self.meta_l_key = self.lookup_character_keycode('Meta_L') self.meta_r_key = self.lookup_character_keycode('Meta_R') self.super_l_key = self.lookup_character_keycode('Super_L') self.windows_l_key = self.super_l_key # Cross-support; also it's printed there self.super_r_key = self.lookup_character_keycode('Super_R') self.windows_r_key = self.super_r_key # Cross-support; also it's printed there self.hyper_l_key = self.lookup_character_keycode('Hyper_L') self.hyper_r_key = self.lookup_character_keycode('Hyper_R') #Cursor Control and Motion self.home_key = self.lookup_character_keycode('Home') self.up_key = self.lookup_character_keycode('Up') self.down_key = self.lookup_character_keycode('Down') self.left_key = self.lookup_character_keycode('Left') self.right_key = self.lookup_character_keycode('Right') self.end_key = self.lookup_character_keycode('End') self.begin_key = self.lookup_character_keycode('Begin') self.page_up_key = self.lookup_character_keycode('Page_Up') self.page_down_key = self.lookup_character_keycode('Page_Down') self.prior_key = self.lookup_character_keycode('Prior') self.next_key = self.lookup_character_keycode('Next') #Misc Functions self.select_key = self.lookup_character_keycode('Select') self.print_key = self.lookup_character_keycode('Print') self.print_screen_key = self.print_key # Seems to be the same thing self.snapshot_key = self.print_key # Another name for printscreen self.execute_key = self.lookup_character_keycode('Execute') self.insert_key = self.lookup_character_keycode('Insert') 
self.undo_key = self.lookup_character_keycode('Undo')
self.redo_key = self.lookup_character_keycode('Redo')
self.menu_key = self.lookup_character_keycode('Menu')
self.apps_key = self.menu_key  # Windows...
self.find_key = self.lookup_character_keycode('Find')
self.cancel_key = self.lookup_character_keycode('Cancel')
self.help_key = self.lookup_character_keycode('Help')
self.break_key = self.lookup_character_keycode('Break')
self.mode_switch_key = self.lookup_character_keycode('Mode_switch')
self.script_switch_key = self.lookup_character_keycode('script_switch')
self.num_lock_key = self.lookup_character_keycode('Num_Lock')
#Keypad Keys: Dictionary structure
keypad = ['Space', 'Tab', 'Enter', 'F1', 'F2', 'F3', 'F4', 'Home', 'Left',
          'Up', 'Right', 'Down', 'Prior', 'Page_Up', 'Next', 'Page_Down',
          'End', 'Begin', 'Insert', 'Delete', 'Equal', 'Multiply', 'Add',
          'Separator', 'Subtract', 'Decimal', 'Divide',
          0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
self.keypad_keys = dict((k, self.lookup_character_keycode('KP_'+str(k))) for k in keypad)
self.numpad_keys = self.keypad_keys
#Function Keys/ Auxiliary Keys
#FKeys
self.function_keys = [None] + [self.lookup_character_keycode('F'+str(i)) for i in range(1,36)]
#LKeys
self.l_keys = [None] + [self.lookup_character_keycode('L'+str(i)) for i in range(1,11)]
#RKeys
self.r_keys = [None] + [self.lookup_character_keycode('R'+str(i)) for i in range(1,16)]
#Unsupported keys from windows
self.kana_key = None
self.hangeul_key = None  # old name - should be here for compatibility
self.hangul_key = None
self.junjua_key = None
self.final_key = None
self.hanja_key = None
self.kanji_key = None
self.convert_key = None
self.nonconvert_key = None
self.accept_key = None
self.modechange_key = None
self.sleep_key = None
def function[special_key_assignment, parameter[self]]: constant[ Determines the keycodes for common special keys on the keyboard. These are integer values and can be passed to the other key methods. Generally speaking, these are non-printable codes. ] name[self].backspace_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[BackSpace]]] name[self].tab_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Tab]]] name[self].linefeed_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Linefeed]]] name[self].clear_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Clear]]] name[self].return_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Return]]] name[self].enter_key assign[=] name[self].return_key name[self].pause_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Pause]]] name[self].scroll_lock_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Scroll_Lock]]] name[self].sys_req_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Sys_Req]]] name[self].escape_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Escape]]] name[self].delete_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Delete]]] name[self].shift_l_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Shift_L]]] name[self].shift_r_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Shift_R]]] name[self].shift_key assign[=] name[self].shift_l_key name[self].alt_l_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Alt_L]]] name[self].alt_r_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Alt_R]]] name[self].altgr_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[ISO_Level3_Shift]]] name[self].alt_key assign[=] name[self].alt_l_key name[self].control_l_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Control_L]]] name[self].control_r_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Control_R]]] name[self].control_key assign[=] name[self].control_l_key name[self].caps_lock_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Caps_Lock]]] name[self].capital_key assign[=] name[self].caps_lock_key name[self].shift_lock_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Shift_Lock]]] name[self].meta_l_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Meta_L]]] name[self].meta_r_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Meta_R]]] name[self].super_l_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Super_L]]] name[self].windows_l_key assign[=] name[self].super_l_key name[self].super_r_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Super_R]]] name[self].windows_r_key assign[=] name[self].super_r_key name[self].hyper_l_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Hyper_L]]] name[self].hyper_r_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Hyper_R]]] name[self].home_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Home]]] name[self].up_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Up]]] name[self].down_key assign[=] call[name[self].lookup_character_keycode, 
parameter[constant[Down]]] name[self].left_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Left]]] name[self].right_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Right]]] name[self].end_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[End]]] name[self].begin_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Begin]]] name[self].page_up_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Page_Up]]] name[self].page_down_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Page_Down]]] name[self].prior_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Prior]]] name[self].next_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Next]]] name[self].select_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Select]]] name[self].print_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Print]]] name[self].print_screen_key assign[=] name[self].print_key name[self].snapshot_key assign[=] name[self].print_key name[self].execute_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Execute]]] name[self].insert_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Insert]]] name[self].undo_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Undo]]] name[self].redo_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Redo]]] name[self].menu_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Menu]]] name[self].apps_key assign[=] name[self].menu_key name[self].find_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Find]]] name[self].cancel_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Cancel]]] name[self].help_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Help]]] name[self].break_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Break]]] name[self].mode_switch_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Mode_switch]]] name[self].script_switch_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[script_switch]]] name[self].num_lock_key assign[=] call[name[self].lookup_character_keycode, parameter[constant[Num_Lock]]] variable[keypad] assign[=] list[[<ast.Constant object at 0x7da2054a79d0>, <ast.Constant object at 0x7da2054a4b50>, <ast.Constant object at 0x7da2054a4a60>, <ast.Constant object at 0x7da2054a5270>, <ast.Constant object at 0x7da2054a4940>, <ast.Constant object at 0x7da2054a4ca0>, <ast.Constant object at 0x7da2054a77c0>, <ast.Constant object at 0x7da2054a4c70>, <ast.Constant object at 0x7da2054a5300>, <ast.Constant object at 0x7da2054a4af0>, <ast.Constant object at 0x7da2054a5900>, <ast.Constant object at 0x7da2054a55a0>, <ast.Constant object at 0x7da2054a7790>, <ast.Constant object at 0x7da2054a48b0>, <ast.Constant object at 0x7da2054a5de0>, <ast.Constant object at 0x7da2054a72e0>, <ast.Constant object at 0x7da2054a4790>, <ast.Constant object at 0x7da2054a6920>, <ast.Constant object at 0x7da2054a5000>, <ast.Constant object at 0x7da2054a6f50>, <ast.Constant object at 0x7da2054a7700>, <ast.Constant object at 0x7da2054a5a80>, <ast.Constant object at 0x7da2054a6b30>, <ast.Constant object at 0x7da2054a40a0>, <ast.Constant object at 0x7da2054a7610>, <ast.Constant object at 0x7da2054a6a40>, 
<ast.Constant object at 0x7da2054a6c80>, <ast.Constant object at 0x7da2054a5180>, <ast.Constant object at 0x7da2054a7100>, <ast.Constant object at 0x7da2054a4fd0>, <ast.Constant object at 0x7da2054a54e0>, <ast.Constant object at 0x7da2054a4cd0>, <ast.Constant object at 0x7da2054a7a00>, <ast.Constant object at 0x7da2054a6710>, <ast.Constant object at 0x7da2054a7fa0>, <ast.Constant object at 0x7da2054a5e70>, <ast.Constant object at 0x7da2054a6cb0>]] name[self].keypad_keys assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da2054a6b90>]] name[self].numpad_keys assign[=] name[self].keypad_keys name[self].function_keys assign[=] binary_operation[list[[<ast.Constant object at 0x7da2054a76d0>]] + <ast.ListComp object at 0x7da2054a4f10>] name[self].l_keys assign[=] binary_operation[list[[<ast.Constant object at 0x7da2054a6e90>]] + <ast.ListComp object at 0x7da2054a4550>] name[self].r_keys assign[=] binary_operation[list[[<ast.Constant object at 0x7da2054a51e0>]] + <ast.ListComp object at 0x7da2054a7370>] name[self].kana_key assign[=] constant[None] name[self].hangeul_key assign[=] constant[None] name[self].hangul_key assign[=] constant[None] name[self].junjua_key assign[=] constant[None] name[self].final_key assign[=] constant[None] name[self].hanja_key assign[=] constant[None] name[self].kanji_key assign[=] constant[None] name[self].convert_key assign[=] constant[None] name[self].nonconvert_key assign[=] constant[None] name[self].accept_key assign[=] constant[None] name[self].modechange_key assign[=] constant[None] name[self].sleep_key assign[=] constant[None]
keyword[def] identifier[special_key_assignment] ( identifier[self] ): literal[string] identifier[self] . identifier[backspace_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[tab_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[linefeed_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[clear_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[return_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[enter_key] = identifier[self] . identifier[return_key] identifier[self] . identifier[pause_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[scroll_lock_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[sys_req_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[escape_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[delete_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[shift_l_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[shift_r_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[shift_key] = identifier[self] . identifier[shift_l_key] identifier[self] . identifier[alt_l_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[alt_r_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[altgr_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[alt_key] = identifier[self] . identifier[alt_l_key] identifier[self] . identifier[control_l_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[control_r_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[control_key] = identifier[self] . identifier[control_l_key] identifier[self] . identifier[caps_lock_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[capital_key] = identifier[self] . identifier[caps_lock_key] identifier[self] . identifier[shift_lock_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[meta_l_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[meta_r_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[super_l_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[windows_l_key] = identifier[self] . identifier[super_l_key] identifier[self] . identifier[super_r_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[windows_r_key] = identifier[self] . identifier[super_r_key] identifier[self] . identifier[hyper_l_key] = identifier[self] . 
identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[hyper_r_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[home_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[up_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[down_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[left_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[right_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[end_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[begin_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[page_up_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[page_down_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[prior_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[next_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[select_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[print_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[print_screen_key] = identifier[self] . identifier[print_key] identifier[self] . identifier[snapshot_key] = identifier[self] . identifier[print_key] identifier[self] . identifier[execute_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[insert_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[undo_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[redo_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[menu_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[apps_key] = identifier[self] . identifier[menu_key] identifier[self] . identifier[find_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[cancel_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[help_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[break_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[mode_switch_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[script_switch_key] = identifier[self] . identifier[lookup_character_keycode] ( literal[string] ) identifier[self] . identifier[num_lock_key] = identifier[self] . 
identifier[lookup_character_keycode] ( literal[string] ) identifier[keypad] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ] identifier[self] . identifier[keypad_keys] = identifier[dict] (( identifier[k] , identifier[self] . identifier[lookup_character_keycode] ( literal[string] + identifier[str] ( identifier[k] ))) keyword[for] identifier[k] keyword[in] identifier[keypad] ) identifier[self] . identifier[numpad_keys] = identifier[self] . identifier[keypad_keys] identifier[self] . identifier[function_keys] =[ keyword[None] ]+[ identifier[self] . identifier[lookup_character_keycode] ( literal[string] + identifier[str] ( identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )] identifier[self] . identifier[l_keys] =[ keyword[None] ]+[ identifier[self] . identifier[lookup_character_keycode] ( literal[string] + identifier[str] ( identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )] identifier[self] . identifier[r_keys] =[ keyword[None] ]+[ identifier[self] . identifier[lookup_character_keycode] ( literal[string] + identifier[str] ( identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] )] identifier[self] . identifier[kana_key] = keyword[None] identifier[self] . identifier[hangeul_key] = keyword[None] identifier[self] . identifier[hangul_key] = keyword[None] identifier[self] . identifier[junjua_key] = keyword[None] identifier[self] . identifier[final_key] = keyword[None] identifier[self] . identifier[hanja_key] = keyword[None] identifier[self] . identifier[kanji_key] = keyword[None] identifier[self] . identifier[convert_key] = keyword[None] identifier[self] . identifier[nonconvert_key] = keyword[None] identifier[self] . identifier[accept_key] = keyword[None] identifier[self] . identifier[modechange_key] = keyword[None] identifier[self] . identifier[sleep_key] = keyword[None]
def special_key_assignment(self): """ Determines the keycodes for common special keys on the keyboard. These are integer values and can be passed to the other key methods. Generally speaking, these are non-printable codes. """ #This set of keys compiled using the X11 keysymdef.h file as reference #They comprise a relatively universal set of keys, though there may be #exceptions which may come up for other OSes and vendors. Countless #special cases exist which are not handled here, but may be extended. #TTY Function Keys self.backspace_key = self.lookup_character_keycode('BackSpace') self.tab_key = self.lookup_character_keycode('Tab') self.linefeed_key = self.lookup_character_keycode('Linefeed') self.clear_key = self.lookup_character_keycode('Clear') self.return_key = self.lookup_character_keycode('Return') self.enter_key = self.return_key # Because many keyboards call it "Enter" self.pause_key = self.lookup_character_keycode('Pause') self.scroll_lock_key = self.lookup_character_keycode('Scroll_Lock') self.sys_req_key = self.lookup_character_keycode('Sys_Req') self.escape_key = self.lookup_character_keycode('Escape') self.delete_key = self.lookup_character_keycode('Delete') #Modifier Keys self.shift_l_key = self.lookup_character_keycode('Shift_L') self.shift_r_key = self.lookup_character_keycode('Shift_R') self.shift_key = self.shift_l_key # Default Shift is left Shift self.alt_l_key = self.lookup_character_keycode('Alt_L') self.alt_r_key = self.lookup_character_keycode('Alt_R') self.altgr_key = self.lookup_character_keycode('ISO_Level3_Shift') self.alt_key = self.alt_l_key # Default Alt is left Alt self.control_l_key = self.lookup_character_keycode('Control_L') self.control_r_key = self.lookup_character_keycode('Control_R') self.control_key = self.control_l_key # Default Ctrl is left Ctrl self.caps_lock_key = self.lookup_character_keycode('Caps_Lock') self.capital_key = self.caps_lock_key # Some may know it as Capital self.shift_lock_key = self.lookup_character_keycode('Shift_Lock') self.meta_l_key = self.lookup_character_keycode('Meta_L') self.meta_r_key = self.lookup_character_keycode('Meta_R') self.super_l_key = self.lookup_character_keycode('Super_L') self.windows_l_key = self.super_l_key # Cross-support; also it's printed there self.super_r_key = self.lookup_character_keycode('Super_R') self.windows_r_key = self.super_r_key # Cross-support; also it's printed there self.hyper_l_key = self.lookup_character_keycode('Hyper_L') self.hyper_r_key = self.lookup_character_keycode('Hyper_R') #Cursor Control and Motion self.home_key = self.lookup_character_keycode('Home') self.up_key = self.lookup_character_keycode('Up') self.down_key = self.lookup_character_keycode('Down') self.left_key = self.lookup_character_keycode('Left') self.right_key = self.lookup_character_keycode('Right') self.end_key = self.lookup_character_keycode('End') self.begin_key = self.lookup_character_keycode('Begin') self.page_up_key = self.lookup_character_keycode('Page_Up') self.page_down_key = self.lookup_character_keycode('Page_Down') self.prior_key = self.lookup_character_keycode('Prior') self.next_key = self.lookup_character_keycode('Next') #Misc Functions self.select_key = self.lookup_character_keycode('Select') self.print_key = self.lookup_character_keycode('Print') self.print_screen_key = self.print_key # Seems to be the same thing self.snapshot_key = self.print_key # Another name for printscreen self.execute_key = self.lookup_character_keycode('Execute') self.insert_key = self.lookup_character_keycode('Insert') 
        self.undo_key = self.lookup_character_keycode('Undo')
        self.redo_key = self.lookup_character_keycode('Redo')
        self.menu_key = self.lookup_character_keycode('Menu')
        self.apps_key = self.menu_key  # Windows...
        self.find_key = self.lookup_character_keycode('Find')
        self.cancel_key = self.lookup_character_keycode('Cancel')
        self.help_key = self.lookup_character_keycode('Help')
        self.break_key = self.lookup_character_keycode('Break')
        self.mode_switch_key = self.lookup_character_keycode('Mode_switch')
        self.script_switch_key = self.lookup_character_keycode('script_switch')
        self.num_lock_key = self.lookup_character_keycode('Num_Lock')
        #Keypad Keys: Dictionary structure
        keypad = ['Space', 'Tab', 'Enter', 'F1', 'F2', 'F3', 'F4', 'Home',
                  'Left', 'Up', 'Right', 'Down', 'Prior', 'Page_Up', 'Next',
                  'Page_Down', 'End', 'Begin', 'Insert', 'Delete', 'Equal',
                  'Multiply', 'Add', 'Separator', 'Subtract', 'Decimal',
                  'Divide', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        self.keypad_keys = dict((k, self.lookup_character_keycode('KP_' + str(k))) for k in keypad)
        self.numpad_keys = self.keypad_keys
        #Function Keys/ Auxiliary Keys
        #FKeys
        self.function_keys = [None] + [self.lookup_character_keycode('F' + str(i)) for i in range(1, 36)]
        #LKeys
        self.l_keys = [None] + [self.lookup_character_keycode('L' + str(i)) for i in range(1, 11)]
        #RKeys
        self.r_keys = [None] + [self.lookup_character_keycode('R' + str(i)) for i in range(1, 16)]
        #Unsupported keys from Windows
        self.kana_key = None
        self.hangeul_key = None  # old name - should be here for compatibility
        self.hangul_key = None
        self.junjua_key = None
        self.final_key = None
        self.hanja_key = None
        self.kanji_key = None
        self.convert_key = None
        self.nonconvert_key = None
        self.accept_key = None
        self.modechange_key = None
        self.sleep_key = None
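A minimal, self-contained sketch of the 1-based list pattern used for function_keys, l_keys, and r_keys above: prepending None shifts every keycode up one slot, so function_keys[1] is F1. The keysym table below is invented for illustration; a real run would query the X11 display through lookup_character_keycode.

_FAKE_KEYSYMS = {'F1': 67, 'F2': 68, 'F3': 69}  # made-up keycodes

def lookup_character_keycode(keysym):
    # Returns None for unknown keysyms, mirroring a failed X11 lookup.
    return _FAKE_KEYSYMS.get(keysym)

function_keys = [None] + [lookup_character_keycode('F' + str(i))
                          for i in range(1, 36)]
assert function_keys[1] == 67     # F1 sits at index 1, not 0
assert function_keys[10] is None  # unmapped keys simply stay None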
def generate_file_rst(fname, target_dir, src_dir, gallery_conf): """Generate the rst file for a given example. Parameters ---------- fname : str Filename of python script target_dir : str Absolute path to directory in documentation where examples are saved src_dir : str Absolute path to directory where source examples are stored gallery_conf : dict Contains the configuration of Sphinx-Gallery Returns ------- intro: str The introduction of the example time_elapsed : float seconds required to run the script """ src_file = os.path.normpath(os.path.join(src_dir, fname)) target_file = os.path.join(target_dir, fname) _replace_md5(src_file, target_file, 'copy') intro, _ = extract_intro_and_title(fname, get_docstring_and_rest(src_file)[0]) executable = executable_script(src_file, gallery_conf) if md5sum_is_current(target_file): if executable: gallery_conf['stale_examples'].append(target_file) return intro, 0 image_dir = os.path.join(target_dir, 'images') if not os.path.exists(image_dir): os.makedirs(image_dir) base_image_name = os.path.splitext(fname)[0] image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png' image_path_template = os.path.join(image_dir, image_fname) script_vars = { 'execute_script': executable, 'image_path_iterator': ImagePathIterator(image_path_template), 'src_file': src_file, 'target_file': target_file} file_conf, script_blocks = split_code_and_text_blocks(src_file) output_blocks, time_elapsed = execute_script(script_blocks, script_vars, gallery_conf) logger.debug("%s ran in : %.2g seconds\n", src_file, time_elapsed) example_rst = rst_blocks(script_blocks, output_blocks, file_conf, gallery_conf) memory_used = gallery_conf['memory_base'] + script_vars['memory_delta'] if not executable: time_elapsed = memory_used = 0. # don't let the output change save_rst_example(example_rst, target_file, time_elapsed, memory_used, gallery_conf) save_thumbnail(image_path_template, src_file, file_conf, gallery_conf) example_nb = jupyter_notebook(script_blocks, gallery_conf) ipy_fname = replace_py_ipynb(target_file) + '.new' save_notebook(example_nb, ipy_fname) _replace_md5(ipy_fname) return intro, time_elapsed
def function[generate_file_rst, parameter[fname, target_dir, src_dir, gallery_conf]]: constant[Generate the rst file for a given example. Parameters ---------- fname : str Filename of python script target_dir : str Absolute path to directory in documentation where examples are saved src_dir : str Absolute path to directory where source examples are stored gallery_conf : dict Contains the configuration of Sphinx-Gallery Returns ------- intro: str The introduction of the example time_elapsed : float seconds required to run the script ] variable[src_file] assign[=] call[name[os].path.normpath, parameter[call[name[os].path.join, parameter[name[src_dir], name[fname]]]]] variable[target_file] assign[=] call[name[os].path.join, parameter[name[target_dir], name[fname]]] call[name[_replace_md5], parameter[name[src_file], name[target_file], constant[copy]]] <ast.Tuple object at 0x7da1b2344580> assign[=] call[name[extract_intro_and_title], parameter[name[fname], call[call[name[get_docstring_and_rest], parameter[name[src_file]]]][constant[0]]]] variable[executable] assign[=] call[name[executable_script], parameter[name[src_file], name[gallery_conf]]] if call[name[md5sum_is_current], parameter[name[target_file]]] begin[:] if name[executable] begin[:] call[call[name[gallery_conf]][constant[stale_examples]].append, parameter[name[target_file]]] return[tuple[[<ast.Name object at 0x7da1b2346980>, <ast.Constant object at 0x7da1b2347670>]]] variable[image_dir] assign[=] call[name[os].path.join, parameter[name[target_dir], constant[images]]] if <ast.UnaryOp object at 0x7da1b2346320> begin[:] call[name[os].makedirs, parameter[name[image_dir]]] variable[base_image_name] assign[=] call[call[name[os].path.splitext, parameter[name[fname]]]][constant[0]] variable[image_fname] assign[=] binary_operation[binary_operation[constant[sphx_glr_] + name[base_image_name]] + constant[_{0:03}.png]] variable[image_path_template] assign[=] call[name[os].path.join, parameter[name[image_dir], name[image_fname]]] variable[script_vars] assign[=] dictionary[[<ast.Constant object at 0x7da1b23458d0>, <ast.Constant object at 0x7da1b2346890>, <ast.Constant object at 0x7da1b2347a60>, <ast.Constant object at 0x7da1b23464d0>], [<ast.Name object at 0x7da1b2347250>, <ast.Call object at 0x7da1b2347280>, <ast.Name object at 0x7da1b2347370>, <ast.Name object at 0x7da1b2345e70>]] <ast.Tuple object at 0x7da1b2346260> assign[=] call[name[split_code_and_text_blocks], parameter[name[src_file]]] <ast.Tuple object at 0x7da1b2347730> assign[=] call[name[execute_script], parameter[name[script_blocks], name[script_vars], name[gallery_conf]]] call[name[logger].debug, parameter[constant[%s ran in : %.2g seconds ], name[src_file], name[time_elapsed]]] variable[example_rst] assign[=] call[name[rst_blocks], parameter[name[script_blocks], name[output_blocks], name[file_conf], name[gallery_conf]]] variable[memory_used] assign[=] binary_operation[call[name[gallery_conf]][constant[memory_base]] + call[name[script_vars]][constant[memory_delta]]] if <ast.UnaryOp object at 0x7da1b23479a0> begin[:] variable[time_elapsed] assign[=] constant[0.0] call[name[save_rst_example], parameter[name[example_rst], name[target_file], name[time_elapsed], name[memory_used], name[gallery_conf]]] call[name[save_thumbnail], parameter[name[image_path_template], name[src_file], name[file_conf], name[gallery_conf]]] variable[example_nb] assign[=] call[name[jupyter_notebook], parameter[name[script_blocks], name[gallery_conf]]] variable[ipy_fname] assign[=] 
binary_operation[call[name[replace_py_ipynb], parameter[name[target_file]]] + constant[.new]] call[name[save_notebook], parameter[name[example_nb], name[ipy_fname]]] call[name[_replace_md5], parameter[name[ipy_fname]]] return[tuple[[<ast.Name object at 0x7da1b26afd90>, <ast.Name object at 0x7da1b26acd00>]]]
keyword[def] identifier[generate_file_rst] ( identifier[fname] , identifier[target_dir] , identifier[src_dir] , identifier[gallery_conf] ): literal[string] identifier[src_file] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[src_dir] , identifier[fname] )) identifier[target_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , identifier[fname] ) identifier[_replace_md5] ( identifier[src_file] , identifier[target_file] , literal[string] ) identifier[intro] , identifier[_] = identifier[extract_intro_and_title] ( identifier[fname] , identifier[get_docstring_and_rest] ( identifier[src_file] )[ literal[int] ]) identifier[executable] = identifier[executable_script] ( identifier[src_file] , identifier[gallery_conf] ) keyword[if] identifier[md5sum_is_current] ( identifier[target_file] ): keyword[if] identifier[executable] : identifier[gallery_conf] [ literal[string] ]. identifier[append] ( identifier[target_file] ) keyword[return] identifier[intro] , literal[int] identifier[image_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[image_dir] ): identifier[os] . identifier[makedirs] ( identifier[image_dir] ) identifier[base_image_name] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fname] )[ literal[int] ] identifier[image_fname] = literal[string] + identifier[base_image_name] + literal[string] identifier[image_path_template] = identifier[os] . identifier[path] . identifier[join] ( identifier[image_dir] , identifier[image_fname] ) identifier[script_vars] ={ literal[string] : identifier[executable] , literal[string] : identifier[ImagePathIterator] ( identifier[image_path_template] ), literal[string] : identifier[src_file] , literal[string] : identifier[target_file] } identifier[file_conf] , identifier[script_blocks] = identifier[split_code_and_text_blocks] ( identifier[src_file] ) identifier[output_blocks] , identifier[time_elapsed] = identifier[execute_script] ( identifier[script_blocks] , identifier[script_vars] , identifier[gallery_conf] ) identifier[logger] . identifier[debug] ( literal[string] , identifier[src_file] , identifier[time_elapsed] ) identifier[example_rst] = identifier[rst_blocks] ( identifier[script_blocks] , identifier[output_blocks] , identifier[file_conf] , identifier[gallery_conf] ) identifier[memory_used] = identifier[gallery_conf] [ literal[string] ]+ identifier[script_vars] [ literal[string] ] keyword[if] keyword[not] identifier[executable] : identifier[time_elapsed] = identifier[memory_used] = literal[int] identifier[save_rst_example] ( identifier[example_rst] , identifier[target_file] , identifier[time_elapsed] , identifier[memory_used] , identifier[gallery_conf] ) identifier[save_thumbnail] ( identifier[image_path_template] , identifier[src_file] , identifier[file_conf] , identifier[gallery_conf] ) identifier[example_nb] = identifier[jupyter_notebook] ( identifier[script_blocks] , identifier[gallery_conf] ) identifier[ipy_fname] = identifier[replace_py_ipynb] ( identifier[target_file] )+ literal[string] identifier[save_notebook] ( identifier[example_nb] , identifier[ipy_fname] ) identifier[_replace_md5] ( identifier[ipy_fname] ) keyword[return] identifier[intro] , identifier[time_elapsed]
def generate_file_rst(fname, target_dir, src_dir, gallery_conf): """Generate the rst file for a given example. Parameters ---------- fname : str Filename of python script target_dir : str Absolute path to directory in documentation where examples are saved src_dir : str Absolute path to directory where source examples are stored gallery_conf : dict Contains the configuration of Sphinx-Gallery Returns ------- intro: str The introduction of the example time_elapsed : float seconds required to run the script """ src_file = os.path.normpath(os.path.join(src_dir, fname)) target_file = os.path.join(target_dir, fname) _replace_md5(src_file, target_file, 'copy') (intro, _) = extract_intro_and_title(fname, get_docstring_and_rest(src_file)[0]) executable = executable_script(src_file, gallery_conf) if md5sum_is_current(target_file): if executable: gallery_conf['stale_examples'].append(target_file) # depends on [control=['if'], data=[]] return (intro, 0) # depends on [control=['if'], data=[]] image_dir = os.path.join(target_dir, 'images') if not os.path.exists(image_dir): os.makedirs(image_dir) # depends on [control=['if'], data=[]] base_image_name = os.path.splitext(fname)[0] image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png' image_path_template = os.path.join(image_dir, image_fname) script_vars = {'execute_script': executable, 'image_path_iterator': ImagePathIterator(image_path_template), 'src_file': src_file, 'target_file': target_file} (file_conf, script_blocks) = split_code_and_text_blocks(src_file) (output_blocks, time_elapsed) = execute_script(script_blocks, script_vars, gallery_conf) logger.debug('%s ran in : %.2g seconds\n', src_file, time_elapsed) example_rst = rst_blocks(script_blocks, output_blocks, file_conf, gallery_conf) memory_used = gallery_conf['memory_base'] + script_vars['memory_delta'] if not executable: time_elapsed = memory_used = 0.0 # don't let the output change # depends on [control=['if'], data=[]] save_rst_example(example_rst, target_file, time_elapsed, memory_used, gallery_conf) save_thumbnail(image_path_template, src_file, file_conf, gallery_conf) example_nb = jupyter_notebook(script_blocks, gallery_conf) ipy_fname = replace_py_ipynb(target_file) + '.new' save_notebook(example_nb, ipy_fname) _replace_md5(ipy_fname) return (intro, time_elapsed)
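A small worked example of the image-naming scheme in generate_file_rst() above: the template embeds a zero-padded counter, so the images produced for one example sort in generation order. The script name here is hypothetical.

import os

base_image_name = os.path.splitext('plot_example.py')[0]
image_fname = 'sphx_glr_' + base_image_name + '_{0:03}.png'
print(image_fname.format(1))   # sphx_glr_plot_example_001.png
print(image_fname.format(12))  # sphx_glr_plot_example_012.png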
def generate(self): """Generate a new string and return it.""" key = self._propose_new_key() while self.key_exists(key): _logger.warning('Previous candidate was used.' ' Regenerating another...') key = self._propose_new_key() return key
def function[generate, parameter[self]]: constant[Generate a new string and return it.] variable[key] assign[=] call[name[self]._propose_new_key, parameter[]] while call[name[self].key_exists, parameter[name[key]]] begin[:] call[name[_logger].warning, parameter[constant[Previous candidate was used. Regenerating another...]]] variable[key] assign[=] call[name[self]._propose_new_key, parameter[]] return[name[key]]
keyword[def] identifier[generate] ( identifier[self] ): literal[string] identifier[key] = identifier[self] . identifier[_propose_new_key] () keyword[while] identifier[self] . identifier[key_exists] ( identifier[key] ): identifier[_logger] . identifier[warning] ( literal[string] literal[string] ) identifier[key] = identifier[self] . identifier[_propose_new_key] () keyword[return] identifier[key]
def generate(self): """Generate a new string and return it.""" key = self._propose_new_key() while self.key_exists(key): _logger.warning('Previous candidate was used. Regenerating another...') key = self._propose_new_key() # depends on [control=['while'], data=[]] return key
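A self-contained sketch of the retry loop in generate() above: keep proposing random keys until one is unused. The two-character alphabet is deliberately tiny so collisions are likely in a short demo, and the set stands in for whatever backing store key_exists() consults.

import random

_taken = {'aa', 'ab', 'ba'}

def _propose_new_key():
    return random.choice('ab') + random.choice('ab')

key = _propose_new_key()
while key in _taken:          # plays the role of self.key_exists(key)
    key = _propose_new_key()  # regenerate, as the method above does
_taken.add(key)
print(key)                    # only 'bb' can ever escape the loop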
def process(self, expression): """ Process color expression args: expression (tuple): color expression returns: str """ a, o, b = expression c1 = self._hextorgb(a) c2 = self._hextorgb(b) r = ['#'] for i in range(3): v = self.operate(c1[i], c2[i], o) if v > 0xff: v = 0xff if v < 0: v = 0 r.append("%02x" % int(v)) return ''.join(r)
def function[process, parameter[self, expression]]: constant[ Process color expression args: expression (tuple): color expression returns: str ] <ast.Tuple object at 0x7da1b00d9270> assign[=] name[expression] variable[c1] assign[=] call[name[self]._hextorgb, parameter[name[a]]] variable[c2] assign[=] call[name[self]._hextorgb, parameter[name[b]]] variable[r] assign[=] list[[<ast.Constant object at 0x7da1aff01cf0>]] for taget[name[i]] in starred[call[name[range], parameter[constant[3]]]] begin[:] variable[v] assign[=] call[name[self].operate, parameter[call[name[c1]][name[i]], call[name[c2]][name[i]], name[o]]] if compare[name[v] greater[>] constant[255]] begin[:] variable[v] assign[=] constant[255] if compare[name[v] less[<] constant[0]] begin[:] variable[v] assign[=] constant[0] call[name[r].append, parameter[binary_operation[constant[%02x] <ast.Mod object at 0x7da2590d6920> call[name[int], parameter[name[v]]]]]] return[call[constant[].join, parameter[name[r]]]]
keyword[def] identifier[process] ( identifier[self] , identifier[expression] ): literal[string] identifier[a] , identifier[o] , identifier[b] = identifier[expression] identifier[c1] = identifier[self] . identifier[_hextorgb] ( identifier[a] ) identifier[c2] = identifier[self] . identifier[_hextorgb] ( identifier[b] ) identifier[r] =[ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ): identifier[v] = identifier[self] . identifier[operate] ( identifier[c1] [ identifier[i] ], identifier[c2] [ identifier[i] ], identifier[o] ) keyword[if] identifier[v] > literal[int] : identifier[v] = literal[int] keyword[if] identifier[v] < literal[int] : identifier[v] = literal[int] identifier[r] . identifier[append] ( literal[string] % identifier[int] ( identifier[v] )) keyword[return] literal[string] . identifier[join] ( identifier[r] )
def process(self, expression): """ Process color expression args: expression (tuple): color expression returns: str """ (a, o, b) = expression c1 = self._hextorgb(a) c2 = self._hextorgb(b) r = ['#'] for i in range(3): v = self.operate(c1[i], c2[i], o) if v > 255: v = 255 # depends on [control=['if'], data=['v']] if v < 0: v = 0 # depends on [control=['if'], data=['v']] r.append('%02x' % int(v)) # depends on [control=['for'], data=['i']] return ''.join(r)
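A worked example of the per-channel arithmetic in process() above, with stand-ins for _hextorgb and operate (here simple addition) so it runs on its own. Note the clamp to the 0x00-0xff range before the two-digit hex formatting.

def _hextorgb(h):
    h = h.lstrip('#')
    return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))

def operate(a, b, op):
    return a + b if op == '+' else a - b

def process(expression):
    a, op, b = expression
    c1, c2 = _hextorgb(a), _hextorgb(b)
    r = ['#']
    for i in range(3):
        v = min(max(operate(c1[i], c2[i], op), 0), 0xff)
        r.append('%02x' % int(v))
    return ''.join(r)

print(process(('#c08040', '+', '#404040')))  # '#ffc080' -- red channel clamped at 0xff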
def Rules(**rules): """Create a dictionary mapping symbols to alternative sequences. >>> Rules(A = "B C | D E") {'A': [['B', 'C'], ['D', 'E']]} """ for (lhs, rhs) in rules.items(): rules[lhs] = [alt.strip().split() for alt in rhs.split('|')] return rules
def function[Rules, parameter[]]: constant[Create a dictionary mapping symbols to alternative sequences. >>> Rules(A = "B C | D E") {'A': [['B', 'C'], ['D', 'E']]} ] for taget[tuple[[<ast.Name object at 0x7da204564f70>, <ast.Name object at 0x7da204564520>]]] in starred[call[name[rules].items, parameter[]]] begin[:] call[name[rules]][name[lhs]] assign[=] <ast.ListComp object at 0x7da18bccae30> return[name[rules]]
keyword[def] identifier[Rules] (** identifier[rules] ): literal[string] keyword[for] ( identifier[lhs] , identifier[rhs] ) keyword[in] identifier[rules] . identifier[items] (): identifier[rules] [ identifier[lhs] ]=[ identifier[alt] . identifier[strip] (). identifier[split] () keyword[for] identifier[alt] keyword[in] identifier[rhs] . identifier[split] ( literal[string] )] keyword[return] identifier[rules]
def Rules(**rules): """Create a dictionary mapping symbols to alternative sequences. >>> Rules(A = "B C | D E") {'A': [['B', 'C'], ['D', 'E']]} """ for (lhs, rhs) in rules.items(): rules[lhs] = [alt.strip().split() for alt in rhs.split('|')] # depends on [control=['for'], data=[]] return rules
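An example use of the Rules() helper above on a toy grammar fragment (assuming the function is in scope): each right-hand side is split on '|' into alternatives and then on whitespace into symbol sequences. The nonterminal names are arbitrary.

grammar = Rules(
    S='NP VP',
    NP='Art N | Pronoun',
)
print(grammar['S'])   # [['NP', 'VP']]
print(grammar['NP'])  # [['Art', 'N'], ['Pronoun']]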
def build(self, builder): """ Build this element :param builder: :return: """ builder.start(self.__class__.__name__) builder.data(self.country_code) builder.end(self.__class__.__name__)
def function[build, parameter[self, builder]]: constant[ Build this element :param builder: :return: ] call[name[builder].start, parameter[name[self].__class__.__name__]] call[name[builder].data, parameter[name[self].country_code]] call[name[builder].end, parameter[name[self].__class__.__name__]]
keyword[def] identifier[build] ( identifier[self] , identifier[builder] ): literal[string] identifier[builder] . identifier[start] ( identifier[self] . identifier[__class__] . identifier[__name__] ) identifier[builder] . identifier[data] ( identifier[self] . identifier[country_code] ) identifier[builder] . identifier[end] ( identifier[self] . identifier[__class__] . identifier[__name__] )
def build(self, builder): """ Build this element :param builder: :return: """ builder.start(self.__class__.__name__) builder.data(self.country_code) builder.end(self.__class__.__name__)
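A hedged sketch of the builder protocol that build() above relies on, using xml.etree's TreeBuilder as a stand-in (its start() also takes an attribute dict, unlike the builder assumed above). The Country class and its country_code field are invented for the demo.

from xml.etree.ElementTree import TreeBuilder, tostring

class Country:
    def __init__(self, country_code):
        self.country_code = country_code

    def build(self, builder):
        builder.start(self.__class__.__name__, {})  # element named after the class
        builder.data(self.country_code)             # text payload
        builder.end(self.__class__.__name__)

builder = TreeBuilder()
builder.start('root', {})
Country('US').build(builder)
builder.end('root')
print(tostring(builder.close()))  # b'<root><Country>US</Country></root>'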
def _get_service_bindings(self, service_name): """ Return the service bindings for the service instance. """ instance = self.get_instance(service_name) return self.api.get(instance['service_bindings_url'])
def function[_get_service_bindings, parameter[self, service_name]]: constant[ Return the service bindings for the service instance. ] variable[instance] assign[=] call[name[self].get_instance, parameter[name[service_name]]] return[call[name[self].api.get, parameter[call[name[instance]][constant[service_bindings_url]]]]]
keyword[def] identifier[_get_service_bindings] ( identifier[self] , identifier[service_name] ): literal[string] identifier[instance] = identifier[self] . identifier[get_instance] ( identifier[service_name] ) keyword[return] identifier[self] . identifier[api] . identifier[get] ( identifier[instance] [ literal[string] ])
def _get_service_bindings(self, service_name): """ Return the service bindings for the service instance. """ instance = self.get_instance(service_name) return self.api.get(instance['service_bindings_url'])
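A minimal stand-in showing the two-step lookup performed by _get_service_bindings() above: resolve the instance record first, then follow the URL embedded in it. The stub Api class and the URL are invented for illustration.

class StubApi:
    _responses = {
        '/v2/service_instances/abc123/service_bindings': [{'app_guid': '42'}],
    }

    def get(self, url):
        return self._responses[url]

def get_service_bindings(api, instance):
    # Follow the link embedded in the instance record, as the method above does.
    return api.get(instance['service_bindings_url'])

instance = {'service_bindings_url':
            '/v2/service_instances/abc123/service_bindings'}
print(get_service_bindings(StubApi(), instance))  # [{'app_guid': '42'}]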
def replace(self, key, initial_value, new_value): """Atomically replace the value of a key with a new value. This compares the current value of a key, then replaces it with a new value if it is equal to a specified value. This operation takes place in a transaction. :param key: key in etcd to replace :param initial_value: old value to replace :type initial_value: bytes or string :param new_value: new value of the key :type new_value: bytes or string :returns: status of transaction, ``True`` if the replace was successful, ``False`` otherwise :rtype: bool """ base64_key = _encode(key) base64_initial_value = _encode(initial_value) base64_new_value = _encode(new_value) txn = { 'compare': [{ 'key': base64_key, 'result': 'EQUAL', 'target': 'VALUE', 'value': base64_initial_value }], 'success': [{ 'request_put': { 'key': base64_key, 'value': base64_new_value, } }], 'failure': [] } result = self.transaction(txn) if 'succeeded' in result: return result['succeeded'] return False
def function[replace, parameter[self, key, initial_value, new_value]]: constant[Atomically replace the value of a key with a new value. This compares the current value of a key, then replaces it with a new value if it is equal to a specified value. This operation takes place in a transaction. :param key: key in etcd to replace :param initial_value: old value to replace :type initial_value: bytes or string :param new_value: new value of the key :type new_value: bytes or string :returns: status of transaction, ``True`` if the replace was successful, ``False`` otherwise :rtype: bool ] variable[base64_key] assign[=] call[name[_encode], parameter[name[key]]] variable[base64_initial_value] assign[=] call[name[_encode], parameter[name[initial_value]]] variable[base64_new_value] assign[=] call[name[_encode], parameter[name[new_value]]] variable[txn] assign[=] dictionary[[<ast.Constant object at 0x7da1b2347430>, <ast.Constant object at 0x7da1b2344130>, <ast.Constant object at 0x7da1b2347490>], [<ast.List object at 0x7da1b2344610>, <ast.List object at 0x7da1b2347940>, <ast.List object at 0x7da1b2345780>]] variable[result] assign[=] call[name[self].transaction, parameter[name[txn]]] if compare[constant[succeeded] in name[result]] begin[:] return[call[name[result]][constant[succeeded]]] return[constant[False]]
keyword[def] identifier[replace] ( identifier[self] , identifier[key] , identifier[initial_value] , identifier[new_value] ): literal[string] identifier[base64_key] = identifier[_encode] ( identifier[key] ) identifier[base64_initial_value] = identifier[_encode] ( identifier[initial_value] ) identifier[base64_new_value] = identifier[_encode] ( identifier[new_value] ) identifier[txn] ={ literal[string] :[{ literal[string] : identifier[base64_key] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[base64_initial_value] }], literal[string] :[{ literal[string] :{ literal[string] : identifier[base64_key] , literal[string] : identifier[base64_new_value] , } }], literal[string] :[] } identifier[result] = identifier[self] . identifier[transaction] ( identifier[txn] ) keyword[if] literal[string] keyword[in] identifier[result] : keyword[return] identifier[result] [ literal[string] ] keyword[return] keyword[False]
def replace(self, key, initial_value, new_value): """Atomically replace the value of a key with a new value. This compares the current value of a key, then replaces it with a new value if it is equal to a specified value. This operation takes place in a transaction. :param key: key in etcd to replace :param initial_value: old value to replace :type initial_value: bytes or string :param new_value: new value of the key :type new_value: bytes or string :returns: status of transaction, ``True`` if the replace was successful, ``False`` otherwise :rtype: bool """ base64_key = _encode(key) base64_initial_value = _encode(initial_value) base64_new_value = _encode(new_value) txn = {'compare': [{'key': base64_key, 'result': 'EQUAL', 'target': 'VALUE', 'value': base64_initial_value}], 'success': [{'request_put': {'key': base64_key, 'value': base64_new_value}}], 'failure': []} result = self.transaction(txn) if 'succeeded' in result: return result['succeeded'] # depends on [control=['if'], data=['result']] return False
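A self-contained illustration of the compare-and-swap semantics the transaction above encodes: the put happens only when the stored value still equals initial_value. A plain dict stands in for the etcd keyspace; the real method ships the comparison to the server so the check and the write are atomic.

_store = {'config': 'v1'}

def replace(key, initial_value, new_value):
    if _store.get(key) == initial_value:  # the 'compare' clause
        _store[key] = new_value           # the 'success' request_put
        return True
    return False                          # the empty 'failure' branch

print(replace('config', 'v1', 'v2'))  # True  -- value matched, swap applied
print(replace('config', 'v1', 'v3'))  # False -- value already changed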
def do_indent(s, width=4, indentfirst=False): """Return a copy of the passed string, each line indented by 4 spaces. The first line is not indented. If you want to change the number of spaces or indent the first line too you can pass additional parameters to the filter: .. sourcecode:: jinja {{ mytext|indent(2, true) }} indent by two spaces and indent the first line too. """ indention = u' ' * width rv = (u'\n' + indention).join(s.splitlines()) if indentfirst: rv = indention + rv return rv
def function[do_indent, parameter[s, width, indentfirst]]: constant[Return a copy of the passed string, each line indented by 4 spaces. The first line is not indented. If you want to change the number of spaces or indent the first line too you can pass additional parameters to the filter: .. sourcecode:: jinja {{ mytext|indent(2, true) }} indent by two spaces and indent the first line too. ] variable[indention] assign[=] binary_operation[constant[ ] * name[width]] variable[rv] assign[=] call[binary_operation[constant[ ] + name[indention]].join, parameter[call[name[s].splitlines, parameter[]]]] if name[indentfirst] begin[:] variable[rv] assign[=] binary_operation[name[indention] + name[rv]] return[name[rv]]
keyword[def] identifier[do_indent] ( identifier[s] , identifier[width] = literal[int] , identifier[indentfirst] = keyword[False] ): literal[string] identifier[indention] = literal[string] * identifier[width] identifier[rv] =( literal[string] + identifier[indention] ). identifier[join] ( identifier[s] . identifier[splitlines] ()) keyword[if] identifier[indentfirst] : identifier[rv] = identifier[indention] + identifier[rv] keyword[return] identifier[rv]
def do_indent(s, width=4, indentfirst=False): """Return a copy of the passed string, each line indented by 4 spaces. The first line is not indented. If you want to change the number of spaces or indent the first line too you can pass additional parameters to the filter: .. sourcecode:: jinja {{ mytext|indent(2, true) }} indent by two spaces and indent the first line too. """ indention = u' ' * width rv = (u'\n' + indention).join(s.splitlines()) if indentfirst: rv = indention + rv # depends on [control=['if'], data=[]] return rv
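A quick check of do_indent() above (assuming it is in scope): inner lines pick up the width-space prefix, and indentfirst controls whether the first line does too.

text = u'first line\nsecond line'
print(repr(do_indent(text, 2)))        # 'first line\n  second line'
print(repr(do_indent(text, 2, True)))  # '  first line\n  second line'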
def run(self):
        """
        Runs all jobs.

        Returns:
            All errors encountered as a list of list.
            [[error_dicts for job 1], [error_dicts for job 2], ....]

        Raises:
            ValidationError: if a job fails validation
            ReturnCodeError: if the process has a return code different from 0
            NonRecoverableError: if an unrecoverable error occurs
            MaxCorrectionsPerJobError: if max_errors_per_job is reached
            MaxCorrectionsError: if max_errors is reached
            MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
        """
        cwd = os.getcwd()
        with ScratchDir(self.scratch_dir, create_symbolic_link=True,
                        copy_to_current_on_exit=True,
                        copy_from_current_on_enter=True) as temp_dir:
            self.total_errors = 0
            start = datetime.datetime.now()
            logger.info("Run started at {} in {}.".format(
                start, temp_dir))
            v = sys.version.replace("\n", " ")
            logger.info("Custodian running on Python version {}".format(v))
            logger.info("Hostname: {}, Cluster: {}".format(
                *get_execution_host_info()))
            try:
                # skip jobs until the restart
                for job_n, job in islice(enumerate(self.jobs, 1),
                                         self.restart, None):
                    self._run_job(job_n, job)
                    # We do a dump of the run log after each job.
                    dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
                           indent=4)
                    # Checkpoint after each job so that we can recover from last
                    # point and remove old checkpoints
                    if self.checkpoint:
                        self.restart = job_n
                        Custodian._save_checkpoint(cwd, job_n)
            except CustodianError as ex:
                logger.error(ex.message)
                if ex.raises:
                    raise
            finally:
                # Log the corrections to a json file.
                logger.info("Logging to {}...".format(Custodian.LOG_FILE))
                dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder,
                       indent=4)
                end = datetime.datetime.now()
                logger.info("Run ended at {}.".format(end))
                run_time = end - start
                logger.info("Run completed. Total time taken = {}."
                            .format(run_time))
                if self.gzipped_output:
                    gzip_dir(".")
            # Cleanup checkpoint files (if any) if run is successful.
            Custodian._delete_checkpoints(cwd)
        return self.run_log
def function[run, parameter[self]]: constant[ Runs all jobs. Returns: All errors encountered as a list of list. [[error_dicts for job 1], [error_dicts for job 2], ....] Raises: ValidationError: if a job fails validation ReturnCodeError: if the process has a return code different from 0 NonRecoverableError: if an unrecoverable occurs MaxCorrectionsPerJobError: if max_errors_per_job is reached MaxCorrectionsError: if max_errors is reached MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached ] variable[cwd] assign[=] call[name[os].getcwd, parameter[]] with call[name[ScratchDir], parameter[name[self].scratch_dir]] begin[:] name[self].total_errors assign[=] constant[0] variable[start] assign[=] call[name[datetime].datetime.now, parameter[]] call[name[logger].info, parameter[call[constant[Run started at {} in {}.].format, parameter[name[start], name[temp_dir]]]]] variable[v] assign[=] call[name[sys].version.replace, parameter[constant[ ], constant[ ]]] call[name[logger].info, parameter[call[constant[Custodian running on Python version {}].format, parameter[name[v]]]]] call[name[logger].info, parameter[call[constant[Hostname: {}, Cluster: {}].format, parameter[<ast.Starred object at 0x7da18bcc9e70>]]]] <ast.Try object at 0x7da18bccaa40> call[name[Custodian]._delete_checkpoints, parameter[name[cwd]]] return[name[self].run_log]
keyword[def] identifier[run] ( identifier[self] ): literal[string] identifier[cwd] = identifier[os] . identifier[getcwd] () keyword[with] identifier[ScratchDir] ( identifier[self] . identifier[scratch_dir] , identifier[create_symbolic_link] = keyword[True] , identifier[copy_to_current_on_exit] = keyword[True] , identifier[copy_from_current_on_enter] = keyword[True] ) keyword[as] identifier[temp_dir] : identifier[self] . identifier[total_errors] = literal[int] identifier[start] = identifier[datetime] . identifier[datetime] . identifier[now] () identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[start] , identifier[temp_dir] )) identifier[v] = identifier[sys] . identifier[version] . identifier[replace] ( literal[string] , literal[string] ) identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[v] )) identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( * identifier[get_execution_host_info] ())) keyword[try] : keyword[for] identifier[job_n] , identifier[job] keyword[in] identifier[islice] ( identifier[enumerate] ( identifier[self] . identifier[jobs] , literal[int] ), identifier[self] . identifier[restart] , keyword[None] ): identifier[self] . identifier[_run_job] ( identifier[job_n] , identifier[job] ) identifier[dumpfn] ( identifier[self] . identifier[run_log] , identifier[Custodian] . identifier[LOG_FILE] , identifier[cls] = identifier[MontyEncoder] , identifier[indent] = literal[int] ) keyword[if] identifier[self] . identifier[checkpoint] : identifier[self] . identifier[restart] = identifier[job_n] identifier[Custodian] . identifier[_save_checkpoint] ( identifier[cwd] , identifier[job_n] ) keyword[except] identifier[CustodianError] keyword[as] identifier[ex] : identifier[logger] . identifier[error] ( identifier[ex] . identifier[message] ) keyword[if] identifier[ex] . identifier[raises] : keyword[raise] keyword[finally] : identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[Custodian] . identifier[LOG_FILE] )) identifier[dumpfn] ( identifier[self] . identifier[run_log] , identifier[Custodian] . identifier[LOG_FILE] , identifier[cls] = identifier[MontyEncoder] , identifier[indent] = literal[int] ) identifier[end] = identifier[datetime] . identifier[datetime] . identifier[now] () identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[end] )) identifier[run_time] = identifier[end] - identifier[start] identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[run_time] )) keyword[if] identifier[self] . identifier[gzipped_output] : identifier[gzip_dir] ( literal[string] ) identifier[Custodian] . identifier[_delete_checkpoints] ( identifier[cwd] ) keyword[return] identifier[self] . identifier[run_log]
def run(self):
        """
        Runs all jobs.

        Returns:
            All errors encountered as a list of list.
            [[error_dicts for job 1], [error_dicts for job 2], ....]

        Raises:
            ValidationError: if a job fails validation
            ReturnCodeError: if the process has a return code different from 0
            NonRecoverableError: if an unrecoverable error occurs
            MaxCorrectionsPerJobError: if max_errors_per_job is reached
            MaxCorrectionsError: if max_errors is reached
            MaxCorrectionsPerHandlerError: if max_errors_per_handler is reached
        """
        cwd = os.getcwd()
        with ScratchDir(self.scratch_dir, create_symbolic_link=True, copy_to_current_on_exit=True, copy_from_current_on_enter=True) as temp_dir:
            self.total_errors = 0
            start = datetime.datetime.now()
            logger.info('Run started at {} in {}.'.format(start, temp_dir))
            v = sys.version.replace('\n', ' ')
            logger.info('Custodian running on Python version {}'.format(v))
            logger.info('Hostname: {}, Cluster: {}'.format(*get_execution_host_info()))
            try:
                # skip jobs until the restart
                for (job_n, job) in islice(enumerate(self.jobs, 1), self.restart, None):
                    self._run_job(job_n, job)
                    # We do a dump of the run log after each job.
                    dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4)
                    # Checkpoint after each job so that we can recover from last
                    # point and remove old checkpoints
                    if self.checkpoint:
                        self.restart = job_n
                        Custodian._save_checkpoint(cwd, job_n)  # depends on [control=['if'], data=[]]  # depends on [control=['for'], data=[]]  # depends on [control=['try'], data=[]]
            except CustodianError as ex:
                logger.error(ex.message)
                if ex.raises:
                    raise  # depends on [control=['if'], data=[]]  # depends on [control=['except'], data=['ex']]
            finally:
                # Log the corrections to a json file.
                logger.info('Logging to {}...'.format(Custodian.LOG_FILE))
                dumpfn(self.run_log, Custodian.LOG_FILE, cls=MontyEncoder, indent=4)
                end = datetime.datetime.now()
                logger.info('Run ended at {}.'.format(end))
                run_time = end - start
                logger.info('Run completed. Total time taken = {}.'.format(run_time))
                if self.gzipped_output:
                    gzip_dir('.')  # depends on [control=['if'], data=[]]
            # Cleanup checkpoint files (if any) if run is successful.
            Custodian._delete_checkpoints(cwd)  # depends on [control=['with'], data=['temp_dir']]
        return self.run_log
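A sketch of the checkpoint-restart arithmetic in run() above: jobs are enumerated from 1, and islice skips the first `restart` entries, so a resumed run continues with the job after the last checkpoint. The job names are made up.

from itertools import islice

jobs = ['relax', 'static', 'bands']
restart = 1  # pretend job 1 finished before the interruption

for job_n, job in islice(enumerate(jobs, 1), restart, None):
    print(job_n, job)
# 2 static
# 3 bands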
def parse(readDataInstance, arrayType, arrayLength):
        """
        Returns a new L{Array} object.

        @type readDataInstance: L{ReadData}
        @param readDataInstance: The L{ReadData} object containing the array data.

        @type arrayType: int
        @param arrayType: The type of L{Array} to be built.

        @type arrayLength: int
        @param arrayLength: The length of the array passed as an argument.

        @rtype: L{Array}
        @return: New L{Array} object.
        """
        newArray = Array(arrayType)

        dataLength = len(readDataInstance)

        if arrayType is TYPE_DWORD:
            toRead = arrayLength * 4
            if dataLength >= toRead:
                for i in range(arrayLength):
                    newArray.append(DWORD(readDataInstance.readDword()))
            else:
                raise excep.DataLengthException("Not enough bytes to read.")

        elif arrayType is TYPE_WORD:
            toRead = arrayLength * 2
            if dataLength >= toRead:
                for i in range(arrayLength):
                    newArray.append(WORD(readDataInstance.readWord()))
            else:
                raise excep.DataLengthException("Not enough bytes to read.")

        elif arrayType is TYPE_QWORD:
            toRead = arrayLength * 8
            if dataLength >= toRead:
                for i in range(arrayLength):
                    newArray.append(QWORD(readDataInstance.readQword()))
            else:
                raise excep.DataLengthException("Not enough bytes to read.")

        elif arrayType is TYPE_BYTE:
            for i in range(arrayLength):
                newArray.append(BYTE(readDataInstance.readByte()))
        else:
            raise excep.ArrayTypeException("Couldn\'t create an array of type %d" % arrayType)

        return newArray
def function[parse, parameter[readDataInstance, arrayType, arrayLength]]: constant[ Returns a new L{Array} object. @type readDataInstance: L{ReadData} @param readDataInstance: The L{ReadData} object containing the array data. @type arrayType: int @param arrayType: The type of L{Array} to be built. @type arrayLength: int @param arrayLength: The length of the array passed as an argument. @rtype: L{Array} @return: New L{Array} object. ] variable[newArray] assign[=] call[name[Array], parameter[name[arrayType]]] variable[dataLength] assign[=] call[name[len], parameter[name[readDataInstance]]] if compare[name[arrayType] is name[TYPE_DWORD]] begin[:] variable[toRead] assign[=] binary_operation[name[arrayLength] * constant[4]] if compare[name[dataLength] greater_or_equal[>=] name[toRead]] begin[:] for taget[name[i]] in starred[call[name[range], parameter[name[arrayLength]]]] begin[:] call[name[newArray].append, parameter[call[name[DWORD], parameter[call[name[readDataInstance].readDword, parameter[]]]]]] return[name[newArray]]
keyword[def] identifier[parse] ( identifier[readDataInstance] , identifier[arrayType] , identifier[arrayLength] ): literal[string] identifier[newArray] = identifier[Array] ( identifier[arrayType] ) identifier[dataLength] = identifier[len] ( identifier[readDataInstance] ) keyword[if] identifier[arrayType] keyword[is] identifier[TYPE_DWORD] : identifier[toRead] = identifier[arrayLength] * literal[int] keyword[if] identifier[dataLength] >= identifier[toRead] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[arrayLength] ): identifier[newArray] . identifier[append] ( identifier[DWORD] ( identifier[readDataInstance] . identifier[readDword] ())) keyword[else] : keyword[raise] identifier[excep] . identifier[DataLengthException] ( literal[string] ) keyword[elif] identifier[arrayType] keyword[is] identifier[TYPE_WORD] : identifier[toRead] = identifier[arrayLength] * literal[int] keyword[if] identifier[dataLength] >= identifier[toRead] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[arrayLength] ): identifier[newArray] . identifier[append] ( identifier[WORD] ( identifier[readDataInstance] . identifier[readWord] ())) keyword[else] : keyword[raise] identifier[excep] . identifier[DataLengthException] ( literal[string] ) keyword[elif] identifier[arrayType] keyword[is] identifier[TYPE_QWORD] : identifier[toRead] = identifier[arrayLength] * literal[int] keyword[if] identifier[dataLength] >= identifier[toRead] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[arrayLength] ): identifier[newArray] . identifier[append] ( identifier[QWORD] ( identifier[readDataInstance] . identifier[readQword] ())) keyword[else] : keyword[raise] identifier[excep] . identifier[DataLengthException] ( literal[string] ) keyword[elif] identifier[arrayType] keyword[is] identifier[TYPE_BYTE] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[arrayLength] ): identifier[newArray] . identifier[append] ( identifier[BYTE] ( identifier[readDataInstance] . identifier[readByte] ())) keyword[else] : keyword[raise] identifier[excep] . identifier[ArrayTypeException] ( literal[string] % identifier[arrayType] ) keyword[return] identifier[newArray]
def parse(readDataInstance, arrayType, arrayLength):
    """
    Returns a new L{Array} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: The L{ReadData} object containing the array data.

    @type arrayType: int
    @param arrayType: The type of L{Array} to be built.

    @type arrayLength: int
    @param arrayLength: The length of the array passed as an argument.

    @rtype: L{Array}
    @return: New L{Array} object.
    """
    newArray = Array(arrayType)
    dataLength = len(readDataInstance)
    if arrayType is TYPE_DWORD:
        toRead = arrayLength * 4
        if dataLength >= toRead:
            for i in range(arrayLength):
                newArray.append(DWORD(readDataInstance.readDword()))  # depends on [control=['for'], data=[]]  # depends on [control=['if'], data=[]]
        else:
            raise excep.DataLengthException('Not enough bytes to read.')  # depends on [control=['if'], data=[]]
    elif arrayType is TYPE_WORD:
        toRead = arrayLength * 2
        if dataLength >= toRead:
            for i in range(arrayLength):
                newArray.append(WORD(readDataInstance.readWord()))  # depends on [control=['for'], data=[]]  # depends on [control=['if'], data=[]]
        else:
            raise excep.DataLengthException('Not enough bytes to read.')  # depends on [control=['if'], data=[]]
    elif arrayType is TYPE_QWORD:
        toRead = arrayLength * 8
        if dataLength >= toRead:
            for i in range(arrayLength):
                newArray.append(QWORD(readDataInstance.readQword()))  # depends on [control=['for'], data=[]]  # depends on [control=['if'], data=[]]
        else:
            raise excep.DataLengthException('Not enough bytes to read.')  # depends on [control=['if'], data=[]]
    elif arrayType is TYPE_BYTE:
        for i in range(arrayLength):
            newArray.append(BYTE(readDataInstance.readByte()))  # depends on [control=['for'], data=[]]  # depends on [control=['if'], data=[]]
    else:
        raise excep.ArrayTypeException("Couldn't create an array of type %d" % arrayType)
    return newArray
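A self-contained analogue of the TYPE_DWORD branch above using the struct module: compute the byte budget first and fail before reading anything if the buffer is too short, just as parse() does with its toRead check.

import struct

def parse_dwords(data, array_length):
    to_read = array_length * 4  # four bytes per little-endian DWORD
    if len(data) < to_read:
        raise ValueError('Not enough bytes to read.')
    return list(struct.unpack('<%dI' % array_length, data[:to_read]))

print(parse_dwords(b'\x01\x00\x00\x00\x02\x00\x00\x00', 2))  # [1, 2]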
def get_embedded_items(result_collection):
    """
    Given a result_collection (returned by a previous API call that
    returns a collection, like get_bundle_list() or search()), return
    a list of embedded items with each item in the returned list considered
    a result object.

    'result_collection' a JSON object returned by a previous API call.
        The parameter 'embed_items' must have been True when the
        result_collection was originally requested. May not be None.

    Returns a list, which may be empty if no embedded items were found.
    """

    # Argument error checking.
    assert result_collection is not None

    result = []
    embedded_objects = result_collection.get('_embedded')
    if embedded_objects is not None:
        # Handle being passed a non-collection gracefully.
        result = embedded_objects.get('items', result)
    return result
def function[get_embedded_items, parameter[result_collection]]:
    constant[ Given a result_collection (returned by a previous API call that
    returns a collection, like get_bundle_list() or search()), return a list
    of embedded items with each item in the returned list considered a result
    object.

    'result_collection' a JSON object returned by a previous API call. The
        parameter 'embed_items' must have been True when the
        result_collection was originally requested. May not be None.

    Returns a list, which may be empty if no embedded items were found.
    ]
    assert[compare[name[result_collection] is_not constant[None]]]
    variable[result] assign[=] list[[]]
    variable[embedded_objects] assign[=] call[name[result_collection].get, parameter[constant[_embedded]]]
    if compare[name[embedded_objects] is_not constant[None]] begin[:]
        variable[result] assign[=] call[name[embedded_objects].get, parameter[constant[items], name[result]]]
    return[name[result]]
keyword[def] identifier[get_embedded_items] ( identifier[result_collection] ): literal[string] keyword[assert] identifier[result_collection] keyword[is] keyword[not] keyword[None] identifier[result] =[] identifier[embedded_objects] = identifier[result_collection] . identifier[get] ( literal[string] ) keyword[if] identifier[embedded_objects] keyword[is] keyword[not] keyword[None] : identifier[result] = identifier[embedded_objects] . identifier[get] ( literal[string] , identifier[result] ) keyword[return] identifier[result]
def get_embedded_items(result_collection):
    """
    Given a result_collection (returned by a previous API call that
    returns a collection, like get_bundle_list() or search()), return a list
    of embedded items with each item in the returned list considered a result
    object.

    'result_collection' a JSON object returned by a previous API call. The
        parameter 'embed_items' must have been True when the
        result_collection was originally requested. May not be None.

    Returns a list, which may be empty if no embedded items were found.
    """
    # Argument error checking.
    assert result_collection is not None
    result = []
    embedded_objects = result_collection.get('_embedded')
    if embedded_objects is not None:
        # Handle being passed a non-collection gracefully.
        result = embedded_objects.get('items', result) # depends on [control=['if'], data=['embedded_objects']]
    return result
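A minimal usage sketch for the get_embedded_items row above; the sample payload below is an illustrative assumption, shaped after the HAL-style '_embedded'/'items' keys the function reads:

# Hypothetical HAL-style collection; only the '_embedded'/'items' keys matter.
result_collection = {
    "total": 2,
    "_embedded": {
        "items": [
            {"id": "b1", "name": "bundle-one"},
            {"id": "b2", "name": "bundle-two"},
        ]
    },
}

assert [item["id"] for item in get_embedded_items(result_collection)] == ["b1", "b2"]
# A payload without '_embedded' degrades gracefully to an empty list.
assert get_embedded_items({"total": 0}) == []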
def hex_to_name(hexx): """ Convert hex to a color name, using matplotlib's colour names. Args: hexx (str): A hexadecimal colour, starting with '#'. Returns: str: The name of the colour, or None if not found. """ for n, h in defaults.COLOURS.items(): if (len(n) > 1) and (h == hexx.upper()): return n.lower() return None
def function[hex_to_name, parameter[hexx]]: constant[ Convert hex to a color name, using matplotlib's colour names. Args: hexx (str): A hexadecimal colour, starting with '#'. Returns: str: The name of the colour, or None if not found. ] for taget[tuple[[<ast.Name object at 0x7da18c4ce5f0>, <ast.Name object at 0x7da18c4ce590>]]] in starred[call[name[defaults].COLOURS.items, parameter[]]] begin[:] if <ast.BoolOp object at 0x7da18c4cc2e0> begin[:] return[call[name[n].lower, parameter[]]] return[constant[None]]
keyword[def] identifier[hex_to_name] ( identifier[hexx] ): literal[string] keyword[for] identifier[n] , identifier[h] keyword[in] identifier[defaults] . identifier[COLOURS] . identifier[items] (): keyword[if] ( identifier[len] ( identifier[n] )> literal[int] ) keyword[and] ( identifier[h] == identifier[hexx] . identifier[upper] ()): keyword[return] identifier[n] . identifier[lower] () keyword[return] keyword[None]
def hex_to_name(hexx): """ Convert hex to a color name, using matplotlib's colour names. Args: hexx (str): A hexadecimal colour, starting with '#'. Returns: str: The name of the colour, or None if not found. """ for (n, h) in defaults.COLOURS.items(): if len(n) > 1 and h == hexx.upper(): return n.lower() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return None
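A self-contained sketch of hex_to_name in use; `defaults` is stubbed here with a SimpleNamespace, whereas the real COLOURS mapping ships with the package:

from types import SimpleNamespace

# Stub standing in for the package's defaults module (an assumption).
defaults = SimpleNamespace(COLOURS={
    "R": "#FF0000",      # single-character aliases are skipped by len(n) > 1
    "red": "#FF0000",
    "navy": "#000080",
})

assert hex_to_name("#ff0000") == "red"   # hex comparison is case-insensitive
assert hex_to_name("#123456") is None    # unknown colours return None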
def alert_type(self, alert_type): """Sets the alert_type of this Alert. Alert type. # noqa: E501 :param alert_type: The alert_type of this Alert. # noqa: E501 :type: str """ allowed_values = ["CLASSIC", "THRESHOLD"] # noqa: E501 if alert_type not in allowed_values: raise ValueError( "Invalid value for `alert_type` ({0}), must be one of {1}" # noqa: E501 .format(alert_type, allowed_values) ) self._alert_type = alert_type
def function[alert_type, parameter[self, alert_type]]: constant[Sets the alert_type of this Alert. Alert type. # noqa: E501 :param alert_type: The alert_type of this Alert. # noqa: E501 :type: str ] variable[allowed_values] assign[=] list[[<ast.Constant object at 0x7da18bc71030>, <ast.Constant object at 0x7da18bc73c40>]] if compare[name[alert_type] <ast.NotIn object at 0x7da2590d7190> name[allowed_values]] begin[:] <ast.Raise object at 0x7da18bc71390> name[self]._alert_type assign[=] name[alert_type]
keyword[def] identifier[alert_type] ( identifier[self] , identifier[alert_type] ): literal[string] identifier[allowed_values] =[ literal[string] , literal[string] ] keyword[if] identifier[alert_type] keyword[not] keyword[in] identifier[allowed_values] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[alert_type] , identifier[allowed_values] ) ) identifier[self] . identifier[_alert_type] = identifier[alert_type]
def alert_type(self, alert_type): """Sets the alert_type of this Alert. Alert type. # noqa: E501 :param alert_type: The alert_type of this Alert. # noqa: E501 :type: str """ allowed_values = ['CLASSIC', 'THRESHOLD'] # noqa: E501 if alert_type not in allowed_values: # noqa: E501 raise ValueError('Invalid value for `alert_type` ({0}), must be one of {1}'.format(alert_type, allowed_values)) # depends on [control=['if'], data=['alert_type', 'allowed_values']] self._alert_type = alert_type
def get_dotstr(self):
    """Return a string containing the DAG graph in Graphviz's dot language."""
    dotobj = self.gosubdagplot.get_pydot_graph()  # pydot.Dot
    dotstr = dotobj.create_dot()
    return dotstr
def function[get_dotstr, parameter[self]]:
    constant[Return a string containing the DAG graph in Graphviz's dot language.]
    variable[dotobj] assign[=] call[name[self].gosubdagplot.get_pydot_graph, parameter[]]
    variable[dotstr] assign[=] call[name[dotobj].create_dot, parameter[]]
    return[name[dotstr]]
keyword[def] identifier[get_dotstr] ( identifier[self] ): literal[string] identifier[dotobj] = identifier[self] . identifier[gosubdagplot] . identifier[get_pydot_graph] () identifier[dotstr] = identifier[dotobj] . identifier[create_dot] () keyword[return] identifier[dotstr]
def get_dotstr(self):
    """Return a string containing the DAG graph in Graphviz's dot language.""" 
    dotobj = self.gosubdagplot.get_pydot_graph() # pydot.Dot 
    dotstr = dotobj.create_dot() 
    return dotstr
def apply_strain(self, strain): """ Apply a strain to the lattice. Args: strain (float or list): Amount of strain to apply. Can be a float, or a sequence of 3 numbers. E.g., 0.01 means all lattice vectors are increased by 1%. This is equivalent to calling modify_lattice with a lattice with lattice parameters that are 1% larger. """ s = (1 + np.array(strain)) * np.eye(3) self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
def function[apply_strain, parameter[self, strain]]: constant[ Apply a strain to the lattice. Args: strain (float or list): Amount of strain to apply. Can be a float, or a sequence of 3 numbers. E.g., 0.01 means all lattice vectors are increased by 1%. This is equivalent to calling modify_lattice with a lattice with lattice parameters that are 1% larger. ] variable[s] assign[=] binary_operation[binary_operation[constant[1] + call[name[np].array, parameter[name[strain]]]] * call[name[np].eye, parameter[constant[3]]]] name[self].lattice assign[=] call[name[Lattice], parameter[call[name[np].dot, parameter[name[self]._lattice.matrix.T, name[s]]].T]]
keyword[def] identifier[apply_strain] ( identifier[self] , identifier[strain] ): literal[string] identifier[s] =( literal[int] + identifier[np] . identifier[array] ( identifier[strain] ))* identifier[np] . identifier[eye] ( literal[int] ) identifier[self] . identifier[lattice] = identifier[Lattice] ( identifier[np] . identifier[dot] ( identifier[self] . identifier[_lattice] . identifier[matrix] . identifier[T] , identifier[s] ). identifier[T] )
def apply_strain(self, strain): """ Apply a strain to the lattice. Args: strain (float or list): Amount of strain to apply. Can be a float, or a sequence of 3 numbers. E.g., 0.01 means all lattice vectors are increased by 1%. This is equivalent to calling modify_lattice with a lattice with lattice parameters that are 1% larger. """ s = (1 + np.array(strain)) * np.eye(3) self.lattice = Lattice(np.dot(self._lattice.matrix.T, s).T)
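The matrix algebra behind apply_strain, replayed with plain numpy so it runs without the surrounding Lattice class; the 3x4x5 cell is an arbitrary example:

import numpy as np

matrix = np.array([[3.0, 0.0, 0.0],    # rows are lattice vectors
                   [0.0, 4.0, 0.0],
                   [0.0, 0.0, 5.0]])
strain = 0.01                           # 1% isotropic strain
s = (1 + np.array(strain)) * np.eye(3)  # the deformation used above
strained = np.dot(matrix.T, s).T

assert np.allclose(strained, matrix * 1.01)  # every lattice vector grows by 1%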
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
             prog='.', excs=(Exception,), reraise=True):
    """
    Context manager for handling interactive progress indication

    This context manager streamlines presenting banners and progress
    indicators.

    To start the progress indication, pass the ``msg`` argument as a start
    message. For example::

        printer = Console(verbose=True)
        with printer.progress('Checking files') as prog:
            # Do some checks
            if errors:
                prog.abrt()
            prog.end()

    The context manager returns a ``Progress`` instance, which provides
    methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()``
    (print a progress indicator).

    The ``abrt()`` and ``end()`` methods will raise an exception that
    interrupts the progress. These exceptions are ``ProgressEnd``
    subclasses, ``ProgressAbrt`` and ``ProgressOK`` respectively. They
    are silenced and not handled in any way as they only serve the
    purpose of flow control.

    Other exceptions are trapped and ``abrt()`` is called. The exceptions
    that should be trapped can be customized using the ``excs`` argument,
    which should be a tuple of exception classes.

    If a handler function is passed using the ``onerror`` argument, then
    this function takes the raised exception and handles it. By default,
    the ``error()`` factory is called with no arguments to generate the
    default error handler. If a string is passed, then the ``error()``
    factory is called with that string.

    Finally, when progress is aborted, either naturally or when an
    exception is raised, it is possible to reraise the ``ProgressAbrt``
    exception. This is done using the ``reraise`` flag. Default is to
    reraise.
    """
    if not onerror:
        onerror = self.error()
    if type(onerror) is str:
        onerror = self.error(msg=onerror)
    self.pverb(msg, end=sep)
    prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
    try:
        yield prog
        prog.end()
    except self.ProgressOK:
        pass
    except self.ProgressAbrt as err:
        if reraise:
            raise err
    except KeyboardInterrupt:
        raise
    except excs as err:
        prog.abrt(noraise=True)
        if onerror:
            onerror(err)
        if self.debug:
            traceback.print_exc()
        if reraise:
            raise self.ProgressAbrt()
def function[progress, parameter[self, msg, onerror, sep, end, abrt, prog, excs, reraise]]:
    constant[ Context manager for handling interactive progress indication

    This context manager streamlines presenting banners and progress
    indicators.

    To start the progress indication, pass the ``msg`` argument as a start
    message. For example::

        printer = Console(verbose=True)
        with printer.progress('Checking files') as prog:
            # Do some checks
            if errors:
                prog.abrt()
            prog.end()

    The context manager returns a ``Progress`` instance, which provides
    methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()``
    (print a progress indicator).

    The ``abrt()`` and ``end()`` methods will raise an exception that
    interrupts the progress. These exceptions are ``ProgressEnd``
    subclasses, ``ProgressAbrt`` and ``ProgressOK`` respectively. They
    are silenced and not handled in any way as they only serve the
    purpose of flow control.

    Other exceptions are trapped and ``abrt()`` is called. The exceptions
    that should be trapped can be customized using the ``excs`` argument,
    which should be a tuple of exception classes.

    If a handler function is passed using the ``onerror`` argument, then
    this function takes the raised exception and handles it. By default,
    the ``error()`` factory is called with no arguments to generate the
    default error handler. If a string is passed, then the ``error()``
    factory is called with that string.

    Finally, when progress is aborted, either naturally or when an
    exception is raised, it is possible to reraise the ``ProgressAbrt``
    exception. This is done using the ``reraise`` flag. Default is to
    reraise.
    ]
    if <ast.UnaryOp object at 0x7da18eb57df0> begin[:]
        variable[onerror] assign[=] call[name[self].error, parameter[]]
    if compare[call[name[type], parameter[name[onerror]]] is name[str]] begin[:]
        variable[onerror] assign[=] call[name[self].error, parameter[]]
    call[name[self].pverb, parameter[name[msg]]]
    variable[prog] assign[=] call[name[progress].Progress, parameter[name[self].pverb]]
    <ast.Try object at 0x7da1b14d2b00>
keyword[def] identifier[progress] ( identifier[self] , identifier[msg] , identifier[onerror] = keyword[None] , identifier[sep] = literal[string] , identifier[end] = literal[string] , identifier[abrt] = literal[string] , identifier[prog] = literal[string] , identifier[excs] =( identifier[Exception] ,), identifier[reraise] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[onerror] : identifier[onerror] = identifier[self] . identifier[error] () keyword[if] identifier[type] ( identifier[onerror] ) keyword[is] identifier[str] : identifier[onerror] = identifier[self] . identifier[error] ( identifier[msg] = identifier[onerror] ) identifier[self] . identifier[pverb] ( identifier[msg] , identifier[end] = identifier[sep] ) identifier[prog] = identifier[progress] . identifier[Progress] ( identifier[self] . identifier[pverb] , identifier[end] = identifier[end] , identifier[abrt] = identifier[abrt] , identifier[prog] = identifier[prog] ) keyword[try] : keyword[yield] identifier[prog] identifier[prog] . identifier[end] () keyword[except] identifier[self] . identifier[ProgressOK] : keyword[pass] keyword[except] identifier[self] . identifier[ProgressAbrt] keyword[as] identifier[err] : keyword[if] identifier[reraise] : keyword[raise] identifier[err] keyword[except] identifier[KeyboardInterrupt] : keyword[raise] keyword[except] identifier[excs] keyword[as] identifier[err] : identifier[prog] . identifier[abrt] ( identifier[noraise] = keyword[True] ) keyword[if] identifier[onerror] : identifier[onerror] ( identifier[err] ) keyword[if] identifier[self] . identifier[debug] : identifier[traceback] . identifier[print_exc] () keyword[if] identifier[reraise] : keyword[raise] identifier[self] . identifier[ProgressAbrt] ()
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL', prog='.', excs=(Exception,), reraise=True):
    """
    Context manager for handling interactive progress indication

    This context manager streamlines presenting banners and progress
    indicators.

    To start the progress indication, pass the ``msg`` argument as a start
    message. For example::

        printer = Console(verbose=True)
        with printer.progress('Checking files') as prog:
            # Do some checks
            if errors:
                prog.abrt()
            prog.end()

    The context manager returns a ``Progress`` instance, which provides
    methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()``
    (print a progress indicator).

    The ``abrt()`` and ``end()`` methods will raise an exception that
    interrupts the progress. These exceptions are ``ProgressEnd``
    subclasses, ``ProgressAbrt`` and ``ProgressOK`` respectively. They
    are silenced and not handled in any way as they only serve the
    purpose of flow control.

    Other exceptions are trapped and ``abrt()`` is called. The exceptions
    that should be trapped can be customized using the ``excs`` argument,
    which should be a tuple of exception classes.

    If a handler function is passed using the ``onerror`` argument, then
    this function takes the raised exception and handles it. By default,
    the ``error()`` factory is called with no arguments to generate the
    default error handler. If a string is passed, then the ``error()``
    factory is called with that string.

    Finally, when progress is aborted, either naturally or when an
    exception is raised, it is possible to reraise the ``ProgressAbrt``
    exception. This is done using the ``reraise`` flag. Default is to
    reraise.
    """
    if not onerror:
        onerror = self.error() # depends on [control=['if'], data=[]]
    if type(onerror) is str:
        onerror = self.error(msg=onerror) # depends on [control=['if'], data=[]]
    self.pverb(msg, end=sep)
    prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
    try:
        yield prog
        prog.end() # depends on [control=['try'], data=[]]
    except self.ProgressOK:
        pass # depends on [control=['except'], data=[]]
    except self.ProgressAbrt as err:
        if reraise:
            raise err # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
    except KeyboardInterrupt:
        raise # depends on [control=['except'], data=[]]
    except excs as err:
        prog.abrt(noraise=True)
        if onerror:
            onerror(err) # depends on [control=['if'], data=[]]
        if self.debug:
            traceback.print_exc() # depends on [control=['if'], data=[]]
        if reraise:
            raise self.ProgressAbrt() # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
def Boolean(value, true=(u'yes', u'1', u'true'), false=(u'no', u'0', u'false'), encoding=None): """ Parse a value as a boolean. :type value: `unicode` or `bytes` :param value: Text value to parse. :type true: `tuple` of `unicode` :param true: Values to compare, ignoring case, for ``True`` values. :type false: `tuple` of `unicode` :param false: Values to compare, ignoring case, for ``False`` values. :type encoding: `bytes` :param encoding: Encoding to treat `bytes` values as, defaults to ``utf-8``. :rtype: `bool` :return: Parsed boolean or ``None`` if ``value`` did not match ``true`` or ``false`` values. """ value = Text(value, encoding) if value is not None: value = value.lower().strip() if value in true: return True elif value in false: return False return None
def function[Boolean, parameter[value, true, false, encoding]]: constant[ Parse a value as a boolean. :type value: `unicode` or `bytes` :param value: Text value to parse. :type true: `tuple` of `unicode` :param true: Values to compare, ignoring case, for ``True`` values. :type false: `tuple` of `unicode` :param false: Values to compare, ignoring case, for ``False`` values. :type encoding: `bytes` :param encoding: Encoding to treat `bytes` values as, defaults to ``utf-8``. :rtype: `bool` :return: Parsed boolean or ``None`` if ``value`` did not match ``true`` or ``false`` values. ] variable[value] assign[=] call[name[Text], parameter[name[value], name[encoding]]] if compare[name[value] is_not constant[None]] begin[:] variable[value] assign[=] call[call[name[value].lower, parameter[]].strip, parameter[]] if compare[name[value] in name[true]] begin[:] return[constant[True]] return[constant[None]]
keyword[def] identifier[Boolean] ( identifier[value] , identifier[true] =( literal[string] , literal[string] , literal[string] ), identifier[false] =( literal[string] , literal[string] , literal[string] ), identifier[encoding] = keyword[None] ): literal[string] identifier[value] = identifier[Text] ( identifier[value] , identifier[encoding] ) keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] : identifier[value] = identifier[value] . identifier[lower] (). identifier[strip] () keyword[if] identifier[value] keyword[in] identifier[true] : keyword[return] keyword[True] keyword[elif] identifier[value] keyword[in] identifier[false] : keyword[return] keyword[False] keyword[return] keyword[None]
def Boolean(value, true=(u'yes', u'1', u'true'), false=(u'no', u'0', u'false'), encoding=None): """ Parse a value as a boolean. :type value: `unicode` or `bytes` :param value: Text value to parse. :type true: `tuple` of `unicode` :param true: Values to compare, ignoring case, for ``True`` values. :type false: `tuple` of `unicode` :param false: Values to compare, ignoring case, for ``False`` values. :type encoding: `bytes` :param encoding: Encoding to treat `bytes` values as, defaults to ``utf-8``. :rtype: `bool` :return: Parsed boolean or ``None`` if ``value`` did not match ``true`` or ``false`` values. """ value = Text(value, encoding) if value is not None: value = value.lower().strip() # depends on [control=['if'], data=['value']] if value in true: return True # depends on [control=['if'], data=[]] elif value in false: return False # depends on [control=['if'], data=[]] return None
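A runnable sketch of Boolean in use; Text is stubbed with a minimal decoder here (the real helper presumably does more validation):

def Text(value, encoding=None):
    # Stand-in for the module's Text() parser: decode bytes, pass unicode through.
    if isinstance(value, bytes):
        return value.decode(encoding or 'utf-8')
    return value

assert Boolean(u' Yes ') is True    # case and surrounding whitespace are ignored
assert Boolean(b'0') is False       # bytes values are decoded first
assert Boolean(u'maybe') is None    # anything unmatched yields None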
def get_warning_choice(self, message, short_message, style=wx.YES_NO | wx.NO_DEFAULT | wx.ICON_WARNING):
    """Launches a warning dialog and returns True if it is OK to proceed"""

    dlg = GMD.GenericMessageDialog(self.main_window, message,
                                   short_message, style)

    choice = dlg.ShowModal()

    dlg.Destroy()

    return choice == wx.ID_YES
def function[get_warning_choice, parameter[self, message, short_message, style]]:
    constant[Launches a warning dialog and returns True if it is OK to proceed]
    variable[dlg] assign[=] call[name[GMD].GenericMessageDialog, parameter[name[self].main_window, name[message], name[short_message], name[style]]]
    variable[choice] assign[=] call[name[dlg].ShowModal, parameter[]]
    call[name[dlg].Destroy, parameter[]]
    return[compare[name[choice] equal[==] name[wx].ID_YES]]
keyword[def] identifier[get_warning_choice] ( identifier[self] , identifier[message] , identifier[short_message] , identifier[style] = identifier[wx] . identifier[YES_NO] | identifier[wx] . identifier[NO_DEFAULT] | identifier[wx] . identifier[ICON_WARNING] ): literal[string] identifier[dlg] = identifier[GMD] . identifier[GenericMessageDialog] ( identifier[self] . identifier[main_window] , identifier[message] , identifier[short_message] , identifier[style] ) identifier[choice] = identifier[dlg] . identifier[ShowModal] () identifier[dlg] . identifier[Destroy] () keyword[return] identifier[choice] == identifier[wx] . identifier[ID_YES]
def get_warning_choice(self, message, short_message, style=wx.YES_NO | wx.NO_DEFAULT | wx.ICON_WARNING):
    """Launches a warning dialog and returns True if it is OK to proceed"""
    dlg = GMD.GenericMessageDialog(self.main_window, message, short_message, style)
    choice = dlg.ShowModal()
    dlg.Destroy()
    return choice == wx.ID_YES
def clearConnections( self, cls ): """ Clears all the connections for this node. :param cls | <subclass of XNodeConnection> || None :return <int> | number of connections removed """ count = 0 for connection in self.connections(cls): connection.remove() count += 1 return count
def function[clearConnections, parameter[self, cls]]: constant[ Clears all the connections for this node. :param cls | <subclass of XNodeConnection> || None :return <int> | number of connections removed ] variable[count] assign[=] constant[0] for taget[name[connection]] in starred[call[name[self].connections, parameter[name[cls]]]] begin[:] call[name[connection].remove, parameter[]] <ast.AugAssign object at 0x7da204620be0> return[name[count]]
keyword[def] identifier[clearConnections] ( identifier[self] , identifier[cls] ): literal[string] identifier[count] = literal[int] keyword[for] identifier[connection] keyword[in] identifier[self] . identifier[connections] ( identifier[cls] ): identifier[connection] . identifier[remove] () identifier[count] += literal[int] keyword[return] identifier[count]
def clearConnections(self, cls): """ Clears all the connections for this node. :param cls | <subclass of XNodeConnection> || None :return <int> | number of connections removed """ count = 0 for connection in self.connections(cls): connection.remove() count += 1 # depends on [control=['for'], data=['connection']] return count
def build_where_clause(mappings, operator='AND'):
    """Constructs the where clause based on a dictionary of values

    >>> build_where_clause({'id': 456, 'name': 'myrecord'}, operator='OR')
    'WHERE id = 456 OR name = "myrecord"'
    """
    where_clause_mappings = {}
    where_clause_mappings.update(mappings)

    where_clause = 'WHERE ' + ' {} '.format(operator).join(
        '{k} = {v}'.format(k=k, v='"{}"'.format(v) if isinstance(v, basestring) else v)
        for k, v in where_clause_mappings.iteritems()
    )

    return where_clause
def function[build_where_clause, parameter[mappings, operator]]:
    constant[Constructs the where clause based on a dictionary of values

    >>> build_where_clause({'id': 456, 'name': 'myrecord'}, operator='OR')
    'WHERE id = 456 OR name = "myrecord"'
    ]
    variable[where_clause_mappings] assign[=] dictionary[[], []]
    call[name[where_clause_mappings].update, parameter[name[mappings]]]
    variable[where_clause] assign[=] binary_operation[constant[WHERE ] + call[call[constant[ {} ].format, parameter[name[operator]]].join, parameter[<ast.GeneratorExp object at 0x7da20c76fa00>]]]
    return[name[where_clause]]
keyword[def] identifier[build_where_clause] ( identifier[mappings] , identifier[operator] = literal[string] ): literal[string] identifier[where_clause_mappings] ={} identifier[where_clause_mappings] . identifier[update] ( identifier[mappings] ) identifier[where_clause] = literal[string] + literal[string] . identifier[format] ( identifier[operator] ). identifier[join] ( literal[string] . identifier[format] ( identifier[k] = identifier[k] , identifier[v] = literal[string] . identifier[format] ( identifier[v] ) keyword[if] identifier[isinstance] ( identifier[v] , identifier[basestring] ) keyword[else] identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[where_clause_mappings] . identifier[iteritems] () ) keyword[return] identifier[where_clause]
def build_where_clause(mappings, operator='AND'):
    """Constructs the where clause based on a dictionary of values

    >>> build_where_clause({'id': 456, 'name': 'myrecord'}, operator='OR')
    'WHERE id = 456 OR name = "myrecord"'
    """
    where_clause_mappings = {}
    where_clause_mappings.update(mappings)
    where_clause = 'WHERE ' + ' {} '.format(operator).join(('{k} = {v}'.format(k=k, v='"{}"'.format(v) if isinstance(v, basestring) else v) for (k, v) in where_clause_mappings.iteritems()))
    return where_clause
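The helper above is Python 2 (`basestring`, `iteritems`); below is a sketch of the same logic under Python 3. Note that the quoting is plain string interpolation, so it is not safe for untrusted input:

def build_where_clause_py3(mappings, operator='AND'):
    # Same rule as above: strings get double quotes, other values are inlined.
    return 'WHERE ' + ' {} '.format(operator).join(
        '{k} = {v}'.format(k=k, v='"{}"'.format(v) if isinstance(v, str) else v)
        for k, v in mappings.items()
    )

assert build_where_clause_py3({'id': 456, 'name': 'myrecord'}, operator='OR') \
    == 'WHERE id = 456 OR name = "myrecord"'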
def create(cls, source, *, transform_args=None): """Create an instance of the class from the source. By default cls.transform_args is used, but can be overridden by passing in transform_args. """ if transform_args is None: transform_args = cls.transform_args return cls(get_obj(source, *transform_args))
def function[create, parameter[cls, source]]: constant[Create an instance of the class from the source. By default cls.transform_args is used, but can be overridden by passing in transform_args. ] if compare[name[transform_args] is constant[None]] begin[:] variable[transform_args] assign[=] name[cls].transform_args return[call[name[cls], parameter[call[name[get_obj], parameter[name[source], <ast.Starred object at 0x7da204566d40>]]]]]
keyword[def] identifier[create] ( identifier[cls] , identifier[source] ,*, identifier[transform_args] = keyword[None] ): literal[string] keyword[if] identifier[transform_args] keyword[is] keyword[None] : identifier[transform_args] = identifier[cls] . identifier[transform_args] keyword[return] identifier[cls] ( identifier[get_obj] ( identifier[source] ,* identifier[transform_args] ))
def create(cls, source, *, transform_args=None): """Create an instance of the class from the source. By default cls.transform_args is used, but can be overridden by passing in transform_args. """ if transform_args is None: transform_args = cls.transform_args # depends on [control=['if'], data=['transform_args']] return cls(get_obj(source, *transform_args))
def model_getattr(): """ Creates a getter that will drop the current value and retrieve the model's attribute with the context key as name. """ def model_getattr(_value, context, **_params): value = getattr(context["model"], context["key"]) return _attr(value) return model_getattr
def function[model_getattr, parameter[]]: constant[ Creates a getter that will drop the current value and retrieve the model's attribute with the context key as name. ] def function[model_getattr, parameter[_value, context]]: variable[value] assign[=] call[name[getattr], parameter[call[name[context]][constant[model]], call[name[context]][constant[key]]]] return[call[name[_attr], parameter[name[value]]]] return[name[model_getattr]]
keyword[def] identifier[model_getattr] (): literal[string] keyword[def] identifier[model_getattr] ( identifier[_value] , identifier[context] ,** identifier[_params] ): identifier[value] = identifier[getattr] ( identifier[context] [ literal[string] ], identifier[context] [ literal[string] ]) keyword[return] identifier[_attr] ( identifier[value] ) keyword[return] identifier[model_getattr]
def model_getattr(): """ Creates a getter that will drop the current value and retrieve the model's attribute with the context key as name. """ def model_getattr(_value, context, **_params): value = getattr(context['model'], context['key']) return _attr(value) return model_getattr
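The closure above in use; _attr is stubbed as identity here, while the real module presumably wraps the value in its own attribute type:

def _attr(value):
    # Identity stand-in for the module's _attr() wrapper (an assumption).
    return value

class Model(object):
    title = 'hello'

getter = model_getattr()
# The current value (first argument) is dropped; the model attribute wins.
assert getter('ignored', {'model': Model(), 'key': 'title'}) == 'hello'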
def update(self): """ Update this provider. Should be invoked when the server gets updated. This method will notify all clients that wait for `self.next_revision_available`. """ with self.lock: # Increment revision and commit it. self.revision += 1 self.server.commit(self.revision + 1) # Unblock all waiting clients. self.next_revision_available.set() self.next_revision_available.clear() # Check sessions to see which revision can be removed. if self.sessions: lowest_revision = min( session.revision for session in self.sessions.itervalues()) # Remove all old revision history if lowest_revision == self.revision: self.server.clean(lowest_revision) # Invoke hooks invoke_hooks(self.hooks, "updated", self.revision)
def function[update, parameter[self]]: constant[ Update this provider. Should be invoked when the server gets updated. This method will notify all clients that wait for `self.next_revision_available`. ] with name[self].lock begin[:] <ast.AugAssign object at 0x7da20c9933a0> call[name[self].server.commit, parameter[binary_operation[name[self].revision + constant[1]]]] call[name[self].next_revision_available.set, parameter[]] call[name[self].next_revision_available.clear, parameter[]] if name[self].sessions begin[:] variable[lowest_revision] assign[=] call[name[min], parameter[<ast.GeneratorExp object at 0x7da18bc72620>]] if compare[name[lowest_revision] equal[==] name[self].revision] begin[:] call[name[self].server.clean, parameter[name[lowest_revision]]] call[name[invoke_hooks], parameter[name[self].hooks, constant[updated], name[self].revision]]
keyword[def] identifier[update] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[lock] : identifier[self] . identifier[revision] += literal[int] identifier[self] . identifier[server] . identifier[commit] ( identifier[self] . identifier[revision] + literal[int] ) identifier[self] . identifier[next_revision_available] . identifier[set] () identifier[self] . identifier[next_revision_available] . identifier[clear] () keyword[if] identifier[self] . identifier[sessions] : identifier[lowest_revision] = identifier[min] ( identifier[session] . identifier[revision] keyword[for] identifier[session] keyword[in] identifier[self] . identifier[sessions] . identifier[itervalues] ()) keyword[if] identifier[lowest_revision] == identifier[self] . identifier[revision] : identifier[self] . identifier[server] . identifier[clean] ( identifier[lowest_revision] ) identifier[invoke_hooks] ( identifier[self] . identifier[hooks] , literal[string] , identifier[self] . identifier[revision] )
def update(self): """ Update this provider. Should be invoked when the server gets updated. This method will notify all clients that wait for `self.next_revision_available`. """ with self.lock: # Increment revision and commit it. self.revision += 1 self.server.commit(self.revision + 1) # Unblock all waiting clients. self.next_revision_available.set() self.next_revision_available.clear() # Check sessions to see which revision can be removed. if self.sessions: lowest_revision = min((session.revision for session in self.sessions.itervalues())) # Remove all old revision history if lowest_revision == self.revision: self.server.clean(lowest_revision) # depends on [control=['if'], data=['lowest_revision']] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] # Invoke hooks invoke_hooks(self.hooks, 'updated', self.revision)
def get_grid_data(xall, yall, zall, nbins=100, method='nearest'): """Interpolate unstructured two-dimensional data. Parameters ---------- xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. zall : ndarray(T) Sample z-coordinates. nbins : int, optional, default=100 Number of histogram bins used in x/y-dimensions. method : str, optional, default='nearest' Assignment method; scipy.interpolate.griddata supports the methods 'nearest', 'linear', and 'cubic'. Returns ------- x : ndarray(nbins, nbins) The bins' x-coordinates in meshgrid format. y : ndarray(nbins, nbins) The bins' y-coordinates in meshgrid format. z : ndarray(nbins, nbins) Interpolated z-data in meshgrid format. """ from scipy.interpolate import griddata x, y = _np.meshgrid( _np.linspace(xall.min(), xall.max(), nbins), _np.linspace(yall.min(), yall.max(), nbins), indexing='ij') z = griddata( _np.hstack([xall[:,None], yall[:,None]]), zall, (x, y), method=method) return x, y, z
def function[get_grid_data, parameter[xall, yall, zall, nbins, method]]: constant[Interpolate unstructured two-dimensional data. Parameters ---------- xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. zall : ndarray(T) Sample z-coordinates. nbins : int, optional, default=100 Number of histogram bins used in x/y-dimensions. method : str, optional, default='nearest' Assignment method; scipy.interpolate.griddata supports the methods 'nearest', 'linear', and 'cubic'. Returns ------- x : ndarray(nbins, nbins) The bins' x-coordinates in meshgrid format. y : ndarray(nbins, nbins) The bins' y-coordinates in meshgrid format. z : ndarray(nbins, nbins) Interpolated z-data in meshgrid format. ] from relative_module[scipy.interpolate] import module[griddata] <ast.Tuple object at 0x7da1b078ea70> assign[=] call[name[_np].meshgrid, parameter[call[name[_np].linspace, parameter[call[name[xall].min, parameter[]], call[name[xall].max, parameter[]], name[nbins]]], call[name[_np].linspace, parameter[call[name[yall].min, parameter[]], call[name[yall].max, parameter[]], name[nbins]]]]] variable[z] assign[=] call[name[griddata], parameter[call[name[_np].hstack, parameter[list[[<ast.Subscript object at 0x7da1b078c280>, <ast.Subscript object at 0x7da1b078d9c0>]]]], name[zall], tuple[[<ast.Name object at 0x7da1b078e4d0>, <ast.Name object at 0x7da1b078d4e0>]]]] return[tuple[[<ast.Name object at 0x7da1b078d8a0>, <ast.Name object at 0x7da1b078fa60>, <ast.Name object at 0x7da1b078f8e0>]]]
keyword[def] identifier[get_grid_data] ( identifier[xall] , identifier[yall] , identifier[zall] , identifier[nbins] = literal[int] , identifier[method] = literal[string] ): literal[string] keyword[from] identifier[scipy] . identifier[interpolate] keyword[import] identifier[griddata] identifier[x] , identifier[y] = identifier[_np] . identifier[meshgrid] ( identifier[_np] . identifier[linspace] ( identifier[xall] . identifier[min] (), identifier[xall] . identifier[max] (), identifier[nbins] ), identifier[_np] . identifier[linspace] ( identifier[yall] . identifier[min] (), identifier[yall] . identifier[max] (), identifier[nbins] ), identifier[indexing] = literal[string] ) identifier[z] = identifier[griddata] ( identifier[_np] . identifier[hstack] ([ identifier[xall] [:, keyword[None] ], identifier[yall] [:, keyword[None] ]]), identifier[zall] ,( identifier[x] , identifier[y] ), identifier[method] = identifier[method] ) keyword[return] identifier[x] , identifier[y] , identifier[z]
def get_grid_data(xall, yall, zall, nbins=100, method='nearest'): """Interpolate unstructured two-dimensional data. Parameters ---------- xall : ndarray(T) Sample x-coordinates. yall : ndarray(T) Sample y-coordinates. zall : ndarray(T) Sample z-coordinates. nbins : int, optional, default=100 Number of histogram bins used in x/y-dimensions. method : str, optional, default='nearest' Assignment method; scipy.interpolate.griddata supports the methods 'nearest', 'linear', and 'cubic'. Returns ------- x : ndarray(nbins, nbins) The bins' x-coordinates in meshgrid format. y : ndarray(nbins, nbins) The bins' y-coordinates in meshgrid format. z : ndarray(nbins, nbins) Interpolated z-data in meshgrid format. """ from scipy.interpolate import griddata (x, y) = _np.meshgrid(_np.linspace(xall.min(), xall.max(), nbins), _np.linspace(yall.min(), yall.max(), nbins), indexing='ij') z = griddata(_np.hstack([xall[:, None], yall[:, None]]), zall, (x, y), method=method) return (x, y, z)
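A quick smoke test of get_grid_data on synthetic z = x + y samples; it needs numpy (imported as _np, matching the alias the function body uses) and scipy:

import numpy as _np   # the function body resolves the _np alias

rng = _np.random.RandomState(0)
xall = rng.uniform(0.0, 1.0, 500)
yall = rng.uniform(0.0, 1.0, 500)
zall = xall + yall

x, y, z = get_grid_data(xall, yall, zall, nbins=20, method='linear')
assert x.shape == y.shape == z.shape == (20, 20)
# 'linear' leaves NaNs outside the samples' convex hull; 'nearest' does not.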
def get_neighbor_names(self, node_name: str, order: int = 1) -> list: """Get the names of all neighbors of a node, and the node itself. :param node_name: Node whose neighbor names are requested. :return: A list of names of all neighbors of a node, and the node itself. """ logger.info("In get_neighbor_names()") node = self.graph.vs.find(name=node_name) neighbors = self.graph.neighborhood(node, order=order) names = self.graph.vs[neighbors]["name"] names.append(node_name) return list(names)
def function[get_neighbor_names, parameter[self, node_name, order]]: constant[Get the names of all neighbors of a node, and the node itself. :param node_name: Node whose neighbor names are requested. :return: A list of names of all neighbors of a node, and the node itself. ] call[name[logger].info, parameter[constant[In get_neighbor_names()]]] variable[node] assign[=] call[name[self].graph.vs.find, parameter[]] variable[neighbors] assign[=] call[name[self].graph.neighborhood, parameter[name[node]]] variable[names] assign[=] call[call[name[self].graph.vs][name[neighbors]]][constant[name]] call[name[names].append, parameter[name[node_name]]] return[call[name[list], parameter[name[names]]]]
keyword[def] identifier[get_neighbor_names] ( identifier[self] , identifier[node_name] : identifier[str] , identifier[order] : identifier[int] = literal[int] )-> identifier[list] : literal[string] identifier[logger] . identifier[info] ( literal[string] ) identifier[node] = identifier[self] . identifier[graph] . identifier[vs] . identifier[find] ( identifier[name] = identifier[node_name] ) identifier[neighbors] = identifier[self] . identifier[graph] . identifier[neighborhood] ( identifier[node] , identifier[order] = identifier[order] ) identifier[names] = identifier[self] . identifier[graph] . identifier[vs] [ identifier[neighbors] ][ literal[string] ] identifier[names] . identifier[append] ( identifier[node_name] ) keyword[return] identifier[list] ( identifier[names] )
def get_neighbor_names(self, node_name: str, order: int=1) -> list: """Get the names of all neighbors of a node, and the node itself. :param node_name: Node whose neighbor names are requested. :return: A list of names of all neighbors of a node, and the node itself. """ logger.info('In get_neighbor_names()') node = self.graph.vs.find(name=node_name) neighbors = self.graph.neighborhood(node, order=order) names = self.graph.vs[neighbors]['name'] names.append(node_name) return list(names)
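Exercising get_neighbor_names against python-igraph through a tiny holder class; `logger` is stubbed. Note that igraph's neighborhood() already includes the seed vertex at the default mindist, so the explicit append can leave the node's own name in the result twice:

import logging
import igraph

logger = logging.getLogger(__name__)

class Net(object):
    get_neighbor_names = get_neighbor_names   # reuse the method above
    def __init__(self):
        self.graph = igraph.Graph()
        self.graph.add_vertices(3)
        self.graph.vs["name"] = ["a", "b", "c"]
        self.graph.add_edges([(0, 1)])        # a -- b; c stays isolated

names = Net().get_neighbor_names("a")
assert "a" in names and "b" in names and "c" not in names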
def plot4_nolog(self, num): """ Plots the abundances of H-1, He-4, C-12 and O-16. """ self.plot_prof_2(num,'H-1',0.,5.) self.plot_prof_2(num,'He-4',0.,5.) self.plot_prof_2(num,'C-12',0.,5.) self.plot_prof_2(num,'O-16',0.,5.) pyl.legend(loc=3)
def function[plot4_nolog, parameter[self, num]]: constant[ Plots the abundances of H-1, He-4, C-12 and O-16. ] call[name[self].plot_prof_2, parameter[name[num], constant[H-1], constant[0.0], constant[5.0]]] call[name[self].plot_prof_2, parameter[name[num], constant[He-4], constant[0.0], constant[5.0]]] call[name[self].plot_prof_2, parameter[name[num], constant[C-12], constant[0.0], constant[5.0]]] call[name[self].plot_prof_2, parameter[name[num], constant[O-16], constant[0.0], constant[5.0]]] call[name[pyl].legend, parameter[]]
keyword[def] identifier[plot4_nolog] ( identifier[self] , identifier[num] ): literal[string] identifier[self] . identifier[plot_prof_2] ( identifier[num] , literal[string] , literal[int] , literal[int] ) identifier[self] . identifier[plot_prof_2] ( identifier[num] , literal[string] , literal[int] , literal[int] ) identifier[self] . identifier[plot_prof_2] ( identifier[num] , literal[string] , literal[int] , literal[int] ) identifier[self] . identifier[plot_prof_2] ( identifier[num] , literal[string] , literal[int] , literal[int] ) identifier[pyl] . identifier[legend] ( identifier[loc] = literal[int] )
def plot4_nolog(self, num): """ Plots the abundances of H-1, He-4, C-12 and O-16. """ self.plot_prof_2(num, 'H-1', 0.0, 5.0) self.plot_prof_2(num, 'He-4', 0.0, 5.0) self.plot_prof_2(num, 'C-12', 0.0, 5.0) self.plot_prof_2(num, 'O-16', 0.0, 5.0) pyl.legend(loc=3)
def from_epw_file(cls, epwfile, timestep=1):
    """Create a wea object using the solar irradiance values in an epw file.

    Args:
        epwfile: Full path to epw weather file.
        timestep: An optional integer to set the number of time steps per
            hour. Default is 1 for one value per hour. Note that this
            input will only do a linear interpolation over the data in the
            EPW file. While such linear interpolations are suitable for
            most thermal simulations, where thermal lag "smooths over" the
            effect of momentary increases in solar energy, it is not
            recommended for daylight simulations, where momentary increases
            in solar energy can mean the difference between glare and
            visual comfort.
    """
    is_leap_year = False  # epw file is always for 8760 hours

    epw = EPW(epwfile)
    direct_normal, diffuse_horizontal = \
        cls._get_data_collections(epw.direct_normal_radiation.values,
                                  epw.diffuse_horizontal_radiation.values,
                                  epw.metadata, 1, is_leap_year)
    if timestep != 1:
        print ("Note: timesteps greater than 1 on epw-generated Wea's \n" +
               "are suitable for thermal models but are not recommended \n" +
               "for daylight models.")

        # interpolate the data
        direct_normal = direct_normal.interpolate_to_timestep(timestep)
        diffuse_horizontal = diffuse_horizontal.interpolate_to_timestep(timestep)

        # create sunpath to check if the sun is up at a given timestep
        sp = Sunpath.from_location(epw.location)

        # add correct values to the empty data collection
        for i, dt in enumerate(cls._get_datetimes(timestep, is_leap_year)):
            # set irradiance values to 0 when the sun is not up
            sun = sp.calculate_sun_from_date_time(dt)
            if sun.altitude < 0:
                direct_normal[i] = 0
                diffuse_horizontal[i] = 0

    return cls(epw.location, direct_normal, diffuse_horizontal,
               timestep, is_leap_year)
def function[from_epw_file, parameter[cls, epwfile, timestep]]: constant[Create a wea object using the solar irradiance values in an epw file. Args: epwfile: Full path to epw weather file. timestep: An optional integer to set the number of time steps per hour. Default is 1 for one value per hour. Note that this input will only do a linear interpolation over the data in the EPW file. While such linear interpolations are suitable for most thermal simulations, where thermal lag "smooths over" the effect of momentary increases in solar energy, it is not recommended for daylight simulations, where momentary increases in solar energy can mean the difference between glare and visual comfort. ] variable[is_leap_year] assign[=] constant[False] variable[epw] assign[=] call[name[EPW], parameter[name[epwfile]]] <ast.Tuple object at 0x7da1b1274bb0> assign[=] call[name[cls]._get_data_collections, parameter[name[epw].direct_normal_radiation.values, name[epw].diffuse_horizontal_radiation.values, name[epw].metadata, constant[1], name[is_leap_year]]] if compare[name[timestep] not_equal[!=] constant[1]] begin[:] call[name[print], parameter[binary_operation[binary_operation[constant[Note: timesteps greater than 1 on epw-generated Wea's ] + constant[are suitable for thermal models but are not recommended ]] + constant[for daylight models.]]]] variable[direct_normal] assign[=] call[name[direct_normal].interpolate_to_timestep, parameter[name[timestep]]] variable[diffuse_horizontal] assign[=] call[name[diffuse_horizontal].interpolate_to_timestep, parameter[name[timestep]]] variable[sp] assign[=] call[name[Sunpath].from_location, parameter[name[epw].location]] for taget[tuple[[<ast.Name object at 0x7da1b1277d30>, <ast.Name object at 0x7da1b1275840>]]] in starred[call[name[enumerate], parameter[call[name[cls]._get_datetimes, parameter[name[timestep], name[is_leap_year]]]]]] begin[:] variable[sun] assign[=] call[name[sp].calculate_sun_from_date_time, parameter[name[dt]]] if compare[name[sun].altitude less[<] constant[0]] begin[:] call[name[direct_normal]][name[i]] assign[=] constant[0] call[name[diffuse_horizontal]][name[i]] assign[=] constant[0] return[call[name[cls], parameter[name[epw].location, name[direct_normal], name[diffuse_horizontal], name[timestep], name[is_leap_year]]]]
keyword[def] identifier[from_epw_file] ( identifier[cls] , identifier[epwfile] , identifier[timestep] = literal[int] ): literal[string] identifier[is_leap_year] = keyword[False] identifier[epw] = identifier[EPW] ( identifier[epwfile] ) identifier[direct_normal] , identifier[diffuse_horizontal] = identifier[cls] . identifier[_get_data_collections] ( identifier[epw] . identifier[direct_normal_radiation] . identifier[values] , identifier[epw] . identifier[diffuse_horizontal_radiation] . identifier[values] , identifier[epw] . identifier[metadata] , literal[int] , identifier[is_leap_year] ) keyword[if] identifier[timestep] != literal[int] : identifier[print] ( literal[string] + literal[string] + literal[string] ) identifier[direct_normal] = identifier[direct_normal] . identifier[interpolate_to_timestep] ( identifier[timestep] ) identifier[diffuse_horizontal] = identifier[diffuse_horizontal] . identifier[interpolate_to_timestep] ( identifier[timestep] ) identifier[sp] = identifier[Sunpath] . identifier[from_location] ( identifier[epw] . identifier[location] ) keyword[for] identifier[i] , identifier[dt] keyword[in] identifier[enumerate] ( identifier[cls] . identifier[_get_datetimes] ( identifier[timestep] , identifier[is_leap_year] )): identifier[sun] = identifier[sp] . identifier[calculate_sun_from_date_time] ( identifier[dt] ) keyword[if] identifier[sun] . identifier[altitude] < literal[int] : identifier[direct_normal] [ identifier[i] ]= literal[int] identifier[diffuse_horizontal] [ identifier[i] ]= literal[int] keyword[return] identifier[cls] ( identifier[epw] . identifier[location] , identifier[direct_normal] , identifier[diffuse_horizontal] , identifier[timestep] , identifier[is_leap_year] )
def from_epw_file(cls, epwfile, timestep=1):
    """Create a wea object using the solar irradiance values in an epw file.

    Args:
        epwfile: Full path to epw weather file.
        timestep: An optional integer to set the number of time steps per
            hour. Default is 1 for one value per hour. Note that this
            input will only do a linear interpolation over the data in the
            EPW file. While such linear interpolations are suitable for
            most thermal simulations, where thermal lag "smooths over" the
            effect of momentary increases in solar energy, it is not
            recommended for daylight simulations, where momentary increases
            in solar energy can mean the difference between glare and
            visual comfort.
    """
    is_leap_year = False # epw file is always for 8760 hours
    epw = EPW(epwfile)
    (direct_normal, diffuse_horizontal) = cls._get_data_collections(epw.direct_normal_radiation.values, epw.diffuse_horizontal_radiation.values, epw.metadata, 1, is_leap_year)
    if timestep != 1:
        print("Note: timesteps greater than 1 on epw-generated Wea's \n" + 'are suitable for thermal models but are not recommended \n' + 'for daylight models.')
        # interpolate the data
        direct_normal = direct_normal.interpolate_to_timestep(timestep)
        diffuse_horizontal = diffuse_horizontal.interpolate_to_timestep(timestep)
        # create sunpath to check if the sun is up at a given timestep
        sp = Sunpath.from_location(epw.location)
        # add correct values to the empty data collection
        for (i, dt) in enumerate(cls._get_datetimes(timestep, is_leap_year)):
            # set irradiance values to 0 when the sun is not up
            sun = sp.calculate_sun_from_date_time(dt)
            if sun.altitude < 0:
                direct_normal[i] = 0
                diffuse_horizontal[i] = 0 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['timestep']]
    return cls(epw.location, direct_normal, diffuse_horizontal, timestep, is_leap_year)
def p_use_declaration(p): '''use_declaration : namespace_name | NS_SEPARATOR namespace_name | namespace_name AS STRING | NS_SEPARATOR namespace_name AS STRING''' if len(p) == 2: p[0] = ast.UseDeclaration(p[1], None, lineno=p.lineno(1)) elif len(p) == 3: p[0] = ast.UseDeclaration(p[1] + p[2], None, lineno=p.lineno(1)) elif len(p) == 4: p[0] = ast.UseDeclaration(p[1], p[3], lineno=p.lineno(2)) else: p[0] = ast.UseDeclaration(p[1] + p[2], p[4], lineno=p.lineno(1))
def function[p_use_declaration, parameter[p]]: constant[use_declaration : namespace_name | NS_SEPARATOR namespace_name | namespace_name AS STRING | NS_SEPARATOR namespace_name AS STRING] if compare[call[name[len], parameter[name[p]]] equal[==] constant[2]] begin[:] call[name[p]][constant[0]] assign[=] call[name[ast].UseDeclaration, parameter[call[name[p]][constant[1]], constant[None]]]
keyword[def] identifier[p_use_declaration] ( identifier[p] ): literal[string] keyword[if] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]= identifier[ast] . identifier[UseDeclaration] ( identifier[p] [ literal[int] ], keyword[None] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) keyword[elif] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]= identifier[ast] . identifier[UseDeclaration] ( identifier[p] [ literal[int] ]+ identifier[p] [ literal[int] ], keyword[None] , identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) keyword[elif] identifier[len] ( identifier[p] )== literal[int] : identifier[p] [ literal[int] ]= identifier[ast] . identifier[UseDeclaration] ( identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] )) keyword[else] : identifier[p] [ literal[int] ]= identifier[ast] . identifier[UseDeclaration] ( identifier[p] [ literal[int] ]+ identifier[p] [ literal[int] ], identifier[p] [ literal[int] ], identifier[lineno] = identifier[p] . identifier[lineno] ( literal[int] ))
def p_use_declaration(p): """use_declaration : namespace_name | NS_SEPARATOR namespace_name | namespace_name AS STRING | NS_SEPARATOR namespace_name AS STRING""" if len(p) == 2: p[0] = ast.UseDeclaration(p[1], None, lineno=p.lineno(1)) # depends on [control=['if'], data=[]] elif len(p) == 3: p[0] = ast.UseDeclaration(p[1] + p[2], None, lineno=p.lineno(1)) # depends on [control=['if'], data=[]] elif len(p) == 4: p[0] = ast.UseDeclaration(p[1], p[3], lineno=p.lineno(2)) # depends on [control=['if'], data=[]] else: p[0] = ast.UseDeclaration(p[1] + p[2], p[4], lineno=p.lineno(1))
def MOVHPD(cpu, dest, src):
    """
    Moves high packed double-precision floating-point value.

    Moves a double-precision floating-point value from the source operand
    (second operand) to the destination operand (first operand). The source
    and destination operands can be an XMM register or a 64-bit memory
    location. This instruction allows double-precision floating-point values
    to be moved to and from the high quadword of an XMM register and memory.
    It cannot be used for register to register or memory to memory moves. When
    the destination operand is an XMM register, the low quadword of the
    register remains unchanged.

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    if src.size == 128:
        assert dest.size == 64
        dest.write(Operators.EXTRACT(src.read(), 64, 64))
    else:
        assert src.size == 64 and dest.size == 128
        value = Operators.EXTRACT(dest.read(), 0, 64)  # low part
        dest.write(Operators.CONCAT(128, src.read(), value))
def function[MOVHPD, parameter[cpu, dest, src]]:
    constant[ Moves high packed double-precision floating-point value. Moves a
    double-precision floating-point value from the source operand (second
    operand) to the destination operand (first operand). The source and
    destination operands can be an XMM register or a 64-bit memory location.
    This instruction allows double-precision floating-point values to be moved
    to and from the high quadword of an XMM register and memory. It cannot be
    used for register to register or memory to memory moves. When the
    destination operand is an XMM register, the low quadword of the register
    remains unchanged.
    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    ]
    if compare[name[src].size equal[==] constant[128]] begin[:]
        assert[compare[name[dest].size equal[==] constant[64]]]
        call[name[dest].write, parameter[call[name[Operators].EXTRACT, parameter[call[name[src].read, parameter[]], constant[64], constant[64]]]]]
keyword[def] identifier[MOVHPD] ( identifier[cpu] , identifier[dest] , identifier[src] ): literal[string] keyword[if] identifier[src] . identifier[size] == literal[int] : keyword[assert] identifier[dest] . identifier[size] == literal[int] identifier[dest] . identifier[write] ( identifier[Operators] . identifier[EXTRACT] ( identifier[src] . identifier[read] (), literal[int] , literal[int] )) keyword[else] : keyword[assert] identifier[src] . identifier[size] == literal[int] keyword[and] identifier[dest] . identifier[size] == literal[int] identifier[value] = identifier[Operators] . identifier[EXTRACT] ( identifier[dest] . identifier[read] (), literal[int] , literal[int] ) identifier[dest] . identifier[write] ( identifier[Operators] . identifier[CONCAT] ( literal[int] , identifier[src] . identifier[read] (), identifier[value] ))
def MOVHPD(cpu, dest, src):
    """
    Moves high packed double-precision floating-point value.

    Moves a double-precision floating-point value from the source operand
    (second operand) to the destination operand (first operand). The source
    and destination operands can be an XMM register or a 64-bit memory
    location. This instruction allows double-precision floating-point values
    to be moved to and from the high quadword of an XMM register and memory.
    It cannot be used for register to register or memory to memory moves. When
    the destination operand is an XMM register, the low quadword of the
    register remains unchanged.

    :param cpu: current CPU.
    :param dest: destination operand.
    :param src: source operand.
    """
    if src.size == 128:
        assert dest.size == 64
        dest.write(Operators.EXTRACT(src.read(), 64, 64)) # depends on [control=['if'], data=[]]
    else:
        assert src.size == 64 and dest.size == 128
        value = Operators.EXTRACT(dest.read(), 0, 64) # low part
        dest.write(Operators.CONCAT(128, src.read(), value))
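A plain-int model of the two MOVHPD directions; EXTRACT and CONCAT are re-implemented here with shifts under assumed conventions (bit offset/size, most-significant operand first), since the real Operators helpers also handle symbolic values:

def EXTRACT(value, offset, size):
    # Take `size` bits of `value` starting at bit `offset`.
    return (value >> offset) & ((1 << size) - 1)

def CONCAT(total_size, high, low):
    # Two 64-bit halves assumed, high quadword listed first.
    return (high << (total_size - 64)) | low

xmm = 0x11111111222222223333333344444444     # high | low quadwords
assert EXTRACT(xmm, 64, 64) == 0x1111111122222222          # xmm -> m64
assert CONCAT(128, 0xAAAAAAAABBBBBBBB, EXTRACT(xmm, 0, 64)) \
    == 0xAAAAAAAABBBBBBBB3333333344444444                  # m64 -> xmm, low kept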
def getdatastrs(self): """Retrieve the dataset standard string attributes. Args:: no argument Returns:: 4-element tuple holding: - dataset label string (attribute 'long_name') - dataset unit (attribute 'units') - dataset output format (attribute 'format') - dataset coordinate system (attribute 'coordsys') The values returned by 'getdatastrs' are part of the so-called "standard" SDS attributes. Those 4 values correspond respectively to the following attributes:: long_name, units, format, coordsys . C library equivalent: SDgetdatastrs """ status, label, unit, format, coord_system = \ _C.SDgetdatastrs(self._id, 128) _checkErr('getdatastrs', status, 'cannot execute') return label, unit, format, coord_system
def function[getdatastrs, parameter[self]]: constant[Retrieve the dataset standard string attributes. Args:: no argument Returns:: 4-element tuple holding: - dataset label string (attribute 'long_name') - dataset unit (attribute 'units') - dataset output format (attribute 'format') - dataset coordinate system (attribute 'coordsys') The values returned by 'getdatastrs' are part of the so-called "standard" SDS attributes. Those 4 values correspond respectively to the following attributes:: long_name, units, format, coordsys . C library equivalent: SDgetdatastrs ] <ast.Tuple object at 0x7da1b12955d0> assign[=] call[name[_C].SDgetdatastrs, parameter[name[self]._id, constant[128]]] call[name[_checkErr], parameter[constant[getdatastrs], name[status], constant[cannot execute]]] return[tuple[[<ast.Name object at 0x7da1b1297340>, <ast.Name object at 0x7da1b12950c0>, <ast.Name object at 0x7da1b1296f80>, <ast.Name object at 0x7da1b1295750>]]]
keyword[def] identifier[getdatastrs] ( identifier[self] ): literal[string] identifier[status] , identifier[label] , identifier[unit] , identifier[format] , identifier[coord_system] = identifier[_C] . identifier[SDgetdatastrs] ( identifier[self] . identifier[_id] , literal[int] ) identifier[_checkErr] ( literal[string] , identifier[status] , literal[string] ) keyword[return] identifier[label] , identifier[unit] , identifier[format] , identifier[coord_system]
def getdatastrs(self): """Retrieve the dataset standard string attributes. Args:: no argument Returns:: 4-element tuple holding: - dataset label string (attribute 'long_name') - dataset unit (attribute 'units') - dataset output format (attribute 'format') - dataset coordinate system (attribute 'coordsys') The values returned by 'getdatastrs' are part of the so-called "standard" SDS attributes. Those 4 values correspond respectively to the following attributes:: long_name, units, format, coordsys . C library equivalent: SDgetdatastrs """ (status, label, unit, format, coord_system) = _C.SDgetdatastrs(self._id, 128) _checkErr('getdatastrs', status, 'cannot execute') return (label, unit, format, coord_system)
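A hypothetical usage sketch for getdatastrs above, assuming the pyhdf SD API; the file name "example.hdf" and the dataset index are invented for illustration:

# Usage sketch only; requires pyhdf and an "example.hdf" with at least one SDS.
from pyhdf.SD import SD, SDC

sd = SD("example.hdf", SDC.READ)   # open the HDF4 file read-only
sds = sd.select(0)                 # select the first scientific dataset
label, unit, fmt, coordsys = sds.getdatastrs()
print(label, unit, fmt, coordsys)  # the four "standard" SDS string attributes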
def fileupdate(self, data): """Method to update extra metadata fields with dict obtained through `fileinfo`""" self.name = data["name"] add = self.__additional add["filetype"] = "other" for filetype in ("book", "image", "video", "audio", "archive"): if filetype in data: add["filetype"] = filetype break if add["filetype"] in ("image", "video", "audio"): add["thumb"] = data.get("thumb", dict()) # checksum is md5 add["checksum"] = data["checksum"] add["expire_time"] = data["expires"] / 1000 add["size"] = data["size"] add["info"] = data.get(add["filetype"], dict()) add["uploader"] = data["user"] if self.room.admin: add["info"].update({"room": data.get("room")}) add["info"].update({"uploader_ip": data.get("uploader_ip")}) self.updated = True
def function[fileupdate, parameter[self, data]]: constant[Method to update extra metadata fields with dict obtained through `fileinfo`] name[self].name assign[=] call[name[data]][constant[name]] variable[add] assign[=] name[self].__additional call[name[add]][constant[filetype]] assign[=] constant[other] for taget[name[filetype]] in starred[tuple[[<ast.Constant object at 0x7da18eb571c0>, <ast.Constant object at 0x7da18eb55960>, <ast.Constant object at 0x7da18eb56e90>, <ast.Constant object at 0x7da18eb57220>, <ast.Constant object at 0x7da18eb555d0>]]] begin[:] if compare[name[filetype] in name[data]] begin[:] call[name[add]][constant[filetype]] assign[=] name[filetype] break if compare[call[name[add]][constant[filetype]] in tuple[[<ast.Constant object at 0x7da18eb571f0>, <ast.Constant object at 0x7da18eb578e0>, <ast.Constant object at 0x7da18eb57d90>]]] begin[:] call[name[add]][constant[thumb]] assign[=] call[name[data].get, parameter[constant[thumb], call[name[dict], parameter[]]]] call[name[add]][constant[checksum]] assign[=] call[name[data]][constant[checksum]] call[name[add]][constant[expire_time]] assign[=] binary_operation[call[name[data]][constant[expires]] / constant[1000]] call[name[add]][constant[size]] assign[=] call[name[data]][constant[size]] call[name[add]][constant[info]] assign[=] call[name[data].get, parameter[call[name[add]][constant[filetype]], call[name[dict], parameter[]]]] call[name[add]][constant[uploader]] assign[=] call[name[data]][constant[user]] if name[self].room.admin begin[:] call[call[name[add]][constant[info]].update, parameter[dictionary[[<ast.Constant object at 0x7da18eb543a0>], [<ast.Call object at 0x7da18eb542b0>]]]] call[call[name[add]][constant[info]].update, parameter[dictionary[[<ast.Constant object at 0x7da18eb57ac0>], [<ast.Call object at 0x7da18eb542e0>]]]] name[self].updated assign[=] constant[True]
keyword[def] identifier[fileupdate] ( identifier[self] , identifier[data] ): literal[string] identifier[self] . identifier[name] = identifier[data] [ literal[string] ] identifier[add] = identifier[self] . identifier[__additional] identifier[add] [ literal[string] ]= literal[string] keyword[for] identifier[filetype] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ): keyword[if] identifier[filetype] keyword[in] identifier[data] : identifier[add] [ literal[string] ]= identifier[filetype] keyword[break] keyword[if] identifier[add] [ literal[string] ] keyword[in] ( literal[string] , literal[string] , literal[string] ): identifier[add] [ literal[string] ]= identifier[data] . identifier[get] ( literal[string] , identifier[dict] ()) identifier[add] [ literal[string] ]= identifier[data] [ literal[string] ] identifier[add] [ literal[string] ]= identifier[data] [ literal[string] ]/ literal[int] identifier[add] [ literal[string] ]= identifier[data] [ literal[string] ] identifier[add] [ literal[string] ]= identifier[data] . identifier[get] ( identifier[add] [ literal[string] ], identifier[dict] ()) identifier[add] [ literal[string] ]= identifier[data] [ literal[string] ] keyword[if] identifier[self] . identifier[room] . identifier[admin] : identifier[add] [ literal[string] ]. identifier[update] ({ literal[string] : identifier[data] . identifier[get] ( literal[string] )}) identifier[add] [ literal[string] ]. identifier[update] ({ literal[string] : identifier[data] . identifier[get] ( literal[string] )}) identifier[self] . identifier[updated] = keyword[True]
def fileupdate(self, data): """Method to update extra metadata fields with dict obtained through `fileinfo`""" self.name = data['name'] add = self.__additional add['filetype'] = 'other' for filetype in ('book', 'image', 'video', 'audio', 'archive'): if filetype in data: add['filetype'] = filetype break # depends on [control=['if'], data=['filetype']] # depends on [control=['for'], data=['filetype']] if add['filetype'] in ('image', 'video', 'audio'): add['thumb'] = data.get('thumb', dict()) # depends on [control=['if'], data=[]] # checksum is md5 add['checksum'] = data['checksum'] add['expire_time'] = data['expires'] / 1000 add['size'] = data['size'] add['info'] = data.get(add['filetype'], dict()) add['uploader'] = data['user'] if self.room.admin: add['info'].update({'room': data.get('room')}) add['info'].update({'uploader_ip': data.get('uploader_ip')}) # depends on [control=['if'], data=[]] self.updated = True
def qos_queue_scheduler_strict_priority_priority_number(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos") queue = ET.SubElement(qos, "queue") scheduler = ET.SubElement(queue, "scheduler") strict_priority = ET.SubElement(scheduler, "strict-priority") priority_number = ET.SubElement(strict_priority, "priority-number") priority_number.text = kwargs.pop('priority_number') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[qos_queue_scheduler_strict_priority_priority_number, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[qos] assign[=] call[name[ET].SubElement, parameter[name[config], constant[qos]]] variable[queue] assign[=] call[name[ET].SubElement, parameter[name[qos], constant[queue]]] variable[scheduler] assign[=] call[name[ET].SubElement, parameter[name[queue], constant[scheduler]]] variable[strict_priority] assign[=] call[name[ET].SubElement, parameter[name[scheduler], constant[strict-priority]]] variable[priority_number] assign[=] call[name[ET].SubElement, parameter[name[strict_priority], constant[priority-number]]] name[priority_number].text assign[=] call[name[kwargs].pop, parameter[constant[priority_number]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[qos_queue_scheduler_strict_priority_priority_number] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[qos] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] ) identifier[queue] = identifier[ET] . identifier[SubElement] ( identifier[qos] , literal[string] ) identifier[scheduler] = identifier[ET] . identifier[SubElement] ( identifier[queue] , literal[string] ) identifier[strict_priority] = identifier[ET] . identifier[SubElement] ( identifier[scheduler] , literal[string] ) identifier[priority_number] = identifier[ET] . identifier[SubElement] ( identifier[strict_priority] , literal[string] ) identifier[priority_number] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def qos_queue_scheduler_strict_priority_priority_number(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') qos = ET.SubElement(config, 'qos', xmlns='urn:brocade.com:mgmt:brocade-qos') queue = ET.SubElement(qos, 'queue') scheduler = ET.SubElement(queue, 'scheduler') strict_priority = ET.SubElement(scheduler, 'strict-priority') priority_number = ET.SubElement(strict_priority, 'priority-number') priority_number.text = kwargs.pop('priority_number') callback = kwargs.pop('callback', self._callback) return callback(config)
def query_segdb(cls, flags, *args, **kwargs):
    """Query the initial LIGO segment database for a list of flags.

    Parameters
    ----------
    flags : `iterable`
        A list of flag names for which to query.

    *args
        Either two `float`-like numbers indicating the
        GPS [start, stop) interval, or a `SegmentList`
        defining a number of summary segments.

    url : `str`, optional
        URL of the segment database, defaults to
        ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
        ``'https://segments.ligo.org'``

    Returns
    -------
    flagdict : `DataQualityDict`
        An ordered `DataQualityDict` of (name, `DataQualityFlag`)
        pairs.
    """
    warnings.warn("query_segdb is deprecated and will be removed in a "
                  "future release", DeprecationWarning)

    # parse segments
    qsegs = _parse_query_segments(args, cls.query_segdb)

    url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER)
    if kwargs.pop('on_error', None) is not None:
        warnings.warn("DataQualityDict.query_segdb doesn't accept the "
                      "on_error keyword argument")
    if kwargs.keys():
        raise TypeError("DataQualityDict.query_segdb has no keyword "
                        "argument '%s'" % list(kwargs.keys())[0])

    # process query
    from glue.segmentdb import (segmentdb_utils as segdb_utils,
                                query_engine as segdb_engine)
    connection = segdb_utils.setup_database(url)
    engine = segdb_engine.LdbdQueryEngine(connection)
    segdefs = []
    for flag in flags:
        dqflag = DataQualityFlag(name=flag)
        ifo = dqflag.ifo
        name = dqflag.tag
        if dqflag.version is None:
            vers = '*'
        else:
            vers = dqflag.version
        for gpsstart, gpsend in qsegs:
            if float(gpsend) == +inf:
                gpsend = to_gps('now').seconds
            gpsstart = float(gpsstart)
            if not gpsstart.is_integer():
                raise ValueError("Segment database queries can only "
                                 "operate on integer GPS times")
            gpsend = float(gpsend)
            if not gpsend.is_integer():
                raise ValueError("Segment database queries can only "
                                 "operate on integer GPS times")
            segdefs += segdb_utils.expand_version_number(
                engine, (ifo, name, vers, gpsstart, gpsend, 0, 0))
    segs = segdb_utils.query_segments(engine, 'segment', segdefs)
    segsum = segdb_utils.query_segments(engine, 'segment_summary', segdefs)
    # build output
    out = cls()
    for definition, segments, summary in zip(segdefs, segs, segsum):
        # parse flag name
        flag = ':'.join(map(str, definition[:3]))
        name = flag.rsplit(':', 1)[0]
        # if versionless
        if flag.endswith('*'):
            flag = name
            key = name
        # if asked for versionless, but returned a version
        elif flag not in flags and name in flags:
            key = name
        # other
        else:
            key = flag
        # define flag
        if key not in out:
            out[key] = DataQualityFlag(name=flag)
        # add segments
        out[key].known.extend(summary)
        out[key].active.extend(segments)
    return out
def function[query_segdb, parameter[cls, flags]]: constant[Query the initial LIGO segment database for a list of flags.

    Parameters
    ----------
    flags : `iterable`
        A list of flag names for which to query.

    *args
        Either two `float`-like numbers indicating the
        GPS [start, stop) interval, or a `SegmentList`
        defining a number of summary segments.

    url : `str`, optional
        URL of the segment database, defaults to
        ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
        ``'https://segments.ligo.org'``

    Returns
    -------
    flagdict : `DataQualityDict`
        An ordered `DataQualityDict` of (name, `DataQualityFlag`)
        pairs.
    ] call[name[warnings].warn, parameter[constant[query_segdb is deprecated and will be removed in a future release], name[DeprecationWarning]]] variable[qsegs] assign[=] call[name[_parse_query_segments], parameter[name[args], name[cls].query_segdb]] variable[url] assign[=] call[name[kwargs].pop, parameter[constant[url], name[DEFAULT_SEGMENT_SERVER]]] if compare[call[name[kwargs].pop, parameter[constant[on_error], constant[None]]] is_not constant[None]] begin[:] call[name[warnings].warn, parameter[constant[DataQualityDict.query_segdb doesn't accept the on_error keyword argument]]] if call[name[kwargs].keys, parameter[]] begin[:] <ast.Raise object at 0x7da20e9b31f0> from relative_module[glue.segmentdb] import module[segmentdb_utils], module[query_engine] variable[connection] assign[=] call[name[segdb_utils].setup_database, parameter[name[url]]] variable[engine] assign[=] call[name[segdb_engine].LdbdQueryEngine, parameter[name[connection]]] variable[segdefs] assign[=] list[[]] for taget[name[flag]] in starred[name[flags]] begin[:] variable[dqflag] assign[=] call[name[DataQualityFlag], parameter[]] variable[ifo] assign[=] name[dqflag].ifo variable[name] assign[=] name[dqflag].tag if compare[name[dqflag].version is constant[None]] begin[:] variable[vers] assign[=] constant[*] for taget[tuple[[<ast.Name object at 0x7da20e954e50>, <ast.Name object at 0x7da20e955240>]]] in starred[name[qsegs]] begin[:] if compare[call[name[float], parameter[name[gpsend]]] equal[==] <ast.UnaryOp object at 0x7da20e954c70>] begin[:] variable[gpsend] assign[=] call[name[to_gps], parameter[constant[now]]].seconds variable[gpsstart] assign[=] call[name[float], parameter[name[gpsstart]]] if <ast.UnaryOp object at 0x7da20e9638e0> begin[:] <ast.Raise object at 0x7da20e962890> variable[gpsend] assign[=] call[name[float], parameter[name[gpsend]]] if <ast.UnaryOp object at 0x7da1b060a620> begin[:] <ast.Raise object at 0x7da1b060a200> <ast.AugAssign object at 0x7da1b0608dc0> variable[segs] assign[=] call[name[segdb_utils].query_segments, parameter[name[engine], constant[segment], name[segdefs]]] variable[segsum] assign[=] call[name[segdb_utils].query_segments, parameter[name[engine], constant[segment_summary], name[segdefs]]] variable[out] assign[=] call[name[cls], parameter[]] for taget[tuple[[<ast.Name object at 0x7da1b0608f40>, <ast.Name object at 0x7da1b0608b50>, <ast.Name object at 0x7da1b06083d0>]]] in starred[call[name[zip], parameter[name[segdefs], name[segs], name[segsum]]]] begin[:] variable[flag] assign[=] call[constant[:].join, parameter[call[name[map], parameter[name[str], call[name[definition]][<ast.Slice object at 0x7da1b0609060>]]]]] variable[name] assign[=] call[call[name[flag].rsplit, parameter[constant[:], constant[1]]]][constant[0]] if call[name[flag].endswith, parameter[constant[*]]] begin[:] variable[flag] assign[=] name[name] variable[key] assign[=] name[name] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[out]] begin[:] call[name[out]][name[key]] assign[=] call[name[DataQualityFlag], parameter[]] call[call[name[out]][name[key]].known.extend, parameter[name[summary]]] call[call[name[out]][name[key]].active.extend, parameter[name[segments]]] return[name[out]]
keyword[def] identifier[query_segdb] ( identifier[cls] , identifier[flags] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[warnings] . identifier[warn] ( literal[string] literal[string] , identifier[DeprecationWarning] ) identifier[qsegs] = identifier[_parse_query_segments] ( identifier[args] , identifier[cls] . identifier[query_segdb] ) identifier[url] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_SEGMENT_SERVER] ) keyword[if] identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] ) keyword[is] keyword[not] keyword[None] : identifier[warnings] . identifier[warn] ( literal[string] literal[string] ) keyword[if] identifier[kwargs] . identifier[keys] (): keyword[raise] identifier[TypeError] ( literal[string] literal[string] % identifier[list] ( identifier[kwargs] . identifier[keys] ())[ literal[int] ]) keyword[from] identifier[glue] . identifier[segmentdb] keyword[import] ( identifier[segmentdb_utils] keyword[as] identifier[segdb_utils] , identifier[query_engine] keyword[as] identifier[segdb_engine] ) identifier[connection] = identifier[segdb_utils] . identifier[setup_database] ( identifier[url] ) identifier[engine] = identifier[segdb_engine] . identifier[LdbdQueryEngine] ( identifier[connection] ) identifier[segdefs] =[] keyword[for] identifier[flag] keyword[in] identifier[flags] : identifier[dqflag] = identifier[DataQualityFlag] ( identifier[name] = identifier[flag] ) identifier[ifo] = identifier[dqflag] . identifier[ifo] identifier[name] = identifier[dqflag] . identifier[tag] keyword[if] identifier[dqflag] . identifier[version] keyword[is] keyword[None] : identifier[vers] = literal[string] keyword[else] : identifier[vers] = identifier[dqflag] . identifier[version] keyword[for] identifier[gpsstart] , identifier[gpsend] keyword[in] identifier[qsegs] : keyword[if] identifier[float] ( identifier[gpsend] )==+ identifier[inf] : identifier[gpsend] = identifier[to_gps] ( literal[string] ). identifier[seconds] identifier[gpsstart] = identifier[float] ( identifier[gpsstart] ) keyword[if] keyword[not] identifier[gpsstart] . identifier[is_integer] (): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[gpsend] = identifier[float] ( identifier[gpsend] ) keyword[if] keyword[not] identifier[gpsend] . identifier[is_integer] (): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) identifier[segdefs] += identifier[segdb_utils] . identifier[expand_version_number] ( identifier[engine] ,( identifier[ifo] , identifier[name] , identifier[vers] , identifier[gpsstart] , identifier[gpsend] , literal[int] , literal[int] )) identifier[segs] = identifier[segdb_utils] . identifier[query_segments] ( identifier[engine] , literal[string] , identifier[segdefs] ) identifier[segsum] = identifier[segdb_utils] . identifier[query_segments] ( identifier[engine] , literal[string] , identifier[segdefs] ) identifier[out] = identifier[cls] () keyword[for] identifier[definition] , identifier[segments] , identifier[summary] keyword[in] identifier[zip] ( identifier[segdefs] , identifier[segs] , identifier[segsum] ): identifier[flag] = literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[definition] [: literal[int] ])) identifier[name] = identifier[flag] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ] keyword[if] identifier[flag] . identifier[endswith] ( literal[string] ): identifier[flag] = identifier[name] identifier[key] = identifier[name] keyword[elif] identifier[flag] keyword[not] keyword[in] identifier[flags] keyword[and] identifier[name] keyword[in] identifier[flags] : identifier[key] = identifier[name] keyword[else] : identifier[key] = identifier[flag] keyword[if] identifier[key] keyword[not] keyword[in] identifier[out] : identifier[out] [ identifier[key] ]= identifier[DataQualityFlag] ( identifier[name] = identifier[flag] ) identifier[out] [ identifier[key] ]. identifier[known] . identifier[extend] ( identifier[summary] ) identifier[out] [ identifier[key] ]. identifier[active] . identifier[extend] ( identifier[segments] ) keyword[return] identifier[out]
def query_segdb(cls, flags, *args, **kwargs): """Query the initial LIGO segment database for a list of flags. Parameters ---------- flags : `iterable` A list of flag names for which to query. *args Either two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments. url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flagdict : `DataQualityDict` An ordered `DataQualityDict` of (name, `DataQualityFlag`) pairs. """ warnings.warn('query_segdb is deprecated and will be removed in a future release', DeprecationWarning) # parse segments qsegs = _parse_query_segments(args, cls.query_segdb) url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER) if kwargs.pop('on_error', None) is not None: warnings.warn("DataQualityDict.query_segdb doesn't accept the on_error keyword argument") # depends on [control=['if'], data=[]] if kwargs.keys(): raise TypeError("DataQualityDict.query_segdb has no keyword argument '%s'" % list(kwargs.keys())[0]) # depends on [control=['if'], data=[]] # process query from glue.segmentdb import segmentdb_utils as segdb_utils, query_engine as segdb_engine connection = segdb_utils.setup_database(url) engine = segdb_engine.LdbdQueryEngine(connection) segdefs = [] for flag in flags: dqflag = DataQualityFlag(name=flag) ifo = dqflag.ifo name = dqflag.tag if dqflag.version is None: vers = '*' # depends on [control=['if'], data=[]] else: vers = dqflag.version for (gpsstart, gpsend) in qsegs: if float(gpsend) == +inf: gpsend = to_gps('now').seconds # depends on [control=['if'], data=[]] gpsstart = float(gpsstart) if not gpsstart.is_integer(): raise ValueError('Segment database queries can only operate on integer GPS times') # depends on [control=['if'], data=[]] gpsend = float(gpsend) if not gpsend.is_integer(): raise ValueError('Segment database queries can only operate on integer GPS times') # depends on [control=['if'], data=[]] segdefs += segdb_utils.expand_version_number(engine, (ifo, name, vers, gpsstart, gpsend, 0, 0)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['flag']] segs = segdb_utils.query_segments(engine, 'segment', segdefs) segsum = segdb_utils.query_segments(engine, 'segment_summary', segdefs) # build output out = cls() for (definition, segments, summary) in zip(segdefs, segs, segsum): # parse flag name flag = ':'.join(map(str, definition[:3])) name = flag.rsplit(':', 1)[0] # if versionless if flag.endswith('*'): flag = name key = name # depends on [control=['if'], data=[]] # if asked for versionless, but returned a version elif flag not in flags and name in flags: key = name # depends on [control=['if'], data=[]] else: # other key = flag # define flag if key not in out: out[key] = DataQualityFlag(name=flag) # depends on [control=['if'], data=['key', 'out']] # add segments out[key].known.extend(summary) out[key].active.extend(segments) # depends on [control=['for'], data=[]] return out
def _find_players(self, year):
    """
    Find all player IDs for the requested team.

    For the requested team and year (if applicable), pull the roster
    table and parse the player ID for all players on the roster and create
    an instance of the Player class for the player. All player instances
    are added to the 'players' property to get all stats for all players
    on a team.

    Parameters
    ----------
    year : string
        The 4-digit string representing the year to pull the team's
        roster from.
    """
    if not year:
        year = utils._find_year_for_season('mlb')
    url = self._create_url(year)
    page = self._pull_team_page(url)
    if not page:
        output = ("Can't pull requested team page. Ensure the following "
                  "URL exists: %s" % url)
        raise ValueError(output)
    players = page('table#team_batting tbody tr').items()
    players_parsed = []
    for player in players:
        if 'class="thead"' in str(player):
            continue
        player_id = self._get_id(player)
        if self._slim:
            name = self._get_name(player)
            self._players[player_id] = name
        else:
            player_instance = Player(player_id)
            self._players.append(player_instance)
        players_parsed.append(player_id)

    for player in page('table#team_pitching tbody tr').items():
        if 'class="thead"' in str(player):
            continue
        player_id = self._get_id(player)
        # Skip players that show up in both batting and pitching tables, as
        # is often the case with National League pitchers.
        if player_id in players_parsed:
            continue
        if self._slim:
            name = self._get_name(player)
            self._players[player_id] = name
        else:
            player_instance = Player(player_id)
            self._players.append(player_instance)
def function[_find_players, parameter[self, year]]: constant[ Find all player IDs for the requested team. For the requested team and year (if applicable), pull the roster table and parse the player ID for all players on the roster and create an instance of the Player class for the player. All player instances are added to the 'players' property to get all stats for all players on a team. Parameters ---------- year : string The 4-digit string representing the year to pull the team's roster from. ] if <ast.UnaryOp object at 0x7da1b0b44e80> begin[:] variable[year] assign[=] call[name[utils]._find_year_for_season, parameter[constant[mlb]]] variable[url] assign[=] call[name[self]._create_url, parameter[name[year]]] variable[page] assign[=] call[name[self]._pull_team_page, parameter[name[url]]] if <ast.UnaryOp object at 0x7da1b0b46470> begin[:] variable[output] assign[=] binary_operation[constant[Can't pull requested team page. Ensure the following URL exists: %s] <ast.Mod object at 0x7da2590d6920> name[url]] <ast.Raise object at 0x7da1b0b462f0> variable[players] assign[=] call[call[name[page], parameter[constant[table#team_batting tbody tr]]].items, parameter[]] variable[players_parsed] assign[=] list[[]] for taget[name[player]] in starred[name[players]] begin[:] if compare[constant[class="thead"] in call[name[str], parameter[name[player]]]] begin[:] continue variable[player_id] assign[=] call[name[self]._get_id, parameter[name[player]]] if name[self]._slim begin[:] variable[name] assign[=] call[name[self]._get_name, parameter[name[player]]] call[name[self]._players][name[player_id]] assign[=] name[name] call[name[players_parsed].append, parameter[name[player_id]]] for taget[name[player]] in starred[call[call[name[page], parameter[constant[table#team_pitching tbody tr]]].items, parameter[]]] begin[:] if compare[constant[class="thead"] in call[name[str], parameter[name[player]]]] begin[:] continue variable[player_id] assign[=] call[name[self]._get_id, parameter[name[player]]] if compare[name[player_id] in name[players_parsed]] begin[:] continue if name[self]._slim begin[:] variable[name] assign[=] call[name[self]._get_name, parameter[name[player]]] call[name[self]._players][name[player_id]] assign[=] name[name]
keyword[def] identifier[_find_players] ( identifier[self] , identifier[year] ): literal[string] keyword[if] keyword[not] identifier[year] : identifier[year] = identifier[utils] . identifier[_find_year_for_season] ( literal[string] ) identifier[url] = identifier[self] . identifier[_create_url] ( identifier[year] ) identifier[page] = identifier[self] . identifier[_pull_team_page] ( identifier[url] ) keyword[if] keyword[not] identifier[page] : identifier[output] =( literal[string] literal[string] % identifier[url] ) keyword[raise] identifier[ValueError] ( identifier[output] ) identifier[players] = identifier[page] ( literal[string] ). identifier[items] () identifier[players_parsed] =[] keyword[for] identifier[player] keyword[in] identifier[players] : keyword[if] literal[string] keyword[in] identifier[str] ( identifier[player] ): keyword[continue] identifier[player_id] = identifier[self] . identifier[_get_id] ( identifier[player] ) keyword[if] identifier[self] . identifier[_slim] : identifier[name] = identifier[self] . identifier[_get_name] ( identifier[player] ) identifier[self] . identifier[_players] [ identifier[player_id] ]= identifier[name] keyword[else] : identifier[player_instance] = identifier[Player] ( identifier[player_id] ) identifier[self] . identifier[_players] . identifier[append] ( identifier[player_instance] ) identifier[players_parsed] . identifier[append] ( identifier[player_id] ) keyword[for] identifier[player] keyword[in] identifier[page] ( literal[string] ). identifier[items] (): keyword[if] literal[string] keyword[in] identifier[str] ( identifier[player] ): keyword[continue] identifier[player_id] = identifier[self] . identifier[_get_id] ( identifier[player] ) keyword[if] identifier[player_id] keyword[in] identifier[players_parsed] : keyword[continue] keyword[if] identifier[self] . identifier[_slim] : identifier[name] = identifier[self] . identifier[_get_name] ( identifier[player] ) identifier[self] . identifier[_players] [ identifier[player_id] ]= identifier[name] keyword[else] : identifier[player_instance] = identifier[Player] ( identifier[player_id] ) identifier[self] . identifier[_players] . identifier[append] ( identifier[player_instance] )
def _find_players(self, year): """ Find all player IDs for the requested team. For the requested team and year (if applicable), pull the roster table and parse the player ID for all players on the roster and create an instance of the Player class for the player. All player instances are added to the 'players' property to get all stats for all players on a team. Parameters ---------- year : string The 4-digit string representing the year to pull the team's roster from. """ if not year: year = utils._find_year_for_season('mlb') # depends on [control=['if'], data=[]] url = self._create_url(year) page = self._pull_team_page(url) if not page: output = "Can't pull requested team page. Ensure the following URL exists: %s" % url raise ValueError(output) # depends on [control=['if'], data=[]] players = page('table#team_batting tbody tr').items() players_parsed = [] for player in players: if 'class="thead"' in str(player): continue # depends on [control=['if'], data=[]] player_id = self._get_id(player) if self._slim: name = self._get_name(player) self._players[player_id] = name # depends on [control=['if'], data=[]] else: player_instance = Player(player_id) self._players.append(player_instance) players_parsed.append(player_id) # depends on [control=['for'], data=['player']] for player in page('table#team_pitching tbody tr').items(): if 'class="thead"' in str(player): continue # depends on [control=['if'], data=[]] player_id = self._get_id(player) # Skip players that show up in both batting and pitching tables, as # is often the case with National League pitchers. if player_id in players_parsed: continue # depends on [control=['if'], data=[]] if self._slim: name = self._get_name(player) self._players[player_id] = name # depends on [control=['if'], data=[]] else: player_instance = Player(player_id) self._players.append(player_instance) # depends on [control=['for'], data=['player']]
def _add_prj_file(original_gis_file, new_gis_file): """ Adds projection file """ out_prj_file = "{0}.prj".format(os.path.splitext(new_gis_file)[0]) if original_gis_file.endswith(".shp"): dataset = ogr.Open(original_gis_file) layer = dataset.GetLayer() spatial_ref = layer.GetSpatialRef() spatial_ref.MorphToESRI() spatial_ref_str = spatial_ref.ExportToWkt() else: dataset = gdal.Open(original_gis_file) spatial_ref_str = dataset.GetProjection() with open(out_prj_file, 'w') as prj_file: prj_file.write(spatial_ref_str)
def function[_add_prj_file, parameter[original_gis_file, new_gis_file]]: constant[ Adds projection file ] variable[out_prj_file] assign[=] call[constant[{0}.prj].format, parameter[call[call[name[os].path.splitext, parameter[name[new_gis_file]]]][constant[0]]]] if call[name[original_gis_file].endswith, parameter[constant[.shp]]] begin[:] variable[dataset] assign[=] call[name[ogr].Open, parameter[name[original_gis_file]]] variable[layer] assign[=] call[name[dataset].GetLayer, parameter[]] variable[spatial_ref] assign[=] call[name[layer].GetSpatialRef, parameter[]] call[name[spatial_ref].MorphToESRI, parameter[]] variable[spatial_ref_str] assign[=] call[name[spatial_ref].ExportToWkt, parameter[]] with call[name[open], parameter[name[out_prj_file], constant[w]]] begin[:] call[name[prj_file].write, parameter[name[spatial_ref_str]]]
keyword[def] identifier[_add_prj_file] ( identifier[original_gis_file] , identifier[new_gis_file] ): literal[string] identifier[out_prj_file] = literal[string] . identifier[format] ( identifier[os] . identifier[path] . identifier[splitext] ( identifier[new_gis_file] )[ literal[int] ]) keyword[if] identifier[original_gis_file] . identifier[endswith] ( literal[string] ): identifier[dataset] = identifier[ogr] . identifier[Open] ( identifier[original_gis_file] ) identifier[layer] = identifier[dataset] . identifier[GetLayer] () identifier[spatial_ref] = identifier[layer] . identifier[GetSpatialRef] () identifier[spatial_ref] . identifier[MorphToESRI] () identifier[spatial_ref_str] = identifier[spatial_ref] . identifier[ExportToWkt] () keyword[else] : identifier[dataset] = identifier[gdal] . identifier[Open] ( identifier[original_gis_file] ) identifier[spatial_ref_str] = identifier[dataset] . identifier[GetProjection] () keyword[with] identifier[open] ( identifier[out_prj_file] , literal[string] ) keyword[as] identifier[prj_file] : identifier[prj_file] . identifier[write] ( identifier[spatial_ref_str] )
def _add_prj_file(original_gis_file, new_gis_file): """ Adds projection file """ out_prj_file = '{0}.prj'.format(os.path.splitext(new_gis_file)[0]) if original_gis_file.endswith('.shp'): dataset = ogr.Open(original_gis_file) layer = dataset.GetLayer() spatial_ref = layer.GetSpatialRef() spatial_ref.MorphToESRI() spatial_ref_str = spatial_ref.ExportToWkt() # depends on [control=['if'], data=[]] else: dataset = gdal.Open(original_gis_file) spatial_ref_str = dataset.GetProjection() with open(out_prj_file, 'w') as prj_file: prj_file.write(spatial_ref_str) # depends on [control=['with'], data=['prj_file']]
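A minimal sketch of the raster branch of _add_prj_file, assuming the GDAL Python bindings; the file names "input.tif" and "output.tif" are invented for illustration:

# Sketch only; requires osgeo/GDAL and an existing "input.tif".
import os
from osgeo import gdal

dataset = gdal.Open("input.tif")
spatial_ref_str = dataset.GetProjection()  # projection as a WKT string
out_prj_file = "{0}.prj".format(os.path.splitext("output.tif")[0])
with open(out_prj_file, "w") as prj_file:
    prj_file.write(spatial_ref_str)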
def add_extra_datas(self, extra_datas):
    """
    Add extra datas to the last row

    headers : [col1, col2, col3, col4, col5]
    row : {col1: a1, col2: a2, col3: a3}
    extra_datas : [a4, a5]

    row becomes :
        {col1: a1, col2: a2, col3: a3, col4: a4, col5: a5}

    in case of longer extra_datas, the last columns will be overridden

    :param list extra_datas: list of values to set in the last columns
    """
    # we will add datas starting from the last index
    for index, data in enumerate(extra_datas):
        header = self.extra_headers[index]
        self._datas[-1][header['label']] = data
def function[add_extra_datas, parameter[self, extra_datas]]: constant[ Add extra datas to the last row headers : [col1, col2, col3, col4, col5] row : {col1: a1, col2: a2, col3: a3} extra_datas : [a4, a5] row becomes : {col1: a1, col2: a2, col3: a3, col4: a4, col5: a5} in case of longer extra_datas, the last columns will be overridden :param list extra_datas: list of values to set in the last columns ] for taget[tuple[[<ast.Name object at 0x7da1b170d660>, <ast.Name object at 0x7da1b170c370>]]] in starred[call[name[enumerate], parameter[name[extra_datas]]]] begin[:] variable[header] assign[=] call[name[self].extra_headers][name[index]] call[call[name[self]._datas][<ast.UnaryOp object at 0x7da20e9b2680>]][call[name[header]][constant[label]]] assign[=] name[data]
keyword[def] identifier[add_extra_datas] ( identifier[self] , identifier[extra_datas] ): literal[string] keyword[for] identifier[index] , identifier[data] keyword[in] identifier[enumerate] ( identifier[extra_datas] ): identifier[header] = identifier[self] . identifier[extra_headers] [ identifier[index] ] identifier[self] . identifier[_datas] [- literal[int] ][ identifier[header] [ literal[string] ]]= identifier[data]
def add_extra_datas(self, extra_datas): """ Add extra datas to the last row headers : [col1, col2, col3, col4, col5] row : {col1: a1, col2: a2, col3: a3} extra_datas : [a4, a5] row becomes : {col1: a1, col2: a2, col3: a3, col4: a4, col5: a5} in case of longer extra_datas, the last columns will be overridden :param list extra_datas: list of values to set in the last columns """ # we will add datas starting from the last index for (index, data) in enumerate(extra_datas): header = self.extra_headers[index] self._datas[-1][header['label']] = data # depends on [control=['for'], data=[]]
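A self-contained worked example of the positional mapping in add_extra_datas, with invented headers and values:

# Standalone sketch of the mapping logic; all names are illustrative.
extra_headers = [{"label": "col4"}, {"label": "col5"}]
row = {"col1": "a1", "col2": "a2", "col3": "a3"}
for index, data in enumerate(["a4", "a5"]):
    row[extra_headers[index]["label"]] = data
print(row)  # {'col1': 'a1', 'col2': 'a2', 'col3': 'a3', 'col4': 'a4', 'col5': 'a5'}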
def get_edge_color(self, increment=1):
    """
    Returns the current edge color, then increments the index by what's specified
    """
    i = self.edge_colors_index
    self.edge_colors_index += increment
    if self.edge_colors_index >= len(self.edge_colors):
        self.edge_colors_index = self.edge_colors_index - len(self.edge_colors)
        if self.edge_colors_index >= len(self.edge_colors):
            self.edge_colors_index = 0 # to be safe
    return self.edge_colors[i]
def function[get_edge_color, parameter[self, increment]]: constant[ Returns the current edge color, then increments the index by what's specified ] variable[i] assign[=] name[self].edge_colors_index <ast.AugAssign object at 0x7da18bc70700> if compare[name[self].edge_colors_index greater_or_equal[>=] call[name[len], parameter[name[self].edge_colors]]] begin[:] name[self].edge_colors_index assign[=] binary_operation[name[self].edge_colors_index - call[name[len], parameter[name[self].edge_colors]]] if compare[name[self].edge_colors_index greater_or_equal[>=] call[name[len], parameter[name[self].edge_colors]]] begin[:] name[self].edge_colors_index assign[=] constant[0] return[call[name[self].edge_colors][name[i]]]
keyword[def] identifier[get_edge_color] ( identifier[self] , identifier[increment] = literal[int] ): literal[string] identifier[i] = identifier[self] . identifier[edge_colors_index] identifier[self] . identifier[edge_colors_index] += identifier[increment] keyword[if] identifier[self] . identifier[edge_colors_index] >= identifier[len] ( identifier[self] . identifier[edge_colors] ): identifier[self] . identifier[edge_colors_index] = identifier[self] . identifier[edge_colors_index] - identifier[len] ( identifier[self] . identifier[edge_colors] ) keyword[if] identifier[self] . identifier[edge_colors_index] >= identifier[len] ( identifier[self] . identifier[edge_colors] ): identifier[self] . identifier[edge_colors_index] = literal[int] keyword[return] identifier[self] . identifier[edge_colors] [ identifier[i] ]
def get_edge_color(self, increment=1): """ Returns the current edge color, then increments the index by what's specified """ i = self.edge_colors_index self.edge_colors_index += increment if self.edge_colors_index >= len(self.edge_colors): self.edge_colors_index = self.edge_colors_index - len(self.edge_colors) if self.edge_colors_index >= len(self.edge_colors): self.edge_colors_index = 0 # to be safe # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return self.edge_colors[i]
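A runnable sketch of the wrap-around indexing in get_edge_color, using an invented color list and a minimal host class:

# Minimal standalone re-creation of the cycling behaviour; colors are made up.
class Palette:
    def __init__(self, edge_colors):
        self.edge_colors = edge_colors
        self.edge_colors_index = 0

    def get_edge_color(self, increment=1):
        i = self.edge_colors_index
        self.edge_colors_index += increment
        if self.edge_colors_index >= len(self.edge_colors):
            self.edge_colors_index -= len(self.edge_colors)
            if self.edge_colors_index >= len(self.edge_colors):
                self.edge_colors_index = 0  # to be safe
        return self.edge_colors[i]

p = Palette(["red", "green", "blue"])
print([p.get_edge_color() for _ in range(5)])  # ['red', 'green', 'blue', 'red', 'green']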
def _save_documentation(version, base_url="https://spark.apache.org/docs"): """ Write the spark property documentation to a file """ target_dir = join(dirname(__file__), 'spylon', 'spark') with open(join(target_dir, "spark_properties_{}.json".format(version)), 'w') as fp: all_props = _fetch_documentation(version=version, base_url=base_url) all_props = sorted(all_props, key=lambda x: x[0]) all_props_d = [{"property": p, "default": d, "description": desc} for p, d, desc in all_props] json.dump(all_props_d, fp, indent=2)
def function[_save_documentation, parameter[version, base_url]]: constant[ Write the spark property documentation to a file ] variable[target_dir] assign[=] call[name[join], parameter[call[name[dirname], parameter[name[__file__]]], constant[spylon], constant[spark]]] with call[name[open], parameter[call[name[join], parameter[name[target_dir], call[constant[spark_properties_{}.json].format, parameter[name[version]]]]], constant[w]]] begin[:] variable[all_props] assign[=] call[name[_fetch_documentation], parameter[]] variable[all_props] assign[=] call[name[sorted], parameter[name[all_props]]] variable[all_props_d] assign[=] <ast.ListComp object at 0x7da204564dc0> call[name[json].dump, parameter[name[all_props_d], name[fp]]]
keyword[def] identifier[_save_documentation] ( identifier[version] , identifier[base_url] = literal[string] ): literal[string] identifier[target_dir] = identifier[join] ( identifier[dirname] ( identifier[__file__] ), literal[string] , literal[string] ) keyword[with] identifier[open] ( identifier[join] ( identifier[target_dir] , literal[string] . identifier[format] ( identifier[version] )), literal[string] ) keyword[as] identifier[fp] : identifier[all_props] = identifier[_fetch_documentation] ( identifier[version] = identifier[version] , identifier[base_url] = identifier[base_url] ) identifier[all_props] = identifier[sorted] ( identifier[all_props] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ]) identifier[all_props_d] =[{ literal[string] : identifier[p] , literal[string] : identifier[d] , literal[string] : identifier[desc] } keyword[for] identifier[p] , identifier[d] , identifier[desc] keyword[in] identifier[all_props] ] identifier[json] . identifier[dump] ( identifier[all_props_d] , identifier[fp] , identifier[indent] = literal[int] )
def _save_documentation(version, base_url='https://spark.apache.org/docs'): """ Write the spark property documentation to a file """ target_dir = join(dirname(__file__), 'spylon', 'spark') with open(join(target_dir, 'spark_properties_{}.json'.format(version)), 'w') as fp: all_props = _fetch_documentation(version=version, base_url=base_url) all_props = sorted(all_props, key=lambda x: x[0]) all_props_d = [{'property': p, 'default': d, 'description': desc} for (p, d, desc) in all_props] json.dump(all_props_d, fp, indent=2) # depends on [control=['with'], data=['fp']]
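A small sketch of the JSON shape _save_documentation writes, with one invented property row standing in for the fetched documentation:

import json

# Invented sample row; real rows come from _fetch_documentation().
all_props = [("spark.executor.cores", "1", "Number of cores per executor")]
all_props = sorted(all_props, key=lambda x: x[0])
records = [{"property": p, "default": d, "description": desc} for p, d, desc in all_props]
with open("spark_properties_example.json", "w") as fp:
    json.dump(records, fp, indent=2)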
def external_system_identifiers(endpoint): """Populate the ``external_system_identifiers`` key. Also populates the ``new_record`` key through side effects. """ @utils.flatten @utils.for_each_value def _external_system_identifiers(self, key, value): new_recid = maybe_int(value.get('d')) if new_recid: self['new_record'] = get_record_ref(new_recid, endpoint) return [ { 'schema': 'SPIRES', 'value': ext_sys_id, } for ext_sys_id in force_list(value.get('a')) ] return _external_system_identifiers
def function[external_system_identifiers, parameter[endpoint]]: constant[Populate the ``external_system_identifiers`` key. Also populates the ``new_record`` key through side effects. ] def function[_external_system_identifiers, parameter[self, key, value]]: variable[new_recid] assign[=] call[name[maybe_int], parameter[call[name[value].get, parameter[constant[d]]]]] if name[new_recid] begin[:] call[name[self]][constant[new_record]] assign[=] call[name[get_record_ref], parameter[name[new_recid], name[endpoint]]] return[<ast.ListComp object at 0x7da207f01360>] return[name[_external_system_identifiers]]
keyword[def] identifier[external_system_identifiers] ( identifier[endpoint] ): literal[string] @ identifier[utils] . identifier[flatten] @ identifier[utils] . identifier[for_each_value] keyword[def] identifier[_external_system_identifiers] ( identifier[self] , identifier[key] , identifier[value] ): identifier[new_recid] = identifier[maybe_int] ( identifier[value] . identifier[get] ( literal[string] )) keyword[if] identifier[new_recid] : identifier[self] [ literal[string] ]= identifier[get_record_ref] ( identifier[new_recid] , identifier[endpoint] ) keyword[return] [ { literal[string] : literal[string] , literal[string] : identifier[ext_sys_id] , } keyword[for] identifier[ext_sys_id] keyword[in] identifier[force_list] ( identifier[value] . identifier[get] ( literal[string] )) ] keyword[return] identifier[_external_system_identifiers]
def external_system_identifiers(endpoint): """Populate the ``external_system_identifiers`` key. Also populates the ``new_record`` key through side effects. """ @utils.flatten @utils.for_each_value def _external_system_identifiers(self, key, value): new_recid = maybe_int(value.get('d')) if new_recid: self['new_record'] = get_record_ref(new_recid, endpoint) # depends on [control=['if'], data=[]] return [{'schema': 'SPIRES', 'value': ext_sys_id} for ext_sys_id in force_list(value.get('a'))] return _external_system_identifiers
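A stripped-down sketch of the closure pattern behind external_system_identifiers: the factory captures endpoint, and the inner rule both mutates the record (side effect) and returns a list. The dojson decorators and get_record_ref are dropped, and the '$ref' shape below is invented for illustration:

# Sketch of a rule factory; not the library's actual record-ref format.
def make_rule(endpoint):
    def rule(record, value):
        new_recid = value.get('d')
        if new_recid:  # side effect: populate the 'new_record' key
            record['new_record'] = {'$ref': 'records/{}/{}'.format(endpoint, new_recid)}
        return [{'schema': 'SPIRES', 'value': v} for v in value.get('a', [])]
    return rule

rule = make_rule('literature')
record = {}
print(rule(record, {'a': ['SPIRES-123'], 'd': 456}))  # [{'schema': 'SPIRES', 'value': 'SPIRES-123'}]
print(record)  # {'new_record': {'$ref': 'records/literature/456'}}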
def fill_cache(self): """Fill the cache with new data from the sensor.""" _LOGGER.debug('Filling cache with new sensor data.') try: self.firmware_version() except BluetoothBackendException: # If a sensor doesn't work, wait 5 minutes before retrying self._last_read = datetime.now() - self._cache_timeout + \ timedelta(seconds=300) raise with self._bt_interface.connect(self._mac) as connection: try: connection.wait_for_notification(_HANDLE_READ_WRITE_SENSOR_DATA, self, 10) # pylint: disable=no-member # If a sensor doesn't work, wait 5 minutes before retrying except BluetoothBackendException: self._last_read = datetime.now() - self._cache_timeout + \ timedelta(seconds=300) return
def function[fill_cache, parameter[self]]: constant[Fill the cache with new data from the sensor.] call[name[_LOGGER].debug, parameter[constant[Filling cache with new sensor data.]]] <ast.Try object at 0x7da1b0cffbe0> with call[name[self]._bt_interface.connect, parameter[name[self]._mac]] begin[:] <ast.Try object at 0x7da1b0cff550>
keyword[def] identifier[fill_cache] ( identifier[self] ): literal[string] identifier[_LOGGER] . identifier[debug] ( literal[string] ) keyword[try] : identifier[self] . identifier[firmware_version] () keyword[except] identifier[BluetoothBackendException] : identifier[self] . identifier[_last_read] = identifier[datetime] . identifier[now] ()- identifier[self] . identifier[_cache_timeout] + identifier[timedelta] ( identifier[seconds] = literal[int] ) keyword[raise] keyword[with] identifier[self] . identifier[_bt_interface] . identifier[connect] ( identifier[self] . identifier[_mac] ) keyword[as] identifier[connection] : keyword[try] : identifier[connection] . identifier[wait_for_notification] ( identifier[_HANDLE_READ_WRITE_SENSOR_DATA] , identifier[self] , literal[int] ) keyword[except] identifier[BluetoothBackendException] : identifier[self] . identifier[_last_read] = identifier[datetime] . identifier[now] ()- identifier[self] . identifier[_cache_timeout] + identifier[timedelta] ( identifier[seconds] = literal[int] ) keyword[return]
def fill_cache(self): """Fill the cache with new data from the sensor.""" _LOGGER.debug('Filling cache with new sensor data.') try: self.firmware_version() # depends on [control=['try'], data=[]] except BluetoothBackendException: # If a sensor doesn't work, wait 5 minutes before retrying self._last_read = datetime.now() - self._cache_timeout + timedelta(seconds=300) raise # depends on [control=['except'], data=[]] with self._bt_interface.connect(self._mac) as connection: try: connection.wait_for_notification(_HANDLE_READ_WRITE_SENSOR_DATA, self, 10) # pylint: disable=no-member # depends on [control=['try'], data=[]] # If a sensor doesn't work, wait 5 minutes before retrying except BluetoothBackendException: self._last_read = datetime.now() - self._cache_timeout + timedelta(seconds=300) return # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['connection']]
def find_all(self, sub, ignore_case=False):
    """
    Find all occurrences of the substring. Return a list of absolute
    positions in the document.
    """
    flags = re.IGNORECASE if ignore_case else 0
    return [a.start() for a in re.finditer(re.escape(sub), self.text, flags)]
def function[find_all, parameter[self, sub, ignore_case]]: constant[ Find all occurrences of the substring. Return a list of absolute positions in the document. ] variable[flags] assign[=] <ast.IfExp object at 0x7da18f58c6d0> return[<ast.ListComp object at 0x7da18f58c1f0>]
keyword[def] identifier[find_all] ( identifier[self] , identifier[sub] , identifier[ignore_case] = keyword[False] ): literal[string] identifier[flags] = identifier[re] . identifier[IGNORECASE] keyword[if] identifier[ignore_case] keyword[else] literal[int] keyword[return] [ identifier[a] . identifier[start] () keyword[for] identifier[a] keyword[in] identifier[re] . identifier[finditer] ( identifier[re] . identifier[escape] ( identifier[sub] ), identifier[self] . identifier[text] , identifier[flags] )]
def find_all(self, sub, ignore_case=False): """ Find all occurrences of the substring. Return a list of absolute positions in the document. """ flags = re.IGNORECASE if ignore_case else 0 return [a.start() for a in re.finditer(re.escape(sub), self.text, flags)]
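A quick standalone check of the re.finditer pattern behind find_all (module-level variant; the text is passed in instead of read from self):

import re

def find_all(text, sub, ignore_case=False):
    # re.escape keeps the substring literal; IGNORECASE is optional.
    flags = re.IGNORECASE if ignore_case else 0
    return [m.start() for m in re.finditer(re.escape(sub), text, flags)]

print(find_all("abcabcABC", "abc"))                    # [0, 3]
print(find_all("abcabcABC", "abc", ignore_case=True))  # [0, 3, 6]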
def _register_function(name: str, func, universe: bool, in_place: bool): """Register a transformation function under the given name. :param name: Name to register the function under :param func: A function :param universe: :param in_place: :return: The same function, with additional properties added """ if name in mapped: mapped_func = mapped[name] raise PipelineNameError('{name} is already registered with {func_mod}.{func_name}'.format( name=name, func_mod=mapped_func.__module__, func_name=mapped_func.__name__ )) mapped[name] = func if universe: universe_map[name] = func if in_place: in_place_map[name] = func if _has_arguments(func, universe): has_arguments_map[name] = func else: no_arguments_map[name] = func return func
def function[_register_function, parameter[name, func, universe, in_place]]: constant[Register a transformation function under the given name. :param name: Name to register the function under :param func: A function :param universe: :param in_place: :return: The same function, with additional properties added ] if compare[name[name] in name[mapped]] begin[:] variable[mapped_func] assign[=] call[name[mapped]][name[name]] <ast.Raise object at 0x7da1b0e82e30> call[name[mapped]][name[name]] assign[=] name[func] if name[universe] begin[:] call[name[universe_map]][name[name]] assign[=] name[func] if name[in_place] begin[:] call[name[in_place_map]][name[name]] assign[=] name[func] if call[name[_has_arguments], parameter[name[func], name[universe]]] begin[:] call[name[has_arguments_map]][name[name]] assign[=] name[func] return[name[func]]
keyword[def] identifier[_register_function] ( identifier[name] : identifier[str] , identifier[func] , identifier[universe] : identifier[bool] , identifier[in_place] : identifier[bool] ): literal[string] keyword[if] identifier[name] keyword[in] identifier[mapped] : identifier[mapped_func] = identifier[mapped] [ identifier[name] ] keyword[raise] identifier[PipelineNameError] ( literal[string] . identifier[format] ( identifier[name] = identifier[name] , identifier[func_mod] = identifier[mapped_func] . identifier[__module__] , identifier[func_name] = identifier[mapped_func] . identifier[__name__] )) identifier[mapped] [ identifier[name] ]= identifier[func] keyword[if] identifier[universe] : identifier[universe_map] [ identifier[name] ]= identifier[func] keyword[if] identifier[in_place] : identifier[in_place_map] [ identifier[name] ]= identifier[func] keyword[if] identifier[_has_arguments] ( identifier[func] , identifier[universe] ): identifier[has_arguments_map] [ identifier[name] ]= identifier[func] keyword[else] : identifier[no_arguments_map] [ identifier[name] ]= identifier[func] keyword[return] identifier[func]
def _register_function(name: str, func, universe: bool, in_place: bool): """Register a transformation function under the given name. :param name: Name to register the function under :param func: A function :param universe: :param in_place: :return: The same function, with additional properties added """ if name in mapped: mapped_func = mapped[name] raise PipelineNameError('{name} is already registered with {func_mod}.{func_name}'.format(name=name, func_mod=mapped_func.__module__, func_name=mapped_func.__name__)) # depends on [control=['if'], data=['name', 'mapped']] mapped[name] = func if universe: universe_map[name] = func # depends on [control=['if'], data=[]] if in_place: in_place_map[name] = func # depends on [control=['if'], data=[]] if _has_arguments(func, universe): has_arguments_map[name] = func # depends on [control=['if'], data=[]] else: no_arguments_map[name] = func return func
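A trimmed sketch of the duplicate-name guard in _register_function; PipelineNameError is replaced here by ValueError and the secondary maps are dropped:

# Minimal registry sketch; error type and maps are simplified on purpose.
mapped = {}

def register(name, func):
    if name in mapped:
        old = mapped[name]
        raise ValueError('{} is already registered with {}.{}'.format(name, old.__module__, old.__name__))
    mapped[name] = func
    return func

register('strip', str.strip)
# register('strip', str.lstrip)  # would raise: 'strip' is already registered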
def fight(nick, rest): "Pit two sworn enemies against each other (separate with 'vs.')" if rest: vtype = random.choice(phrases.fight_victories) fdesc = random.choice(phrases.fight_descriptions) # valid separators are vs., v., and vs pattern = re.compile('(.*) (?:vs[.]?|v[.]) (.*)') matcher = pattern.match(rest) if not matcher: karma.Karma.store.change(nick.lower(), -1) args = (vtype, nick, fdesc) return "/me %s %s in %s for bad protocol." % args contenders = [c.strip() for c in matcher.groups()] random.shuffle(contenders) winner, loser = contenders karma.Karma.store.change(winner, 1) karma.Karma.store.change(loser, -1) return "%s %s %s in %s." % (winner, vtype, loser, fdesc)
def function[fight, parameter[nick, rest]]: constant[Pit two sworn enemies against each other (separate with 'vs.')] if name[rest] begin[:] variable[vtype] assign[=] call[name[random].choice, parameter[name[phrases].fight_victories]] variable[fdesc] assign[=] call[name[random].choice, parameter[name[phrases].fight_descriptions]] variable[pattern] assign[=] call[name[re].compile, parameter[constant[(.*) (?:vs[.]?|v[.]) (.*)]]] variable[matcher] assign[=] call[name[pattern].match, parameter[name[rest]]] if <ast.UnaryOp object at 0x7da1b03990c0> begin[:] call[name[karma].Karma.store.change, parameter[call[name[nick].lower, parameter[]], <ast.UnaryOp object at 0x7da1b039a620>]] variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b039b6a0>, <ast.Name object at 0x7da1b039bdc0>, <ast.Name object at 0x7da1b039aa10>]] return[binary_operation[constant[/me %s %s in %s for bad protocol.] <ast.Mod object at 0x7da2590d6920> name[args]]] variable[contenders] assign[=] <ast.ListComp object at 0x7da1b039b760> call[name[random].shuffle, parameter[name[contenders]]] <ast.Tuple object at 0x7da1b03918a0> assign[=] name[contenders] call[name[karma].Karma.store.change, parameter[name[winner], constant[1]]] call[name[karma].Karma.store.change, parameter[name[loser], <ast.UnaryOp object at 0x7da1b0393520>]] return[binary_operation[constant[%s %s %s in %s.] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0390df0>, <ast.Name object at 0x7da1b0390190>, <ast.Name object at 0x7da1b0390eb0>, <ast.Name object at 0x7da1b0393ca0>]]]]
keyword[def] identifier[fight] ( identifier[nick] , identifier[rest] ): literal[string] keyword[if] identifier[rest] : identifier[vtype] = identifier[random] . identifier[choice] ( identifier[phrases] . identifier[fight_victories] ) identifier[fdesc] = identifier[random] . identifier[choice] ( identifier[phrases] . identifier[fight_descriptions] ) identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] ) identifier[matcher] = identifier[pattern] . identifier[match] ( identifier[rest] ) keyword[if] keyword[not] identifier[matcher] : identifier[karma] . identifier[Karma] . identifier[store] . identifier[change] ( identifier[nick] . identifier[lower] (),- literal[int] ) identifier[args] =( identifier[vtype] , identifier[nick] , identifier[fdesc] ) keyword[return] literal[string] % identifier[args] identifier[contenders] =[ identifier[c] . identifier[strip] () keyword[for] identifier[c] keyword[in] identifier[matcher] . identifier[groups] ()] identifier[random] . identifier[shuffle] ( identifier[contenders] ) identifier[winner] , identifier[loser] = identifier[contenders] identifier[karma] . identifier[Karma] . identifier[store] . identifier[change] ( identifier[winner] , literal[int] ) identifier[karma] . identifier[Karma] . identifier[store] . identifier[change] ( identifier[loser] ,- literal[int] ) keyword[return] literal[string] %( identifier[winner] , identifier[vtype] , identifier[loser] , identifier[fdesc] )
def fight(nick, rest): """Pit two sworn enemies against each other (separate with 'vs.')""" if rest: vtype = random.choice(phrases.fight_victories) fdesc = random.choice(phrases.fight_descriptions) # valid separators are vs., v., and vs pattern = re.compile('(.*) (?:vs[.]?|v[.]) (.*)') matcher = pattern.match(rest) if not matcher: karma.Karma.store.change(nick.lower(), -1) args = (vtype, nick, fdesc) return '/me %s %s in %s for bad protocol.' % args # depends on [control=['if'], data=[]] contenders = [c.strip() for c in matcher.groups()] random.shuffle(contenders) (winner, loser) = contenders karma.Karma.store.change(winner, 1) karma.Karma.store.change(loser, -1) return '%s %s %s in %s.' % (winner, vtype, loser, fdesc) # depends on [control=['if'], data=[]]
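A quick check of the separator regex used by fight (valid separators are 'vs.', 'vs', and 'v.'):

import re

pattern = re.compile(r'(.*) (?:vs[.]?|v[.]) (.*)')
print(pattern.match('cats vs. dogs').groups())  # ('cats', 'dogs')
print(pattern.match('tea v. coffee').groups())  # ('tea', 'coffee')
print(pattern.match('no separator here'))       # None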
def write_file(writer, filename):
  """Write all lines from the file using the writer."""
  for line in txt_line_iterator(filename):
    writer.write(line)
    writer.write("\n")
def function[write_file, parameter[writer, filename]]: constant[Write all lines from the file using the writer.] for taget[name[line]] in starred[call[name[txt_line_iterator], parameter[name[filename]]]] begin[:] call[name[writer].write, parameter[name[line]]] call[name[writer].write, parameter[constant[
]]]
keyword[def] identifier[write_file] ( identifier[writer] , identifier[filename] ): literal[string] keyword[for] identifier[line] keyword[in] identifier[txt_line_iterator] ( identifier[filename] ): identifier[writer] . identifier[write] ( identifier[line] ) identifier[writer] . identifier[write] ( literal[string] )
def write_file(writer, filename): """Write all lines from the file using the writer.""" for line in txt_line_iterator(filename): writer.write(line) writer.write('\n') # depends on [control=['for'], data=['line']]
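A minimal usage sketch for write_file. txt_line_iterator is not defined in this row, so a hypothetical stand-in that yields lines without trailing newlines is assumed, and source.txt is assumed to exist:

def txt_line_iterator(path):
  # Hypothetical stand-in: yield each line of the file, newline stripped.
  with open(path) as f:
    for line in f:
      yield line.rstrip("\n")

with open("copy.txt", "w") as writer:
  write_file(writer, "source.txt")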
def is_running(self): """ Checks if the child process is running (or is starting). """ return self._process.state() in [self._process.Running, self._process.Starting]
def function[is_running, parameter[self]]: constant[ Checks if the child process is running (or is starting). ] return[compare[call[name[self]._process.state, parameter[]] in list[[<ast.Attribute object at 0x7da18c4cee60>, <ast.Attribute object at 0x7da18c4ce1d0>]]]]
keyword[def] identifier[is_running] ( identifier[self] ): literal[string] keyword[return] identifier[self] . identifier[_process] . identifier[state] () keyword[in] [ identifier[self] . identifier[_process] . identifier[Running] , identifier[self] . identifier[_process] . identifier[Starting] ]
def is_running(self): """ Checks if the child process is running (or is starting). """ return self._process.state() in [self._process.Running, self._process.Starting]
def push_header(self, filename): """ Push the header to a given filename :param filename: the file path to push into. """ # open file and read it all with open(filename, "r") as infile: content = infile.read() # push header content = self.__header + content # re-write file with the header with open(filename, "w") as outfile: outfile.write(content)
def function[push_header, parameter[self, filename]]: constant[ Push the header to a given filename :param filename: the file path to push into. ] with call[name[open], parameter[name[filename], constant[r]]] begin[:] variable[content] assign[=] call[name[infile].read, parameter[]] variable[content] assign[=] binary_operation[name[self].__header + name[content]] with call[name[open], parameter[name[filename], constant[w]]] begin[:] call[name[outfile].write, parameter[name[content]]]
keyword[def] identifier[push_header] ( identifier[self] , identifier[filename] ): literal[string] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[infile] : identifier[content] = identifier[infile] . identifier[read] () identifier[content] = identifier[self] . identifier[__header] + identifier[content] keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[outfile] : identifier[outfile] . identifier[write] ( identifier[content] )
def push_header(self, filename): """ Push the header to a given filename :param filename: the file path to push into. """ # open file and read it all with open(filename, 'r') as infile: content = infile.read() # depends on [control=['with'], data=['infile']] # push header content = self.__header + content # re-write file with the header with open(filename, 'w') as outfile: outfile.write(content) # depends on [control=['with'], data=['outfile']]
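The read-prepend-rewrite pattern from push_header, shown standalone. self.__header belongs to the surrounding class (not shown in this row), so a plain local string stands in for it, and script.py is assumed to exist:

header = "# SPDX-License-Identifier: MIT\n"  # stand-in for self.__header
filename = "script.py"                       # assumed to exist

with open(filename, "r") as infile:
    content = infile.read()
with open(filename, "w") as outfile:
    outfile.write(header + content)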
def max_consecutive_days(self) -> Optional[Tuple[int, Interval]]: """ The length of the longest sequence of days in which all days include an interval. Returns: tuple: ``(longest_length, longest_interval)`` where ``longest_interval`` is a :class:`Interval` containing the start and end date of the longest span -- or ``None`` if we contain no intervals. """ if len(self.intervals) == 0: return None startdate = self.start_date() enddate = self.end_date() seq = '' ndays = (enddate - startdate).days + 1 for i in range(ndays): date = startdate + datetime.timedelta(days=i) wholeday = Interval.wholeday(date) if any([x.overlaps(wholeday) for x in self.intervals]): seq += '+' else: seq += ' ' # noinspection PyTypeChecker longest = max(seq.split(), key=len) longest_len = len(longest) longest_idx = seq.index(longest) longest_interval = Interval.dayspan( startdate + datetime.timedelta(days=longest_idx), startdate + datetime.timedelta(days=longest_idx + longest_len) ) return longest_len, longest_interval
def function[max_consecutive_days, parameter[self]]: constant[ The length of the longest sequence of days in which all days include an interval. Returns: tuple: ``(longest_length, longest_interval)`` where ``longest_interval`` is a :class:`Interval` containing the start and end date of the longest span -- or ``None`` if we contain no intervals. ] if compare[call[name[len], parameter[name[self].intervals]] equal[==] constant[0]] begin[:] return[constant[None]] variable[startdate] assign[=] call[name[self].start_date, parameter[]] variable[enddate] assign[=] call[name[self].end_date, parameter[]] variable[seq] assign[=] constant[] variable[ndays] assign[=] binary_operation[binary_operation[name[enddate] - name[startdate]].days + constant[1]] for taget[name[i]] in starred[call[name[range], parameter[name[ndays]]]] begin[:] variable[date] assign[=] binary_operation[name[startdate] + call[name[datetime].timedelta, parameter[]]] variable[wholeday] assign[=] call[name[Interval].wholeday, parameter[name[date]]] if call[name[any], parameter[<ast.ListComp object at 0x7da1b1836f20>]] begin[:] <ast.AugAssign object at 0x7da1b1837610> variable[longest] assign[=] call[name[max], parameter[call[name[seq].split, parameter[]]]] variable[longest_len] assign[=] call[name[len], parameter[name[longest]]] variable[longest_idx] assign[=] call[name[seq].index, parameter[name[longest]]] variable[longest_interval] assign[=] call[name[Interval].dayspan, parameter[binary_operation[name[startdate] + call[name[datetime].timedelta, parameter[]]], binary_operation[name[startdate] + call[name[datetime].timedelta, parameter[]]]]] return[tuple[[<ast.Name object at 0x7da1b1837790>, <ast.Name object at 0x7da1b1836d10>]]]
keyword[def] identifier[max_consecutive_days] ( identifier[self] )-> identifier[Optional] [ identifier[Tuple] [ identifier[int] , identifier[Interval] ]]: literal[string] keyword[if] identifier[len] ( identifier[self] . identifier[intervals] )== literal[int] : keyword[return] keyword[None] identifier[startdate] = identifier[self] . identifier[start_date] () identifier[enddate] = identifier[self] . identifier[end_date] () identifier[seq] = literal[string] identifier[ndays] =( identifier[enddate] - identifier[startdate] ). identifier[days] + literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ndays] ): identifier[date] = identifier[startdate] + identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[i] ) identifier[wholeday] = identifier[Interval] . identifier[wholeday] ( identifier[date] ) keyword[if] identifier[any] ([ identifier[x] . identifier[overlaps] ( identifier[wholeday] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[intervals] ]): identifier[seq] += literal[string] keyword[else] : identifier[seq] += literal[string] identifier[longest] = identifier[max] ( identifier[seq] . identifier[split] (), identifier[key] = identifier[len] ) identifier[longest_len] = identifier[len] ( identifier[longest] ) identifier[longest_idx] = identifier[seq] . identifier[index] ( identifier[longest] ) identifier[longest_interval] = identifier[Interval] . identifier[dayspan] ( identifier[startdate] + identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[longest_idx] ), identifier[startdate] + identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[longest_idx] + identifier[longest_len] ) ) keyword[return] identifier[longest_len] , identifier[longest_interval]
def max_consecutive_days(self) -> Optional[Tuple[int, Interval]]: """ The length of the longest sequence of days in which all days include an interval. Returns: tuple: ``(longest_length, longest_interval)`` where ``longest_interval`` is a :class:`Interval` containing the start and end date of the longest span -- or ``None`` if we contain no intervals. """ if len(self.intervals) == 0: return None # depends on [control=['if'], data=[]] startdate = self.start_date() enddate = self.end_date() seq = '' ndays = (enddate - startdate).days + 1 for i in range(ndays): date = startdate + datetime.timedelta(days=i) wholeday = Interval.wholeday(date) if any([x.overlaps(wholeday) for x in self.intervals]): seq += '+' # depends on [control=['if'], data=[]] else: seq += ' ' # depends on [control=['for'], data=['i']] # noinspection PyTypeChecker longest = max(seq.split(), key=len) longest_len = len(longest) longest_idx = seq.index(longest) longest_interval = Interval.dayspan(startdate + datetime.timedelta(days=longest_idx), startdate + datetime.timedelta(days=longest_idx + longest_len)) return (longest_len, longest_interval)
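The longest-run trick at the heart of max_consecutive_days, isolated: each day is encoded as '+' (covered by an interval) or ' ' (gap), and max(seq.split(), key=len) finds the longest unbroken '+' run:

seq = '++ ++++ +'            # '+' = covered day, ' ' = gap
longest = max(seq.split(), key=len)
print(len(longest))          # -> 4, length of the longest streak
print(seq.index(longest))    # -> 3, offset in days from the start date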
def create_project(name, include_examples=True): ''' creates the initial project skeleton and files :param name: the project name :return: ''' # from the start directory, create a project directory with the project name path = "{}/{}".format(os.getcwd(), name) if os.path.exists(path): return None, "path {} already exists".format(path) print(bright("creating new ezo project '{}".format(name))) # make project directory os.mkdir(path) print(bright("created project directory: '{}".format(path))) # create an empty contracts directory contracts_dir = "{}/{}".format(path, "contracts") os.mkdir(contracts_dir) print(bright("created contract directory: '{}".format(contracts_dir))) if include_examples: c = [(create_sample_contracts_1(), 'contract1.sol'), (create_sample_contracts_2(), 'contract2.sol')] for s in c: c, fn = s file_path = "{}/{}".format(contracts_dir, fn) try: with open(file_path, "w+") as outfile: outfile.write(c) except Exception as e: print(bright("problem creating sample file: '{}".format(path))) return None, e print(bright("created sample contract: '{}".format(fn))) # create the handlers directory handlers_dir = "{}/{}".format(path, "handlers") os.mkdir(handlers_dir) print(bright("created handlers directory: '{}".format(handlers_dir))) # leveldb directory (created by level) leveldb = "{}/{}".format(path, "ezodb") # create the initial config.json file cfg = create_blank_config_obj() cfg["ezo"]["contract-dir"] = contracts_dir cfg["ezo"]["handlers-dir"] = handlers_dir cfg["ezo"]["project-name"] = name cfg["ezo"]["leveldb"] = leveldb print(bright("creating configuration: '{}".format(path))) # write the file to the root project dir config_file_path = "{}/{}".format(path, "ezo.conf") try: with open(config_file_path, "w+") as outfile: json.dump(cfg, outfile, indent=2) except Exception as e: print(bright("problem creating configuration file: '{}".format(path))) return None, e return None, None
def function[create_project, parameter[name, include_examples]]: constant[ creates the initial project skeleton and files :param name: the project name :return: ] variable[path] assign[=] call[constant[{}/{}].format, parameter[call[name[os].getcwd, parameter[]], name[name]]] if call[name[os].path.exists, parameter[name[path]]] begin[:] return[tuple[[<ast.Constant object at 0x7da18f721000>, <ast.Call object at 0x7da18f722770>]]] call[name[print], parameter[call[name[bright], parameter[call[constant[creating new ezo project '{}].format, parameter[name[name]]]]]]] call[name[os].mkdir, parameter[name[path]]] call[name[print], parameter[call[name[bright], parameter[call[constant[created project directory: '{}].format, parameter[name[path]]]]]]] variable[contracts_dir] assign[=] call[constant[{}/{}].format, parameter[name[path], constant[contracts]]] call[name[os].mkdir, parameter[name[contracts_dir]]] call[name[print], parameter[call[name[bright], parameter[call[constant[created contract directory: '{}].format, parameter[name[contracts_dir]]]]]]] if name[include_examples] begin[:] variable[c] assign[=] list[[<ast.Tuple object at 0x7da18f722e60>, <ast.Tuple object at 0x7da18f722ec0>]] for taget[name[s]] in starred[name[c]] begin[:] <ast.Tuple object at 0x7da18f722650> assign[=] name[s] variable[file_path] assign[=] call[constant[{}/{}].format, parameter[name[contracts_dir], name[fn]]] <ast.Try object at 0x7da18f721720> call[name[print], parameter[call[name[bright], parameter[call[constant[created sample contract: '{}].format, parameter[name[fn]]]]]]] variable[handlers_dir] assign[=] call[constant[{}/{}].format, parameter[name[path], constant[handlers]]] call[name[os].mkdir, parameter[name[handlers_dir]]] call[name[print], parameter[call[name[bright], parameter[call[constant[created handlers directory: '{}].format, parameter[name[handlers_dir]]]]]]] variable[leveldb] assign[=] call[constant[{}/{}].format, parameter[name[path], constant[ezodb]]] variable[cfg] assign[=] call[name[create_blank_config_obj], parameter[]] call[call[name[cfg]][constant[ezo]]][constant[contract-dir]] assign[=] name[contracts_dir] call[call[name[cfg]][constant[ezo]]][constant[handlers-dir]] assign[=] name[handlers_dir] call[call[name[cfg]][constant[ezo]]][constant[project-name]] assign[=] name[name] call[call[name[cfg]][constant[ezo]]][constant[leveldb]] assign[=] name[leveldb] call[name[print], parameter[call[name[bright], parameter[call[constant[creating configuration: '{}].format, parameter[name[path]]]]]]] variable[config_file_path] assign[=] call[constant[{}/{}].format, parameter[name[path], constant[ezo.conf]]] <ast.Try object at 0x7da18f7233d0> return[tuple[[<ast.Constant object at 0x7da207f98340>, <ast.Constant object at 0x7da207f03e80>]]]
keyword[def] identifier[create_project] ( identifier[name] , identifier[include_examples] = keyword[True] ): literal[string] identifier[path] = literal[string] . identifier[format] ( identifier[os] . identifier[getcwd] (), identifier[name] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[return] keyword[None] , literal[string] . identifier[format] ( identifier[path] ) identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[name] ))) identifier[os] . identifier[mkdir] ( identifier[path] ) identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[path] ))) identifier[contracts_dir] = literal[string] . identifier[format] ( identifier[path] , literal[string] ) identifier[os] . identifier[mkdir] ( identifier[contracts_dir] ) identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[contracts_dir] ))) keyword[if] identifier[include_examples] : identifier[c] =[( identifier[create_sample_contracts_1] (), literal[string] ),( identifier[create_sample_contracts_2] (), literal[string] )] keyword[for] identifier[s] keyword[in] identifier[c] : identifier[c] , identifier[fn] = identifier[s] identifier[file_path] = literal[string] . identifier[format] ( identifier[contracts_dir] , identifier[fn] ) keyword[try] : keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[outfile] : identifier[outfile] . identifier[write] ( identifier[c] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[path] ))) keyword[return] keyword[None] , identifier[e] identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[fn] ))) identifier[handlers_dir] = literal[string] . identifier[format] ( identifier[path] , literal[string] ) identifier[os] . identifier[mkdir] ( identifier[handlers_dir] ) identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[handlers_dir] ))) identifier[leveldb] = literal[string] . identifier[format] ( identifier[path] , literal[string] ) identifier[cfg] = identifier[create_blank_config_obj] () identifier[cfg] [ literal[string] ][ literal[string] ]= identifier[contracts_dir] identifier[cfg] [ literal[string] ][ literal[string] ]= identifier[handlers_dir] identifier[cfg] [ literal[string] ][ literal[string] ]= identifier[name] identifier[cfg] [ literal[string] ][ literal[string] ]= identifier[leveldb] identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[path] ))) identifier[config_file_path] = literal[string] . identifier[format] ( identifier[path] , literal[string] ) keyword[try] : keyword[with] identifier[open] ( identifier[config_file_path] , literal[string] ) keyword[as] identifier[outfile] : identifier[json] . identifier[dump] ( identifier[cfg] , identifier[outfile] , identifier[indent] = literal[int] ) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( identifier[bright] ( literal[string] . identifier[format] ( identifier[path] ))) keyword[return] keyword[None] , identifier[e] keyword[return] keyword[None] , keyword[None]
def create_project(name, include_examples=True): """ creates the initial project skeleton and files :param name: the project name :return: """ # from the start directory, create a project directory with the project name path = '{}/{}'.format(os.getcwd(), name) if os.path.exists(path): return (None, 'path {} already exists'.format(path)) # depends on [control=['if'], data=[]] print(bright("creating new ezo project '{}".format(name))) # make project directory os.mkdir(path) print(bright("created project directory: '{}".format(path))) # create an empty contracts directory contracts_dir = '{}/{}'.format(path, 'contracts') os.mkdir(contracts_dir) print(bright("created contract directory: '{}".format(contracts_dir))) if include_examples: c = [(create_sample_contracts_1(), 'contract1.sol'), (create_sample_contracts_2(), 'contract2.sol')] for s in c: (c, fn) = s file_path = '{}/{}'.format(contracts_dir, fn) try: with open(file_path, 'w+') as outfile: outfile.write(c) # depends on [control=['with'], data=['outfile']] # depends on [control=['try'], data=[]] except Exception as e: print(bright("problem creating sample file: '{}".format(path))) return (None, e) # depends on [control=['except'], data=['e']] print(bright("created sample contract: '{}".format(fn))) # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]] # create the handlers directory handlers_dir = '{}/{}'.format(path, 'handlers') os.mkdir(handlers_dir) print(bright("created handlers directory: '{}".format(handlers_dir))) # leveldb directory (created by level) leveldb = '{}/{}'.format(path, 'ezodb') # create the initial config.json file cfg = create_blank_config_obj() cfg['ezo']['contract-dir'] = contracts_dir cfg['ezo']['handlers-dir'] = handlers_dir cfg['ezo']['project-name'] = name cfg['ezo']['leveldb'] = leveldb print(bright("creating configuration: '{}".format(path))) # write the file to the root project dir config_file_path = '{}/{}'.format(path, 'ezo.conf') try: with open(config_file_path, 'w+') as outfile: json.dump(cfg, outfile, indent=2) # depends on [control=['with'], data=['outfile']] # depends on [control=['try'], data=[]] except Exception as e: print(bright("problem creating configuration file: '{}".format(path))) return (None, e) # depends on [control=['except'], data=['e']] return (None, None)
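A sketch of the configuration file create_project ends up writing. create_blank_config_obj is not shown in this row, so a plausible minimal dict with the keys set above is assumed:

import json

cfg = {"ezo": {"contract-dir": "contracts", "handlers-dir": "handlers",
               "project-name": "demo", "leveldb": "ezodb"}}  # assumed shape
with open("ezo.conf", "w+") as outfile:
    json.dump(cfg, outfile, indent=2)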
def wafer_form_helper(context, helper_name): ''' Find the specified Crispy FormHelper and instantiate it. Handy when you are crispyifying other apps' forms. ''' request = context.request module, class_name = helper_name.rsplit('.', 1) if module not in sys.modules: __import__(module) mod = sys.modules[module] class_ = getattr(mod, class_name) return class_(request=request)
def function[wafer_form_helper, parameter[context, helper_name]]: constant[ Find the specified Crispy FormHelper and instantiate it. Handy when you are crispyifying other apps' forms. ] variable[request] assign[=] name[context].request <ast.Tuple object at 0x7da18f00f340> assign[=] call[name[helper_name].rsplit, parameter[constant[.], constant[1]]] if compare[name[module] <ast.NotIn object at 0x7da2590d7190> name[sys].modules] begin[:] call[name[__import__], parameter[name[module]]] variable[mod] assign[=] call[name[sys].modules][name[module]] variable[class_] assign[=] call[name[getattr], parameter[name[mod], name[class_name]]] return[call[name[class_], parameter[]]]
keyword[def] identifier[wafer_form_helper] ( identifier[context] , identifier[helper_name] ): literal[string] identifier[request] = identifier[context] . identifier[request] identifier[module] , identifier[class_name] = identifier[helper_name] . identifier[rsplit] ( literal[string] , literal[int] ) keyword[if] identifier[module] keyword[not] keyword[in] identifier[sys] . identifier[modules] : identifier[__import__] ( identifier[module] ) identifier[mod] = identifier[sys] . identifier[modules] [ identifier[module] ] identifier[class_] = identifier[getattr] ( identifier[mod] , identifier[class_name] ) keyword[return] identifier[class_] ( identifier[request] = identifier[request] )
def wafer_form_helper(context, helper_name): """ Find the specified Crispy FormHelper and instantiate it. Handy when you are crispyifying other apps' forms. """ request = context.request (module, class_name) = helper_name.rsplit('.', 1) if module not in sys.modules: __import__(module) # depends on [control=['if'], data=['module']] mod = sys.modules[module] class_ = getattr(mod, class_name) return class_(request=request)
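The dynamic class lookup inside wafer_form_helper, reduced to a reusable sketch; collections.OrderedDict is just a stand-in dotted path:

import sys

def load_class(dotted_path):
    module, class_name = dotted_path.rsplit('.', 1)
    if module not in sys.modules:
        __import__(module)
    return getattr(sys.modules[module], class_name)

print(load_class('collections.OrderedDict'))  # -> <class 'collections.OrderedDict'>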
def build_list_params(self, params, items, label): """ Items is a list of dictionaries or strings:: [ { 'Protocol' : 'HTTP', 'LoadBalancerPort' : '80', 'InstancePort' : '80' }, .. ] etc. or:: ['us-east-1b',...] """ # different from EC2 list params for i in xrange(1, len(items)+1): if isinstance(items[i-1], dict): for k, v in items[i-1].iteritems(): if isinstance(v, dict): for kk, vv in v.iteritems(): params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv else: params['%s.member.%d.%s' % (label, i, k)] = v elif isinstance(items[i-1], basestring): params['%s.member.%d' % (label, i)] = items[i-1]
def function[build_list_params, parameter[self, params, items, label]]: constant[ Items is a list of dictionaries or strings:: [ { 'Protocol' : 'HTTP', 'LoadBalancerPort' : '80', 'InstancePort' : '80' }, .. ] etc. or:: ['us-east-1b',...] ] for taget[name[i]] in starred[call[name[xrange], parameter[constant[1], binary_operation[call[name[len], parameter[name[items]]] + constant[1]]]]] begin[:] if call[name[isinstance], parameter[call[name[items]][binary_operation[name[i] - constant[1]]], name[dict]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b26164d0>, <ast.Name object at 0x7da1b2615270>]]] in starred[call[call[name[items]][binary_operation[name[i] - constant[1]]].iteritems, parameter[]]] begin[:] if call[name[isinstance], parameter[name[v], name[dict]]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b2615900>, <ast.Name object at 0x7da1b2614f40>]]] in starred[call[name[v].iteritems, parameter[]]] begin[:] call[name[params]][binary_operation[constant[%s.member.%d.%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2616cb0>, <ast.Name object at 0x7da1b2616a40>, <ast.Name object at 0x7da1b26143d0>, <ast.Name object at 0x7da1b26172e0>]]]] assign[=] name[vv]
keyword[def] identifier[build_list_params] ( identifier[self] , identifier[params] , identifier[items] , identifier[label] ): literal[string] keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[items] )+ literal[int] ): keyword[if] identifier[isinstance] ( identifier[items] [ identifier[i] - literal[int] ], identifier[dict] ): keyword[for] identifier[k] , identifier[v] keyword[in] identifier[items] [ identifier[i] - literal[int] ]. identifier[iteritems] (): keyword[if] identifier[isinstance] ( identifier[v] , identifier[dict] ): keyword[for] identifier[kk] , identifier[vv] keyword[in] identifier[v] . identifier[iteritems] (): identifier[params] [ literal[string] %( identifier[label] , identifier[i] , identifier[k] , identifier[kk] )]= identifier[vv] keyword[else] : identifier[params] [ literal[string] %( identifier[label] , identifier[i] , identifier[k] )]= identifier[v] keyword[elif] identifier[isinstance] ( identifier[items] [ identifier[i] - literal[int] ], identifier[basestring] ): identifier[params] [ literal[string] %( identifier[label] , identifier[i] )]= identifier[items] [ identifier[i] - literal[int] ]
def build_list_params(self, params, items, label): """ Items is a list of dictionaries or strings:: [ { 'Protocol' : 'HTTP', 'LoadBalancerPort' : '80', 'InstancePort' : '80' }, .. ] etc. or:: ['us-east-1b',...] """ # different from EC2 list params for i in xrange(1, len(items) + 1): if isinstance(items[i - 1], dict): for (k, v) in items[i - 1].iteritems(): if isinstance(v, dict): for (kk, vv) in v.iteritems(): params['%s.member.%d.%s.%s' % (label, i, k, kk)] = vv # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: params['%s.member.%d.%s' % (label, i, k)] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] elif isinstance(items[i - 1], basestring): params['%s.member.%d' % (label, i)] = items[i - 1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
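What the flattening produces, sketched in Python 3 (the original is Python 2: xrange/iteritems/basestring). The nested-dict branch is omitted for brevity and the label is hypothetical:

params = {}
items = [{'Protocol': 'HTTP', 'LoadBalancerPort': '80'}, 'us-east-1b']
label = 'Listeners'  # hypothetical label
for i, item in enumerate(items, 1):
    if isinstance(item, dict):
        for k, v in item.items():
            params['%s.member.%d.%s' % (label, i, k)] = v
    elif isinstance(item, str):
        params['%s.member.%d' % (label, i)] = item
print(params)
# -> {'Listeners.member.1.Protocol': 'HTTP',
#     'Listeners.member.1.LoadBalancerPort': '80',
#     'Listeners.member.2': 'us-east-1b'}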
def get_arguments(self): """ Gets the arguments from the command line. """ parser = argparse.ArgumentParser( description='Downloads images from given URL') parser.add_argument('url2scrape', nargs=1, help="URL to scrape") parser.add_argument('-m', '--max-images', type=int, default=None, help="Limit on number of images\n") parser.add_argument('-s', '--save-dir', type=str, default="images", help="Directory in which images should be saved") parser.add_argument('-g', '--injected', help="Scrape injected images", action="store_true") parser.add_argument('--proxy-server', type=str, default=None, help="Proxy server to use") parser.add_argument('--min-filesize', type=int, default=0, help="Limit on size of image in bytes") parser.add_argument('--max-filesize', type=int, default=100000000, help="Limit on size of image in bytes") parser.add_argument('--dump-urls', default=False, help="Print the URLs of the images", action="store_true") parser.add_argument('--formats', nargs="*", default=None, help="Specify formats in a list without any separator.\ This argument must be after the URL.") parser.add_argument('--scrape-reverse', default=False, help="Scrape the images in reverse order", action="store_true") parser.add_argument('--filename-pattern', type=str, default=None, help="Only scrape images with filenames that\ match the given regex pattern") parser.add_argument('--nthreads', type=int, default=10, help="The number of threads to use when downloading images.") args = parser.parse_args() self.url = args.url2scrape[0] if not re.match(r'^[a-zA-Z]+://', self.url): self.url = 'http://' + self.url self.no_to_download = args.max_images save_dir = args.save_dir + '_{uri.netloc}'.format( uri=urlparse(self.url)) if args.save_dir != "images": save_dir = args.save_dir self.download_path = os.path.join(os.getcwd(), save_dir) self.use_ghost = args.injected self.format_list = args.formats if args.formats else [ "jpg", "png", "gif", "svg", "jpeg"] self.min_filesize = args.min_filesize self.max_filesize = args.max_filesize self.dump_urls = args.dump_urls self.proxy_url = args.proxy_server self.proxies = {} if self.proxy_url: if not re.match(r'^[a-zA-Z]+://', self.proxy_url): self.proxy_url = 'http://' + self.proxy_url proxy_start_length = self.proxy_url.find("://") + 3 self.proxies = { self.proxy_url[:(proxy_start_length - 3)]: self.proxy_url } self.scrape_reverse = args.scrape_reverse self.filename_pattern = args.filename_pattern self.nthreads = args.nthreads return (self.url, self.no_to_download, self.format_list, self.download_path, self.min_filesize, self.max_filesize, self.dump_urls, self.scrape_reverse, self.use_ghost, self.filename_pattern)
def function[get_arguments, parameter[self]]: constant[ Gets the arguments from the command line. ] variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]] call[name[parser].add_argument, parameter[constant[url2scrape]]] call[name[parser].add_argument, parameter[constant[-m], constant[--max-images]]] call[name[parser].add_argument, parameter[constant[-s], constant[--save-dir]]] call[name[parser].add_argument, parameter[constant[-g], constant[--injected]]] call[name[parser].add_argument, parameter[constant[--proxy-server]]] call[name[parser].add_argument, parameter[constant[--min-filesize]]] call[name[parser].add_argument, parameter[constant[--max-filesize]]] call[name[parser].add_argument, parameter[constant[--dump-urls]]] call[name[parser].add_argument, parameter[constant[--formats]]] call[name[parser].add_argument, parameter[constant[--scrape-reverse]]] call[name[parser].add_argument, parameter[constant[--filename-pattern]]] call[name[parser].add_argument, parameter[constant[--nthreads]]] variable[args] assign[=] call[name[parser].parse_args, parameter[]] name[self].url assign[=] call[name[args].url2scrape][constant[0]] if <ast.UnaryOp object at 0x7da1b07966b0> begin[:] name[self].url assign[=] binary_operation[constant[http://] + name[self].url] name[self].no_to_download assign[=] name[args].max_images variable[save_dir] assign[=] binary_operation[name[args].save_dir + call[constant[_{uri.netloc}].format, parameter[]]] if compare[name[args].save_dir not_equal[!=] constant[images]] begin[:] variable[save_dir] assign[=] name[args].save_dir name[self].download_path assign[=] call[name[os].path.join, parameter[call[name[os].getcwd, parameter[]], name[save_dir]]] name[self].use_ghost assign[=] name[args].injected name[self].format_list assign[=] <ast.IfExp object at 0x7da1b07a8880> name[self].min_filesize assign[=] name[args].min_filesize name[self].max_filesize assign[=] name[args].max_filesize name[self].dump_urls assign[=] name[args].dump_urls name[self].proxy_url assign[=] name[args].proxy_server name[self].proxies assign[=] dictionary[[], []] if name[self].proxy_url begin[:] if <ast.UnaryOp object at 0x7da1b07a9270> begin[:] name[self].proxy_url assign[=] binary_operation[constant[http://] + name[self].proxy_url] variable[proxy_start_length] assign[=] binary_operation[call[name[self].proxy_url.find, parameter[constant[://]]] + constant[3]] name[self].proxies assign[=] dictionary[[<ast.Subscript object at 0x7da1b07ab160>], [<ast.Attribute object at 0x7da1b07a9ba0>]] name[self].scrape_reverse assign[=] name[args].scrape_reverse name[self].filename_pattern assign[=] name[args].filename_pattern name[self].nthreads assign[=] name[args].nthreads return[tuple[[<ast.Attribute object at 0x7da1b07aa320>, <ast.Attribute object at 0x7da1b07ab730>, <ast.Attribute object at 0x7da1b07a8d00>, <ast.Attribute object at 0x7da1b07aa770>, <ast.Attribute object at 0x7da1b07aa3b0>, <ast.Attribute object at 0x7da1b07a9090>, <ast.Attribute object at 0x7da1b07a97b0>, <ast.Attribute object at 0x7da1b07a9030>, <ast.Attribute object at 0x7da1b07ab5b0>, <ast.Attribute object at 0x7da1b07aa020>]]]
keyword[def] identifier[get_arguments] ( identifier[self] ): literal[string] identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ( identifier[description] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[int] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[type] = identifier[int] , identifier[default] = keyword[None] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[type] = identifier[str] , identifier[default] = literal[string] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , literal[string] , identifier[help] = literal[string] , identifier[action] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[str] , identifier[default] = keyword[None] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] , identifier[action] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[default] = keyword[None] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[default] = keyword[False] , identifier[help] = literal[string] , identifier[action] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[str] , identifier[default] = keyword[None] , identifier[help] = literal[string] ) identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[default] = literal[int] , identifier[help] = literal[string] ) identifier[args] = identifier[parser] . identifier[parse_args] () identifier[self] . identifier[url] = identifier[args] . identifier[url2scrape] [ literal[int] ] keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[self] . identifier[url] ): identifier[self] . identifier[url] = literal[string] + identifier[self] . identifier[url] identifier[self] . identifier[no_to_download] = identifier[args] . identifier[max_images] identifier[save_dir] = identifier[args] . identifier[save_dir] + literal[string] . identifier[format] ( identifier[uri] = identifier[urlparse] ( identifier[self] . identifier[url] )) keyword[if] identifier[args] . identifier[save_dir] != literal[string] : identifier[save_dir] = identifier[args] . identifier[save_dir] identifier[self] . identifier[download_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), identifier[save_dir] ) identifier[self] . identifier[use_ghost] = identifier[args] . identifier[injected] identifier[self] . identifier[format_list] = identifier[args] . identifier[formats] keyword[if] identifier[args] . identifier[formats] keyword[else] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[self] . identifier[min_filesize] = identifier[args] . identifier[min_filesize] identifier[self] . identifier[max_filesize] = identifier[args] . identifier[max_filesize] identifier[self] . identifier[dump_urls] = identifier[args] . identifier[dump_urls] identifier[self] . identifier[proxy_url] = identifier[args] . identifier[proxy_server] identifier[self] . identifier[proxies] ={} keyword[if] identifier[self] . identifier[proxy_url] : keyword[if] keyword[not] identifier[re] . identifier[match] ( literal[string] , identifier[self] . identifier[proxy_url] ): identifier[self] . identifier[proxy_url] = literal[string] + identifier[self] . identifier[proxy_url] identifier[proxy_start_length] = identifier[self] . identifier[proxy_url] . identifier[find] ( literal[string] )+ literal[int] identifier[self] . identifier[proxies] ={ identifier[self] . identifier[proxy_url] [:( identifier[proxy_start_length] - literal[int] )]: identifier[self] . identifier[proxy_url] } identifier[self] . identifier[scrape_reverse] = identifier[args] . identifier[scrape_reverse] identifier[self] . identifier[filename_pattern] = identifier[args] . identifier[filename_pattern] identifier[self] . identifier[nthreads] = identifier[args] . identifier[nthreads] keyword[return] ( identifier[self] . identifier[url] , identifier[self] . identifier[no_to_download] , identifier[self] . identifier[format_list] , identifier[self] . identifier[download_path] , identifier[self] . identifier[min_filesize] , identifier[self] . identifier[max_filesize] , identifier[self] . identifier[dump_urls] , identifier[self] . identifier[scrape_reverse] , identifier[self] . identifier[use_ghost] , identifier[self] . identifier[filename_pattern] )
def get_arguments(self): """ Gets the arguments from the command line. """ parser = argparse.ArgumentParser(description='Downloads images from given URL') parser.add_argument('url2scrape', nargs=1, help='URL to scrape') parser.add_argument('-m', '--max-images', type=int, default=None, help='Limit on number of images\n') parser.add_argument('-s', '--save-dir', type=str, default='images', help='Directory in which images should be saved') parser.add_argument('-g', '--injected', help='Scrape injected images', action='store_true') parser.add_argument('--proxy-server', type=str, default=None, help='Proxy server to use') parser.add_argument('--min-filesize', type=int, default=0, help='Limit on size of image in bytes') parser.add_argument('--max-filesize', type=int, default=100000000, help='Limit on size of image in bytes') parser.add_argument('--dump-urls', default=False, help='Print the URLs of the images', action='store_true') parser.add_argument('--formats', nargs='*', default=None, help='Specify formats in a list without any separator. This argument must be after the URL.') parser.add_argument('--scrape-reverse', default=False, help='Scrape the images in reverse order', action='store_true') parser.add_argument('--filename-pattern', type=str, default=None, help='Only scrape images with filenames that match the given regex pattern') parser.add_argument('--nthreads', type=int, default=10, help='The number of threads to use when downloading images.') args = parser.parse_args() self.url = args.url2scrape[0] if not re.match('^[a-zA-Z]+://', self.url): self.url = 'http://' + self.url # depends on [control=['if'], data=[]] self.no_to_download = args.max_images save_dir = args.save_dir + '_{uri.netloc}'.format(uri=urlparse(self.url)) if args.save_dir != 'images': save_dir = args.save_dir # depends on [control=['if'], data=[]] self.download_path = os.path.join(os.getcwd(), save_dir) self.use_ghost = args.injected self.format_list = args.formats if args.formats else ['jpg', 'png', 'gif', 'svg', 'jpeg'] self.min_filesize = args.min_filesize self.max_filesize = args.max_filesize self.dump_urls = args.dump_urls self.proxy_url = args.proxy_server self.proxies = {} if self.proxy_url: if not re.match('^[a-zA-Z]+://', self.proxy_url): self.proxy_url = 'http://' + self.proxy_url # depends on [control=['if'], data=[]] proxy_start_length = self.proxy_url.find('://') + 3 self.proxies = {self.proxy_url[:proxy_start_length - 3]: self.proxy_url} # depends on [control=['if'], data=[]] self.scrape_reverse = args.scrape_reverse self.filename_pattern = args.filename_pattern self.nthreads = args.nthreads return (self.url, self.no_to_download, self.format_list, self.download_path, self.min_filesize, self.max_filesize, self.dump_urls, self.scrape_reverse, self.use_ghost, self.filename_pattern)
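The argparse core of get_arguments can be exercised without a real command line by passing a list to parse_args; only two of the options are reproduced here:

import argparse

parser = argparse.ArgumentParser(description='Downloads images from given URL')
parser.add_argument('url2scrape', nargs=1)
parser.add_argument('-m', '--max-images', type=int, default=None)
args = parser.parse_args(['example.com', '-m', '10'])
print(args.url2scrape[0], args.max_images)  # -> example.com 10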
def _clear(self): """ Clear the current image. """ self._plain_image = [" " * self._width for _ in range(self._height)] self._colour_map = [[(None, 0, 0) for _ in range(self._width)] for _ in range(self._height)]
def function[_clear, parameter[self]]: constant[ Clear the current image. ] name[self]._plain_image assign[=] <ast.ListComp object at 0x7da1b1d35b40> name[self]._colour_map assign[=] <ast.ListComp object at 0x7da1b1d37f70>
keyword[def] identifier[_clear] ( identifier[self] ): literal[string] identifier[self] . identifier[_plain_image] =[ literal[string] * identifier[self] . identifier[_width] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[_height] )] identifier[self] . identifier[_colour_map] =[[( keyword[None] , literal[int] , literal[int] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[_width] )] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[self] . identifier[_height] )]
def _clear(self): """ Clear the current image. """ self._plain_image = [' ' * self._width for _ in range(self._height)] self._colour_map = [[(None, 0, 0) for _ in range(self._width)] for _ in range(self._height)]
def transform_32_33(inst, new_inst, i, n, offset, instructions, new_asm):
    """MAKE_FUNCTION adds another const; probably MAKE_CLASS as well.
    """
    add_size = xdis.op_size(new_inst.opcode, opcode_33)
    if inst.opname in ('MAKE_FUNCTION', 'MAKE_CLOSURE'):
        # Previous instruction should be a load const which
        # contains the name of the function to call
        prev_inst = instructions[i-1]
        assert prev_inst.opname == 'LOAD_CONST'
        assert isinstance(prev_inst.arg, int)

        # Add the function name as an additional LOAD_CONST
        load_fn_const = Instruction()
        load_fn_const.opname = 'LOAD_CONST'
        load_fn_const.opcode = opcode_33.opmap['LOAD_CONST']
        load_fn_const.line_no = None
        prev_const = new_asm.code.co_consts[prev_inst.arg]
        if hasattr(prev_const, 'co_name'):
            fn_name = new_asm.code.co_consts[prev_inst.arg].co_name
        else:
            fn_name = 'what-is-up'
        const_index = len(new_asm.code.co_consts)
        new_asm.code.co_consts = list(new_asm.code.co_consts)
        new_asm.code.co_consts.append(fn_name)
        load_fn_const.arg = const_index
        load_fn_const.offset = offset
        load_fn_const.starts_line = False
        load_fn_const.is_jump_target = False
        new_asm.code.instructions.append(load_fn_const)
        load_const_size = xdis.op_size(load_fn_const.opcode, opcode_33)
        add_size += load_const_size
        new_inst.offset = offset + add_size
        pass
    return add_size
def function[transform_32_33, parameter[inst, new_inst, i, n, offset, instructions, new_asm]]: constant[MAKE_FUNCTION adds another const; probably MAKE_CLASS as well. ] variable[add_size] assign[=] call[name[xdis].op_size, parameter[name[new_inst].opcode, name[opcode_33]]] if compare[name[inst].opname in tuple[[<ast.Constant object at 0x7da1b267bf10>, <ast.Constant object at 0x7da1b267b700>]]] begin[:] variable[prev_inst] assign[=] call[name[instructions]][binary_operation[name[i] - constant[1]]] assert[compare[name[prev_inst].opname equal[==] constant[LOAD_CONST]]] assert[call[name[isinstance], parameter[name[prev_inst].arg, name[int]]]] variable[load_fn_const] assign[=] call[name[Instruction], parameter[]] name[load_fn_const].opname assign[=] constant[LOAD_CONST] name[load_fn_const].opcode assign[=] call[name[opcode_33].opmap][constant[LOAD_CONST]] name[load_fn_const].line_no assign[=] constant[None] variable[prev_const] assign[=] call[name[new_asm].code.co_consts][name[prev_inst].arg] if call[name[hasattr], parameter[name[prev_const], constant[co_name]]] begin[:] variable[fn_name] assign[=] call[name[new_asm].code.co_consts][name[prev_inst].arg].co_name variable[const_index] assign[=] call[name[len], parameter[name[new_asm].code.co_consts]] name[new_asm].code.co_consts assign[=] call[name[list], parameter[name[new_asm].code.co_consts]] call[name[new_asm].code.co_consts.append, parameter[name[fn_name]]] name[load_fn_const].arg assign[=] name[const_index] name[load_fn_const].offset assign[=] name[offset] name[load_fn_const].starts_line assign[=] constant[False] name[load_fn_const].is_jump_target assign[=] constant[False] call[name[new_asm].code.instructions.append, parameter[name[load_fn_const]]] variable[load_const_size] assign[=] call[name[xdis].op_size, parameter[name[load_fn_const].opcode, name[opcode_33]]] <ast.AugAssign object at 0x7da1b267ae00> name[new_inst].offset assign[=] binary_operation[name[offset] + name[add_size]] pass return[name[add_size]]
keyword[def] identifier[transform_32_33] ( identifier[inst] , identifier[new_inst] , identifier[i] , identifier[n] , identifier[offset] , identifier[instructions] , identifier[new_asm] ): literal[string] identifier[add_size] = identifier[xdis] . identifier[op_size] ( identifier[new_inst] . identifier[opcode] , identifier[opcode_33] ) keyword[if] identifier[inst] . identifier[opname] keyword[in] ( literal[string] , literal[string] ): identifier[prev_inst] = identifier[instructions] [ identifier[i] - literal[int] ] keyword[assert] identifier[prev_inst] . identifier[opname] == literal[string] keyword[assert] identifier[isinstance] ( identifier[prev_inst] . identifier[arg] , identifier[int] ) identifier[load_fn_const] = identifier[Instruction] () identifier[load_fn_const] . identifier[opname] = literal[string] identifier[load_fn_const] . identifier[opcode] = identifier[opcode_33] . identifier[opmap] [ literal[string] ] identifier[load_fn_const] . identifier[line_no] = keyword[None] identifier[prev_const] = identifier[new_asm] . identifier[code] . identifier[co_consts] [ identifier[prev_inst] . identifier[arg] ] keyword[if] identifier[hasattr] ( identifier[prev_const] , literal[string] ): identifier[fn_name] = identifier[new_asm] . identifier[code] . identifier[co_consts] [ identifier[prev_inst] . identifier[arg] ]. identifier[co_name] keyword[else] : identifier[fn_name] = literal[string] identifier[const_index] = identifier[len] ( identifier[new_asm] . identifier[code] . identifier[co_consts] ) identifier[new_asm] . identifier[code] . identifier[co_consts] = identifier[list] ( identifier[new_asm] . identifier[code] . identifier[co_consts] ) identifier[new_asm] . identifier[code] . identifier[co_consts] . identifier[append] ( identifier[fn_name] ) identifier[load_fn_const] . identifier[arg] = identifier[const_index] identifier[load_fn_const] . identifier[offset] = identifier[offset] identifier[load_fn_const] . identifier[starts_line] = keyword[False] identifier[load_fn_const] . identifier[is_jump_target] = keyword[False] identifier[new_asm] . identifier[code] . identifier[instructions] . identifier[append] ( identifier[load_fn_const] ) identifier[load_const_size] = identifier[xdis] . identifier[op_size] ( identifier[load_fn_const] . identifier[opcode] , identifier[opcode_33] ) identifier[add_size] += identifier[load_const_size] identifier[new_inst] . identifier[offset] = identifier[offset] + identifier[add_size] keyword[pass] keyword[return] identifier[add_size]
def transform_32_33(inst, new_inst, i, n, offset, instructions, new_asm): """MAKE_FUNCTION adds another const; probably MAKE_CLASS as well. """ add_size = xdis.op_size(new_inst.opcode, opcode_33) if inst.opname in ('MAKE_FUNCTION', 'MAKE_CLOSURE'): # Previous instruction should be a load const which # contains the name of the function to call prev_inst = instructions[i - 1] assert prev_inst.opname == 'LOAD_CONST' assert isinstance(prev_inst.arg, int) # Add the function name as an additional LOAD_CONST load_fn_const = Instruction() load_fn_const.opname = 'LOAD_CONST' load_fn_const.opcode = opcode_33.opmap['LOAD_CONST'] load_fn_const.line_no = None prev_const = new_asm.code.co_consts[prev_inst.arg] if hasattr(prev_const, 'co_name'): fn_name = new_asm.code.co_consts[prev_inst.arg].co_name # depends on [control=['if'], data=[]] else: fn_name = 'what-is-up' const_index = len(new_asm.code.co_consts) new_asm.code.co_consts = list(new_asm.code.co_consts) new_asm.code.co_consts.append(fn_name) load_fn_const.arg = const_index load_fn_const.offset = offset load_fn_const.starts_line = False load_fn_const.is_jump_target = False new_asm.code.instructions.append(load_fn_const) load_const_size = xdis.op_size(load_fn_const.opcode, opcode_33) add_size += load_const_size new_inst.offset = offset + add_size pass # depends on [control=['if'], data=[]] return add_size
def validate_params(valid_options, params): """ Helps us validate the parameters for the request :param valid_options: a list of strings of valid options for the api request :param params: a dict, the key-value store of which we really only care about the keys, which tell us what the user is using for the API request :returns: None or throws an exception if the validation fails """ #crazy little if statement hanging by himself :( if not params: return #We only allow one version of the data parameter to be passed data_filter = ['data', 'source', 'external_url', 'embed'] multiple_data = [key for key in params.keys() if key in data_filter] if len(multiple_data) > 1: raise Exception("You can't mix and match data parameters") #No bad fields which are not in valid options can pass disallowed_fields = [key for key in params.keys() if key not in valid_options] if disallowed_fields: field_strings = ",".join(disallowed_fields) raise Exception("{} are not allowed fields".format(field_strings))
def function[validate_params, parameter[valid_options, params]]: constant[ Helps us validate the parameters for the request :param valid_options: a list of strings of valid options for the api request :param params: a dict, the key-value store of which we really only care about the keys, which tell us what the user is using for the API request :returns: None or throws an exception if the validation fails ] if <ast.UnaryOp object at 0x7da18fe93b20> begin[:] return[None] variable[data_filter] assign[=] list[[<ast.Constant object at 0x7da18fe93580>, <ast.Constant object at 0x7da18fe91180>, <ast.Constant object at 0x7da18fe90ac0>, <ast.Constant object at 0x7da18fe92da0>]] variable[multiple_data] assign[=] <ast.ListComp object at 0x7da18fe90610> if compare[call[name[len], parameter[name[multiple_data]]] greater[>] constant[1]] begin[:] <ast.Raise object at 0x7da18fe92740> variable[disallowed_fields] assign[=] <ast.ListComp object at 0x7da18fe93310> if name[disallowed_fields] begin[:] variable[field_strings] assign[=] call[constant[,].join, parameter[name[disallowed_fields]]] <ast.Raise object at 0x7da18fe91cc0>
keyword[def] identifier[validate_params] ( identifier[valid_options] , identifier[params] ): literal[string] keyword[if] keyword[not] identifier[params] : keyword[return] identifier[data_filter] =[ literal[string] , literal[string] , literal[string] , literal[string] ] identifier[multiple_data] =[ identifier[key] keyword[for] identifier[key] keyword[in] identifier[params] . identifier[keys] () keyword[if] identifier[key] keyword[in] identifier[data_filter] ] keyword[if] identifier[len] ( identifier[multiple_data] )> literal[int] : keyword[raise] identifier[Exception] ( literal[string] ) identifier[disallowed_fields] =[ identifier[key] keyword[for] identifier[key] keyword[in] identifier[params] . identifier[keys] () keyword[if] identifier[key] keyword[not] keyword[in] identifier[valid_options] ] keyword[if] identifier[disallowed_fields] : identifier[field_strings] = literal[string] . identifier[join] ( identifier[disallowed_fields] ) keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[field_strings] ))
def validate_params(valid_options, params): """ Helps us validate the parameters for the request :param valid_options: a list of strings of valid options for the api request :param params: a dict, the key-value store of which we really only care about the keys, which tell us what the user is using for the API request :returns: None or throws an exception if the validation fails """ #crazy little if statement hanging by himself :( if not params: return # depends on [control=['if'], data=[]] #We only allow one version of the data parameter to be passed data_filter = ['data', 'source', 'external_url', 'embed'] multiple_data = [key for key in params.keys() if key in data_filter] if len(multiple_data) > 1: raise Exception("You can't mix and match data parameters") # depends on [control=['if'], data=[]] #No bad fields which are not in valid options can pass disallowed_fields = [key for key in params.keys() if key not in valid_options] if disallowed_fields: field_strings = ','.join(disallowed_fields) raise Exception('{} are not allowed fields'.format(field_strings)) # depends on [control=['if'], data=[]]
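A quick exercise of validate_params above, with hypothetical option names:

valid = ['data', 'source', 'caption', 'tags']
validate_params(valid, {'data': 'x', 'caption': 'hi'})  # passes silently
try:
    validate_params(valid, {'data': 'x', 'source': 'y'})
except Exception as e:
    print(e)  # -> You can't mix and match data parameters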
def parameters(self, namespaced=False): """returns the exception varlink error parameters""" if namespaced: return json.loads(json.dumps(self.args[0]['parameters']), object_hook=lambda d: SimpleNamespace(**d)) else: return self.args[0].get('parameters')
def function[parameters, parameter[self, namespaced]]: constant[returns the exception varlink error parameters] if name[namespaced] begin[:] return[call[name[json].loads, parameter[call[name[json].dumps, parameter[call[call[name[self].args][constant[0]]][constant[parameters]]]]]]]
keyword[def] identifier[parameters] ( identifier[self] , identifier[namespaced] = keyword[False] ): literal[string] keyword[if] identifier[namespaced] : keyword[return] identifier[json] . identifier[loads] ( identifier[json] . identifier[dumps] ( identifier[self] . identifier[args] [ literal[int] ][ literal[string] ]), identifier[object_hook] = keyword[lambda] identifier[d] : identifier[SimpleNamespace] (** identifier[d] )) keyword[else] : keyword[return] identifier[self] . identifier[args] [ literal[int] ]. identifier[get] ( literal[string] )
def parameters(self, namespaced=False): """returns the exception varlink error parameters""" if namespaced: return json.loads(json.dumps(self.args[0]['parameters']), object_hook=lambda d: SimpleNamespace(**d)) # depends on [control=['if'], data=[]] else: return self.args[0].get('parameters')
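The object_hook trick used above, in isolation: json.loads rebuilds every JSON object as a SimpleNamespace, so nested fields become attribute accesses (the payload below is a made-up example):

import json
from types import SimpleNamespace

doc = '{"parameters": {"field": "name", "reason": "missing"}}'
ns = json.loads(doc, object_hook=lambda d: SimpleNamespace(**d))
print(ns.parameters.field)  # -> name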
def insert_entry(self, entry, taxids):
    """Insert a UniProt entry.

    :param entry: XML node entry
    :param taxids: Optional[iter[int]]: NCBI taxonomy IDs
    """
    entry_dict = entry.attrib
    entry_dict['created'] = datetime.strptime(entry_dict['created'], '%Y-%m-%d')
    entry_dict['modified'] = datetime.strptime(entry_dict['modified'], '%Y-%m-%d')
    taxid = self.get_taxid(entry)

    if taxids is None or taxid in taxids:
        entry_dict = self.update_entry_dict(entry, entry_dict, taxid)
        entry_obj = models.Entry(**entry_dict)
        del entry_dict
        self.session.add(entry_obj)
def function[insert_entry, parameter[self, entry, taxids]]: constant[Insert a UniProt entry. :param entry: XML node entry :param taxids: Optional[iter[int]]: NCBI taxonomy IDs ] variable[entry_dict] assign[=] name[entry].attrib call[name[entry_dict]][constant[created]] assign[=] call[name[datetime].strptime, parameter[call[name[entry_dict]][constant[created]], constant[%Y-%m-%d]]] call[name[entry_dict]][constant[modified]] assign[=] call[name[datetime].strptime, parameter[call[name[entry_dict]][constant[modified]], constant[%Y-%m-%d]]] variable[taxid] assign[=] call[name[self].get_taxid, parameter[name[entry]]] if <ast.BoolOp object at 0x7da1b0c65030> begin[:] variable[entry_dict] assign[=] call[name[self].update_entry_dict, parameter[name[entry], name[entry_dict], name[taxid]]] variable[entry_obj] assign[=] call[name[models].Entry, parameter[]] <ast.Delete object at 0x7da1b0c65600> call[name[self].session.add, parameter[name[entry_obj]]]
keyword[def] identifier[insert_entry] ( identifier[self] , identifier[entry] , identifier[taxids] ): literal[string] identifier[entry_dict] = identifier[entry] . identifier[attrib] identifier[entry_dict] [ literal[string] ]= identifier[datetime] . identifier[strptime] ( identifier[entry_dict] [ literal[string] ], literal[string] ) identifier[entry_dict] [ literal[string] ]= identifier[datetime] . identifier[strptime] ( identifier[entry_dict] [ literal[string] ], literal[string] ) identifier[taxid] = identifier[self] . identifier[get_taxid] ( identifier[entry] ) keyword[if] identifier[taxids] keyword[is] keyword[None] keyword[or] identifier[taxid] keyword[in] identifier[taxids] : identifier[entry_dict] = identifier[self] . identifier[update_entry_dict] ( identifier[entry] , identifier[entry_dict] , identifier[taxid] ) identifier[entry_obj] = identifier[models] . identifier[Entry] (** identifier[entry_dict] ) keyword[del] identifier[entry_dict] identifier[self] . identifier[session] . identifier[add] ( identifier[entry_obj] )
def insert_entry(self, entry, taxids): """Insert a UniProt entry. :param entry: XML node entry :param taxids: Optional[iter[int]]: NCBI taxonomy IDs """ entry_dict = entry.attrib entry_dict['created'] = datetime.strptime(entry_dict['created'], '%Y-%m-%d') entry_dict['modified'] = datetime.strptime(entry_dict['modified'], '%Y-%m-%d') taxid = self.get_taxid(entry) if taxids is None or taxid in taxids: entry_dict = self.update_entry_dict(entry, entry_dict, taxid) entry_obj = models.Entry(**entry_dict) del entry_dict self.session.add(entry_obj) # depends on [control=['if'], data=[]]
def _lookup_by_mapping(): """Return the init system based on a constant mapping of distribution+version to init system. See constants.py for the mapping. A fallback for the version is provided for when no version is supplied. For instance, Arch Linux's version will most probably be "rolling" at any given time, which means that the init system cannot be identified by the version of the distro. On top of trying to identify by the distro's ID, if /etc/os-release contains an "ID_LIKE" field, it will be tried. That, again, is true for Arch, where the distro's ID changes (Manjaro, Antergos, etc...) but the "ID_LIKE" field is always (?) `arch`. """ like = distro.like().lower() distribution_id = distro.id().lower() version = distro.major_version() if 'arch' in (distribution_id, like): version = 'any' init_sys = constants.DIST_TO_INITSYS.get( distribution_id, constants.DIST_TO_INITSYS.get(like)) if init_sys: system = init_sys.get(version) return [system] if system else []
def function[_lookup_by_mapping, parameter[]]: constant[Return a the init system based on a constant mapping of distribution+version to init system.. See constants.py for the mapping. A failover of the version is proposed for when no version is supplied. For instance, Arch Linux's version will most probably be "rolling" at any given time, which means that the init system cannot be idenfied by the version of the distro. On top of trying to identify by the distro's ID, if /etc/os-release contains an "ID_LIKE" field, it will be tried. That, again is true for Arch where the distro's ID changes (Manjaro, Antergos, etc...) But the "ID_LIKE" field is always (?) `arch`. ] variable[like] assign[=] call[call[name[distro].like, parameter[]].lower, parameter[]] variable[distribution_id] assign[=] call[call[name[distro].id, parameter[]].lower, parameter[]] variable[version] assign[=] call[name[distro].major_version, parameter[]] if compare[constant[arch] in tuple[[<ast.Name object at 0x7da18f722fb0>, <ast.Name object at 0x7da18f7230a0>]]] begin[:] variable[version] assign[=] constant[any] variable[init_sys] assign[=] call[name[constants].DIST_TO_INITSYS.get, parameter[name[distribution_id], call[name[constants].DIST_TO_INITSYS.get, parameter[name[like]]]]] if name[init_sys] begin[:] variable[system] assign[=] call[name[init_sys].get, parameter[name[version]]] return[<ast.IfExp object at 0x7da18f722020>]
keyword[def] identifier[_lookup_by_mapping] (): literal[string] identifier[like] = identifier[distro] . identifier[like] (). identifier[lower] () identifier[distribution_id] = identifier[distro] . identifier[id] (). identifier[lower] () identifier[version] = identifier[distro] . identifier[major_version] () keyword[if] literal[string] keyword[in] ( identifier[distribution_id] , identifier[like] ): identifier[version] = literal[string] identifier[init_sys] = identifier[constants] . identifier[DIST_TO_INITSYS] . identifier[get] ( identifier[distribution_id] , identifier[constants] . identifier[DIST_TO_INITSYS] . identifier[get] ( identifier[like] )) keyword[if] identifier[init_sys] : identifier[system] = identifier[init_sys] . identifier[get] ( identifier[version] ) keyword[return] [ identifier[system] ] keyword[if] identifier[system] keyword[else] []
def _lookup_by_mapping(): """Return a the init system based on a constant mapping of distribution+version to init system.. See constants.py for the mapping. A failover of the version is proposed for when no version is supplied. For instance, Arch Linux's version will most probably be "rolling" at any given time, which means that the init system cannot be idenfied by the version of the distro. On top of trying to identify by the distro's ID, if /etc/os-release contains an "ID_LIKE" field, it will be tried. That, again is true for Arch where the distro's ID changes (Manjaro, Antergos, etc...) But the "ID_LIKE" field is always (?) `arch`. """ like = distro.like().lower() distribution_id = distro.id().lower() version = distro.major_version() if 'arch' in (distribution_id, like): version = 'any' # depends on [control=['if'], data=[]] init_sys = constants.DIST_TO_INITSYS.get(distribution_id, constants.DIST_TO_INITSYS.get(like)) if init_sys: system = init_sys.get(version) return [system] if system else [] # depends on [control=['if'], data=[]]
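For context, a minimal standalone sketch of the two-level mapping shape this lookup assumes; the distro names and init systems below are illustrative, not the actual contents of constants.py.

# Hypothetical shape of constants.DIST_TO_INITSYS: distro id -> version -> init system.
DIST_TO_INITSYS = {
    'ubuntu': {'14': 'upstart', '16': 'systemd', '18': 'systemd'},
    'arch': {'any': 'systemd'},
}

def lookup(distribution_id, like, version):
    # First try the distro's own ID, then fall back to its ID_LIKE value.
    init_sys = DIST_TO_INITSYS.get(distribution_id, DIST_TO_INITSYS.get(like))
    if init_sys:
        system = init_sys.get(version)
        return [system] if system else []

print(lookup('manjaro', 'arch', 'any'))  # ['systemd'] via the ID_LIKE fallback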
def info(self, **kwargs): """ Get the system-wide configuration info. Returns: A dict representation of the JSON returned from the API. """ path = self._get_path('info') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
def function[info, parameter[self]]: constant[ Get the system wide configuration info. Returns: A dict respresentation of the JSON returned from the API. ] variable[path] assign[=] call[name[self]._get_path, parameter[constant[info]]] variable[response] assign[=] call[name[self]._GET, parameter[name[path], name[kwargs]]] call[name[self]._set_attrs_to_values, parameter[name[response]]] return[name[response]]
keyword[def] identifier[info] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[path] = identifier[self] . identifier[_get_path] ( literal[string] ) identifier[response] = identifier[self] . identifier[_GET] ( identifier[path] , identifier[kwargs] ) identifier[self] . identifier[_set_attrs_to_values] ( identifier[response] ) keyword[return] identifier[response]
def info(self, **kwargs): """ Get the system wide configuration info. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('info') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
def update_count(self, name, incr_by=1, key=None):
    """Update the value of CountMetric or MultiCountMetric

    :type name: str
    :param name: name of the registered metric to be updated.
    :type incr_by: int
    :param incr_by: specifies how much to increment. Default is 1.
    :type key: str or None
    :param key: specifies a key for MultiCountMetric. Needs to be `None` for updating CountMetric.
    """
    if name not in self.metrics:
      Log.error("In update_count(): %s is not registered in the metric", name)
      # Return early: indexing self.metrics[name] below would raise KeyError.
      return

    if key is None and isinstance(self.metrics[name], CountMetric):
      self.metrics[name].incr(incr_by)
    elif key is not None and isinstance(self.metrics[name], MultiCountMetric):
      self.metrics[name].incr(key, incr_by)
    else:
      Log.error("In update_count(): %s is registered but not supported with this method", name)
def function[update_count, parameter[self, name, incr_by, key]]: constant[Update the value of CountMetric or MultiCountMetric :type name: str :param name: name of the registered metric to be updated. :type incr_by: int :param incr_by: specifies how much to increment. Default is 1. :type key: str or None :param key: specifies a key for MultiCountMetric. Needs to be `None` for updating CountMetric. ] if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].metrics] begin[:] call[name[Log].error, parameter[constant[In update_count(): %s is not registered in the metric], name[name]]] if <ast.BoolOp object at 0x7da18ede4f10> begin[:] call[call[name[self].metrics][name[name]].incr, parameter[name[incr_by]]]
keyword[def] identifier[update_count] ( identifier[self] , identifier[name] , identifier[incr_by] = literal[int] , identifier[key] = keyword[None] ): literal[string] keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[metrics] : identifier[Log] . identifier[error] ( literal[string] , identifier[name] ) keyword[if] identifier[key] keyword[is] keyword[None] keyword[and] identifier[isinstance] ( identifier[self] . identifier[metrics] [ identifier[name] ], identifier[CountMetric] ): identifier[self] . identifier[metrics] [ identifier[name] ]. identifier[incr] ( identifier[incr_by] ) keyword[elif] identifier[key] keyword[is] keyword[not] keyword[None] keyword[and] identifier[isinstance] ( identifier[self] . identifier[metrics] [ identifier[name] ], identifier[MultiCountMetric] ): identifier[self] . identifier[metrics] [ identifier[name] ]. identifier[incr] ( identifier[key] , identifier[incr_by] ) keyword[else] : identifier[Log] . identifier[error] ( literal[string] , identifier[name] )
def update_count(self, name, incr_by=1, key=None): """Update the value of CountMetric or MultiCountMetric :type name: str :param name: name of the registered metric to be updated. :type incr_by: int :param incr_by: specifies how much to increment. Default is 1. :type key: str or None :param key: specifies a key for MultiCountMetric. Needs to be `None` for updating CountMetric. """ if name not in self.metrics: Log.error('In update_count(): %s is not registered in the metric', name) # depends on [control=['if'], data=['name']] if key is None and isinstance(self.metrics[name], CountMetric): self.metrics[name].incr(incr_by) # depends on [control=['if'], data=[]] elif key is not None and isinstance(self.metrics[name], MultiCountMetric): self.metrics[name].incr(key, incr_by) # depends on [control=['if'], data=[]] else: Log.error('In update_count(): %s is registered but not supported with this method', name)
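A self-contained sketch of the type dispatch above; the two stub classes stand in for Heron's real CountMetric/MultiCountMetric, and their incr signatures are assumptions for illustration only.

class CountMetric(object):
    """Stub scalar counter (hypothetical stand-in)."""
    def __init__(self):
        self.value = 0
    def incr(self, to_add=1):
        self.value += to_add

class MultiCountMetric(object):
    """Stub keyed counter (hypothetical stand-in)."""
    def __init__(self):
        self.value = {}
    def incr(self, key, to_add=1):
        self.value[key] = self.value.get(key, 0) + to_add

metrics = {'emitted': CountMetric(), 'acked': MultiCountMetric()}
# CountMetric path: no key is given.
if isinstance(metrics['emitted'], CountMetric):
    metrics['emitted'].incr(1)
# MultiCountMetric path: a key selects the sub-counter.
if isinstance(metrics['acked'], MultiCountMetric):
    metrics['acked'].incr('stream1', 3)
print(metrics['emitted'].value, metrics['acked'].value)  # 1 {'stream1': 3}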
def geometry_linestring(lat, lon, elev):
    """
    GeoJSON LineString. Latitude and Longitude have 2 values each.

    :param list lat: Latitude values
    :param list lon: Longitude values
    :param elev: Elevation value(s), appended to each coordinate pair when present
    :return dict: GeoJSON geometry block
    """
    logger_excel.info("enter geometry_linestring")
    d = OrderedDict()
    coordinates = []
    temp = ["", ""]

    # Point type, Matching pairs.
    if lat[0] == lat[1] and lon[0] == lon[1]:
        logger_excel.info("matching geo coordinate")
        lat.pop()
        lon.pop()
        d = geometry_point(lat, lon, elev)

    else:
        # Creates coordinates list
        logger_excel.info("unique geo coordinates")
        for i in lon:
            temp[0] = i
            for j in lat:
                temp[1] = j
                coordinates.append(copy.copy(temp))
        if elev:
            for i in coordinates:
                i.append(elev)

        # Create geometry block
        d['type'] = 'LineString'  # GeoJSON type names are CamelCase
        d['coordinates'] = coordinates
    logger_excel.info("exit geometry_linestring")
    return d
def function[geometry_linestring, parameter[lat, lon, elev]]: constant[ GeoJSON Linestring. Latitude and Longitude have 2 values each. :param list lat: Latitude values :param list lon: Longitude values :return dict: ] call[name[logger_excel].info, parameter[constant[enter geometry_linestring]]] variable[d] assign[=] call[name[OrderedDict], parameter[]] variable[coordinates] assign[=] list[[]] variable[temp] assign[=] list[[<ast.Constant object at 0x7da18fe91fc0>, <ast.Constant object at 0x7da18fe90e20>]] if <ast.BoolOp object at 0x7da18fe93e20> begin[:] call[name[logger_excel].info, parameter[constant[matching geo coordinate]]] call[name[lat].pop, parameter[]] call[name[lon].pop, parameter[]] variable[d] assign[=] call[name[geometry_point], parameter[name[lat], name[lon], name[elev]]] call[name[logger_excel].info, parameter[constant[exit geometry_linestring]]] return[name[d]]
keyword[def] identifier[geometry_linestring] ( identifier[lat] , identifier[lon] , identifier[elev] ): literal[string] identifier[logger_excel] . identifier[info] ( literal[string] ) identifier[d] = identifier[OrderedDict] () identifier[coordinates] =[] identifier[temp] =[ literal[string] , literal[string] ] keyword[if] identifier[lat] [ literal[int] ]== identifier[lat] [ literal[int] ] keyword[and] identifier[lon] [ literal[int] ]== identifier[lon] [ literal[int] ]: identifier[logger_excel] . identifier[info] ( literal[string] ) identifier[lat] . identifier[pop] () identifier[lon] . identifier[pop] () identifier[d] = identifier[geometry_point] ( identifier[lat] , identifier[lon] , identifier[elev] ) keyword[else] : identifier[logger_excel] . identifier[info] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[lon] : identifier[temp] [ literal[int] ]= identifier[i] keyword[for] identifier[j] keyword[in] identifier[lat] : identifier[temp] [ literal[int] ]= identifier[j] identifier[coordinates] . identifier[append] ( identifier[copy] . identifier[copy] ( identifier[temp] )) keyword[if] identifier[elev] : keyword[for] identifier[i] keyword[in] identifier[coordinates] : identifier[i] . identifier[append] ( identifier[elev] ) identifier[d] [ literal[string] ]= literal[string] identifier[d] [ literal[string] ]= identifier[coordinates] identifier[logger_excel] . identifier[info] ( literal[string] ) keyword[return] identifier[d]
def geometry_linestring(lat, lon, elev): """ GeoJSON Linestring. Latitude and Longitude have 2 values each. :param list lat: Latitude values :param list lon: Longitude values :return dict: """ logger_excel.info('enter geometry_linestring') d = OrderedDict() coordinates = [] temp = ['', ''] # Point type, Matching pairs. if lat[0] == lat[1] and lon[0] == lon[1]: logger_excel.info('matching geo coordinate') lat.pop() lon.pop() d = geometry_point(lat, lon, elev) # depends on [control=['if'], data=[]] else: # Creates coordinates list logger_excel.info('unique geo coordinates') for i in lon: temp[0] = i for j in lat: temp[1] = j coordinates.append(copy.copy(temp)) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] if elev: for i in coordinates: i.append(elev) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] # Create geometry block d['type'] = 'Linestring' d['coordinates'] = coordinates logger_excel.info('exit geometry_linestring') return d
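A worked example of the non-matching branch above, assuming the module's logger and helpers are importable. Note that the nested loops emit the lon-by-lat cartesian product, so two distinct points yield four coordinate pairs.

d = geometry_linestring(lat=[10, 20], lon=[30, 40], elev=None)
# d == OrderedDict([('type', 'LineString'),
#                   ('coordinates', [[30, 10], [30, 20], [40, 10], [40, 20]])])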
def triangulize(image, tile_size):
    """Processes the given image by breaking it down into tiles of the given
    size and applying a triangular effect to each tile. Returns the processed
    image as a PIL Image object.

    The image can be given as anything suitable for passing to `Image.open`
    (i.e., the path to an image, or a file-like object containing image data).

    If tile_size is 0, the tile size will be guessed based on the image
    size. It will also be adjusted to be divisible by 2 if it is not already.
    """
    if isinstance(image, basestring) or hasattr(image, 'read'):
        image = Image.open(image)
    assert isinstance(tile_size, int)

    # Make sure we have a usable tile size, by guessing based on image size
    # and making sure it's a multiple of two.
    if tile_size == 0:
        tile_size = guess_tile_size(image)
    if tile_size % 2 != 0:
        tile_size = (tile_size / 2) * 2

    logging.info('Input image size: %r', image.size)
    logging.info('Tile size: %r', tile_size)

    # Preprocess image to make sure it's at a size we can handle
    image = prep_image(image, tile_size)
    logging.info('Prepped image size: %r', image.size)

    # Get pixmap (for direct pixel access) and draw objects for the image.
    pix = image.load()
    draw = ImageDraw.Draw(image)

    # Process the image, tile by tile
    for x, y in iter_tiles(image, tile_size):
        process_tile(x, y, tile_size, pix, draw, image)
    return image
def function[triangulize, parameter[image, tile_size]]: constant[Processes the given image by breaking it down into tiles of the given size and applying a triangular effect to each tile. Returns the processed image as a PIL Image object. The image can be given as anything suitable for passing to `Image.open` (ie, the path to an image or as a file-like object containing image data). If tile_size is 0, the tile size will be guessed based on the image size. It will also be adjusted to be divisible by 2 if it is not already. ] if <ast.BoolOp object at 0x7da18bcc9660> begin[:] variable[image] assign[=] call[name[Image].open, parameter[name[image]]] assert[call[name[isinstance], parameter[name[tile_size], name[int]]]] if compare[name[tile_size] equal[==] constant[0]] begin[:] variable[tile_size] assign[=] call[name[guess_tile_size], parameter[name[image]]] if compare[binary_operation[name[tile_size] <ast.Mod object at 0x7da2590d6920> constant[2]] not_equal[!=] constant[0]] begin[:] variable[tile_size] assign[=] binary_operation[binary_operation[name[tile_size] / constant[2]] * constant[2]] call[name[logging].info, parameter[constant[Input image size: %r], name[image].size]] call[name[logging].info, parameter[constant[Tile size: %r], name[tile_size]]] variable[image] assign[=] call[name[prep_image], parameter[name[image], name[tile_size]]] call[name[logging].info, parameter[constant[Prepped image size: %r], name[image].size]] variable[pix] assign[=] call[name[image].load, parameter[]] variable[draw] assign[=] call[name[ImageDraw].Draw, parameter[name[image]]] for taget[tuple[[<ast.Name object at 0x7da207f00c70>, <ast.Name object at 0x7da207f00eb0>]]] in starred[call[name[iter_tiles], parameter[name[image], name[tile_size]]]] begin[:] call[name[process_tile], parameter[name[x], name[y], name[tile_size], name[pix], name[draw], name[image]]] return[name[image]]
keyword[def] identifier[triangulize] ( identifier[image] , identifier[tile_size] ): literal[string] keyword[if] identifier[isinstance] ( identifier[image] , identifier[basestring] ) keyword[or] identifier[hasattr] ( identifier[image] , literal[string] ): identifier[image] = identifier[Image] . identifier[open] ( identifier[image] ) keyword[assert] identifier[isinstance] ( identifier[tile_size] , identifier[int] ) keyword[if] identifier[tile_size] == literal[int] : identifier[tile_size] = identifier[guess_tile_size] ( identifier[image] ) keyword[if] identifier[tile_size] % literal[int] != literal[int] : identifier[tile_size] =( identifier[tile_size] / literal[int] )* literal[int] identifier[logging] . identifier[info] ( literal[string] , identifier[image] . identifier[size] ) identifier[logging] . identifier[info] ( literal[string] , identifier[tile_size] ) identifier[image] = identifier[prep_image] ( identifier[image] , identifier[tile_size] ) identifier[logging] . identifier[info] ( literal[string] , identifier[image] . identifier[size] ) identifier[pix] = identifier[image] . identifier[load] () identifier[draw] = identifier[ImageDraw] . identifier[Draw] ( identifier[image] ) keyword[for] identifier[x] , identifier[y] keyword[in] identifier[iter_tiles] ( identifier[image] , identifier[tile_size] ): identifier[process_tile] ( identifier[x] , identifier[y] , identifier[tile_size] , identifier[pix] , identifier[draw] , identifier[image] ) keyword[return] identifier[image]
def triangulize(image, tile_size): """Processes the given image by breaking it down into tiles of the given size and applying a triangular effect to each tile. Returns the processed image as a PIL Image object. The image can be given as anything suitable for passing to `Image.open` (ie, the path to an image or as a file-like object containing image data). If tile_size is 0, the tile size will be guessed based on the image size. It will also be adjusted to be divisible by 2 if it is not already. """ if isinstance(image, basestring) or hasattr(image, 'read'): image = Image.open(image) # depends on [control=['if'], data=[]] assert isinstance(tile_size, int) # Make sure we have a usable tile size, by guessing based on image size # and making sure it's a multiple of two. if tile_size == 0: tile_size = guess_tile_size(image) # depends on [control=['if'], data=['tile_size']] if tile_size % 2 != 0: tile_size = tile_size / 2 * 2 # depends on [control=['if'], data=[]] logging.info('Input image size: %r', image.size) logging.info('Tile size: %r', tile_size) # Preprocess image to make sure it's at a size we can handle image = prep_image(image, tile_size) logging.info('Prepped image size: %r', image.size) # Get pixmap (for direct pixel access) and draw objects for the image. pix = image.load() draw = ImageDraw.Draw(image) # Process the image, tile by tile for (x, y) in iter_tiles(image, tile_size): process_tile(x, y, tile_size, pix, draw, image) # depends on [control=['for'], data=[]] return image
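A short usage sketch; the file names here are hypothetical, and triangulize accepts anything Image.open does.

result = triangulize('input_photo.jpg', tile_size=0)  # 0 -> guess a tile size
result.save('triangulized_photo.png')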
def ttl(self):
    """LeaseTimeToLive retrieves lease information.

    This method makes a synchronous HTTP request and blocks until the
    server responds.

    :return: the lease's remaining time-to-live in seconds, as an int
    """
    result = self.client.post(self.client.get_url("/kv/lease/timetolive"),
                              json={"ID": self.id})
    return int(result['TTL'])
def function[ttl, parameter[self]]: constant[LeaseTimeToLive retrieves lease information. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. :return: ] variable[result] assign[=] call[name[self].client.post, parameter[call[name[self].client.get_url, parameter[constant[/kv/lease/timetolive]]]]] return[call[name[int], parameter[call[name[result]][constant[TTL]]]]]
keyword[def] identifier[ttl] ( identifier[self] ): literal[string] identifier[result] = identifier[self] . identifier[client] . identifier[post] ( identifier[self] . identifier[client] . identifier[get_url] ( literal[string] ), identifier[json] ={ literal[string] : identifier[self] . identifier[id] }) keyword[return] identifier[int] ( identifier[result] [ literal[string] ])
def ttl(self): """LeaseTimeToLive retrieves lease information. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. :return: """ result = self.client.post(self.client.get_url('/kv/lease/timetolive'), json={'ID': self.id}) return int(result['TTL'])
def chunks_of(max_chunk_size, list_to_chunk): """ Yield successive chunks of list_to_chunk, each at most max_chunk_size items long """ for i in range(0, len(list_to_chunk), max_chunk_size): yield list_to_chunk[i:i + max_chunk_size]
def function[chunks_of, parameter[max_chunk_size, list_to_chunk]]: constant[ Yields the list with a max size of max_chunk_size ] for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[list_to_chunk]]], name[max_chunk_size]]]] begin[:] <ast.Yield object at 0x7da1b28fd450>
keyword[def] identifier[chunks_of] ( identifier[max_chunk_size] , identifier[list_to_chunk] ): literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[list_to_chunk] ), identifier[max_chunk_size] ): keyword[yield] identifier[list_to_chunk] [ identifier[i] : identifier[i] + identifier[max_chunk_size] ]
def chunks_of(max_chunk_size, list_to_chunk): """ Yields the list with a max size of max_chunk_size """ for i in range(0, len(list_to_chunk), max_chunk_size): yield list_to_chunk[i:i + max_chunk_size] # depends on [control=['for'], data=['i']]
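For example (the final chunk may be shorter, since slicing past the end of a list is safe):

print(list(chunks_of(3, list(range(8)))))  # [[0, 1, 2], [3, 4, 5], [6, 7]]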
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
    """Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the exported pipeline.
        auth (tuple): a tuple of username and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    stop_result.raise_for_status()
    logging.info("Pipeline stop requested.")
    poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
    logging.info('Pipeline stopped.')
    return stop_result.json()
def function[stop_pipeline, parameter[url, pipeline_id, auth, verify_ssl]]: constant[Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning. Args: url (str): the host url in the form 'http://host:port/'. pipeline_id (str): the ID of of the exported pipeline. auth (tuple): a tuple of username, and password. verify_ssl (bool): whether to verify ssl certificates Returns: dict: the response json ] variable[stop_result] assign[=] call[name[requests].post, parameter[binary_operation[binary_operation[binary_operation[name[url] + constant[/]] + name[pipeline_id]] + constant[/stop]]]] call[name[stop_result].raise_for_status, parameter[]] call[name[logging].info, parameter[constant[Pipeline stop requested.]]] call[name[poll_pipeline_status], parameter[name[STATUS_STOPPED], name[url], name[pipeline_id], name[auth], name[verify_ssl]]] call[name[logging].info, parameter[constant[Pipeline stopped.]]] return[call[name[stop_result].json, parameter[]]]
keyword[def] identifier[stop_pipeline] ( identifier[url] , identifier[pipeline_id] , identifier[auth] , identifier[verify_ssl] ): literal[string] identifier[stop_result] = identifier[requests] . identifier[post] ( identifier[url] + literal[string] + identifier[pipeline_id] + literal[string] , identifier[headers] = identifier[X_REQ_BY] , identifier[auth] = identifier[auth] , identifier[verify] = identifier[verify_ssl] ) identifier[stop_result] . identifier[raise_for_status] () identifier[logging] . identifier[info] ( literal[string] ) identifier[poll_pipeline_status] ( identifier[STATUS_STOPPED] , identifier[url] , identifier[pipeline_id] , identifier[auth] , identifier[verify_ssl] ) identifier[logging] . identifier[info] ( literal[string] ) keyword[return] identifier[stop_result] . identifier[json] ()
def stop_pipeline(url, pipeline_id, auth, verify_ssl): """Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning. Args: url (str): the host url in the form 'http://host:port/'. pipeline_id (str): the ID of of the exported pipeline. auth (tuple): a tuple of username, and password. verify_ssl (bool): whether to verify ssl certificates Returns: dict: the response json """ stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl) stop_result.raise_for_status() logging.info('Pipeline stop requested.') poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl) logging.info('Pipeline stopped.') return stop_result.json()
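A hedged usage sketch; the host, endpoint path, pipeline ID, and credentials below are placeholders, not values the API prescribes.

response = stop_pipeline('http://localhost:18630/rest/v1/pipeline',
                         'my_pipeline_id',
                         auth=('admin', 'admin'),
                         verify_ssl=False)
print(response.get('status'))  # expected to be 'STOPPED' once polling finishes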
def add_asset_hashes(self, path='dist/assets'):
    """ Scan through a directory and add hashes for each file found. """
    for fullpath in _listfiles(os.path.join(self.root_path, path)):
        relpath = fullpath.replace(self.root_path + '/' + path + '/', '')
        # Use a context manager so the file handle is closed promptly.
        with open(fullpath, 'rb') as fh:
            md5sum = hashlib.md5(fh.read()).hexdigest()
        LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum)
        self.asset_hash[relpath] = md5sum
def function[add_asset_hashes, parameter[self, path]]: constant[ Scan through a directory and add hashes for each file found. ] for taget[name[fullpath]] in starred[call[name[_listfiles], parameter[call[name[os].path.join, parameter[name[self].root_path, name[path]]]]]] begin[:] variable[relpath] assign[=] call[name[fullpath].replace, parameter[binary_operation[binary_operation[binary_operation[name[self].root_path + constant[/]] + name[path]] + constant[/]], constant[]]] variable[md5sum] assign[=] call[call[name[hashlib].md5, parameter[call[call[name[open], parameter[name[fullpath], constant[rb]]].read, parameter[]]]].hexdigest, parameter[]] call[name[LOG].debug, parameter[constant[MD5 of %s (%s): %s], name[fullpath], name[relpath], name[md5sum]]] call[name[self].asset_hash][name[relpath]] assign[=] name[md5sum]
keyword[def] identifier[add_asset_hashes] ( identifier[self] , identifier[path] = literal[string] ): literal[string] keyword[for] identifier[fullpath] keyword[in] identifier[_listfiles] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[root_path] , identifier[path] )): identifier[relpath] = identifier[fullpath] . identifier[replace] ( identifier[self] . identifier[root_path] + literal[string] + identifier[path] + literal[string] , literal[string] ) identifier[md5sum] = identifier[hashlib] . identifier[md5] ( identifier[open] ( identifier[fullpath] , literal[string] ). identifier[read] ()). identifier[hexdigest] () identifier[LOG] . identifier[debug] ( literal[string] , identifier[fullpath] , identifier[relpath] , identifier[md5sum] ) identifier[self] . identifier[asset_hash] [ identifier[relpath] ]= identifier[md5sum]
def add_asset_hashes(self, path='dist/assets'): """ Scan through a directory and add hashes for each file found. """ for fullpath in _listfiles(os.path.join(self.root_path, path)): relpath = fullpath.replace(self.root_path + '/' + path + '/', '') md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest() LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum) self.asset_hash[relpath] = md5sum # depends on [control=['for'], data=['fullpath']]
def check(self, spec, data): '''Given a mongo-style spec and some data or python object, check whether the object complies with the spec. Fails eagerly. ''' path_eval = self.path_eval for keypath, specvalue in spec.items(): if keypath.startswith('$'): optext = keypath checkable = data args = (optext, specvalue, checkable) generator = self.dispatch_operator(*args) else: try: checkable = path_eval(data, keypath) except self.InvalidPath: # The spec referenced an item or attribute that # doesn't exist. Fail! return False generator = self.dispatch_literal(specvalue, checkable) for result in generator: if not result: return False return True
def function[check, parameter[self, spec, data]]: constant[Given a mongo-style spec and some data or python object, check whether the object complies with the spec. Fails eagerly. ] variable[path_eval] assign[=] name[self].path_eval for taget[tuple[[<ast.Name object at 0x7da20c7cb190>, <ast.Name object at 0x7da20c7c8430>]]] in starred[call[name[spec].items, parameter[]]] begin[:] if call[name[keypath].startswith, parameter[constant[$]]] begin[:] variable[optext] assign[=] name[keypath] variable[checkable] assign[=] name[data] variable[args] assign[=] tuple[[<ast.Name object at 0x7da20c7cbbe0>, <ast.Name object at 0x7da20c7c8fa0>, <ast.Name object at 0x7da20c7c9d20>]] variable[generator] assign[=] call[name[self].dispatch_operator, parameter[<ast.Starred object at 0x7da20c7c9510>]] for taget[name[result]] in starred[name[generator]] begin[:] if <ast.UnaryOp object at 0x7da20c7caec0> begin[:] return[constant[False]] return[constant[True]]
keyword[def] identifier[check] ( identifier[self] , identifier[spec] , identifier[data] ): literal[string] identifier[path_eval] = identifier[self] . identifier[path_eval] keyword[for] identifier[keypath] , identifier[specvalue] keyword[in] identifier[spec] . identifier[items] (): keyword[if] identifier[keypath] . identifier[startswith] ( literal[string] ): identifier[optext] = identifier[keypath] identifier[checkable] = identifier[data] identifier[args] =( identifier[optext] , identifier[specvalue] , identifier[checkable] ) identifier[generator] = identifier[self] . identifier[dispatch_operator] (* identifier[args] ) keyword[else] : keyword[try] : identifier[checkable] = identifier[path_eval] ( identifier[data] , identifier[keypath] ) keyword[except] identifier[self] . identifier[InvalidPath] : keyword[return] keyword[False] identifier[generator] = identifier[self] . identifier[dispatch_literal] ( identifier[specvalue] , identifier[checkable] ) keyword[for] identifier[result] keyword[in] identifier[generator] : keyword[if] keyword[not] identifier[result] : keyword[return] keyword[False] keyword[return] keyword[True]
def check(self, spec, data): """Given a mongo-style spec and some data or python object, check whether the object complies with the spec. Fails eagerly. """ path_eval = self.path_eval for (keypath, specvalue) in spec.items(): if keypath.startswith('$'): optext = keypath checkable = data args = (optext, specvalue, checkable) generator = self.dispatch_operator(*args) # depends on [control=['if'], data=[]] else: try: checkable = path_eval(data, keypath) # depends on [control=['try'], data=[]] except self.InvalidPath: # The spec referenced an item or attribute that # doesn't exist. Fail! return False # depends on [control=['except'], data=[]] generator = self.dispatch_literal(specvalue, checkable) for result in generator: if not result: return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['result']] # depends on [control=['for'], data=[]] return True
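A minimal standalone illustration of the eager spec check (literal values only; the real class also dispatches '$'-operators and dotted key paths, which are omitted here).

def simple_check(spec, data):
    for keypath, specvalue in spec.items():
        try:
            checkable = data[keypath]
        except KeyError:
            return False          # referenced key doesn't exist: fail
        if checkable != specvalue:
            return False          # first mismatch fails eagerly
    return True

print(simple_check({'name': 'ada', 'age': 36}, {'name': 'ada', 'age': 36, 'x': 1}))  # True
print(simple_check({'name': 'ada'}, {'name': 'bob'}))                                # False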
def make_action_list(self, item_list, **kwargs): ''' Generates a list of actions for sending to Elasticsearch ''' action_list = [] es_index = get2(kwargs, "es_index", self.es_index) action_type = kwargs.get("action_type","index") action_settings = {'_op_type': action_type, '_index': es_index} doc_type = kwargs.get("doc_type", self.doc_type) if not doc_type: doc_type = "unk" id_field = kwargs.get("id_field") for item in item_list: action = get_es_action_item(item, action_settings, doc_type, id_field) action_list.append(action) return action_list
def function[make_action_list, parameter[self, item_list]]: constant[ Generates a list of actions for sending to Elasticsearch ] variable[action_list] assign[=] list[[]] variable[es_index] assign[=] call[name[get2], parameter[name[kwargs], constant[es_index], name[self].es_index]] variable[action_type] assign[=] call[name[kwargs].get, parameter[constant[action_type], constant[index]]] variable[action_settings] assign[=] dictionary[[<ast.Constant object at 0x7da2046224d0>, <ast.Constant object at 0x7da2046223b0>], [<ast.Name object at 0x7da204623d60>, <ast.Name object at 0x7da204620ee0>]] variable[doc_type] assign[=] call[name[kwargs].get, parameter[constant[doc_type], name[self].doc_type]] if <ast.UnaryOp object at 0x7da1b1502440> begin[:] variable[doc_type] assign[=] constant[unk] variable[id_field] assign[=] call[name[kwargs].get, parameter[constant[id_field]]] for taget[name[item]] in starred[name[item_list]] begin[:] variable[action] assign[=] call[name[get_es_action_item], parameter[name[item], name[action_settings], name[doc_type], name[id_field]]] call[name[action_list].append, parameter[name[action]]] return[name[action_list]]
keyword[def] identifier[make_action_list] ( identifier[self] , identifier[item_list] ,** identifier[kwargs] ): literal[string] identifier[action_list] =[] identifier[es_index] = identifier[get2] ( identifier[kwargs] , literal[string] , identifier[self] . identifier[es_index] ) identifier[action_type] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] ) identifier[action_settings] ={ literal[string] : identifier[action_type] , literal[string] : identifier[es_index] } identifier[doc_type] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[self] . identifier[doc_type] ) keyword[if] keyword[not] identifier[doc_type] : identifier[doc_type] = literal[string] identifier[id_field] = identifier[kwargs] . identifier[get] ( literal[string] ) keyword[for] identifier[item] keyword[in] identifier[item_list] : identifier[action] = identifier[get_es_action_item] ( identifier[item] , identifier[action_settings] , identifier[doc_type] , identifier[id_field] ) identifier[action_list] . identifier[append] ( identifier[action] ) keyword[return] identifier[action_list]
def make_action_list(self, item_list, **kwargs): """ Generates a list of actions for sending to Elasticsearch """ action_list = [] es_index = get2(kwargs, 'es_index', self.es_index) action_type = kwargs.get('action_type', 'index') action_settings = {'_op_type': action_type, '_index': es_index} doc_type = kwargs.get('doc_type', self.doc_type) if not doc_type: doc_type = 'unk' # depends on [control=['if'], data=[]] id_field = kwargs.get('id_field') for item in item_list: action = get_es_action_item(item, action_settings, doc_type, id_field) action_list.append(action) # depends on [control=['for'], data=['item']] return action_list
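For reference, a hedged sketch of the bulk-action dict each item typically becomes; get_es_action_item isn't shown above, so the exact fields are an assumption based on elasticsearch-py's bulk-helper conventions.

# Assumed output shape for one item, in elasticsearch-py bulk-helper style.
action = {
    '_op_type': 'index',           # from action_settings
    '_index': 'my_index',          # from es_index
    '_type': 'unk',                # doc_type fallback when none is given
    '_id': 'item-42',              # taken from id_field, if provided
    '_source': {'field': 'value'}  # the item body itself
}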
def _write(self, lines, fname):
    """
    Writes an intermediate temporary sorted file

    :param lines: The lines to write.
    :param fname: The name of the temporary file.
    :return:
    """
    with open(fname, 'wb') as out_fhndl:
        for line in sorted(lines, key=self.key):
            pickle.dump(line, out_fhndl)
def function[_write, parameter[self, lines, fname]]: constant[ Writes a intermediate temporary sorted file :param lines: The lines to write. :param fname: The name of the temporary file. :return: ] with call[name[open], parameter[name[fname], constant[wb]]] begin[:] for taget[name[line]] in starred[call[name[sorted], parameter[name[lines]]]] begin[:] call[name[pickle].dump, parameter[name[line], name[out_fhndl]]]
keyword[def] identifier[_write] ( identifier[self] , identifier[lines] , identifier[fname] ): literal[string] keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[out_fhndl] : keyword[for] identifier[line] keyword[in] identifier[sorted] ( identifier[lines] , identifier[key] = identifier[self] . identifier[key] ): identifier[pickle] . identifier[dump] ( identifier[line] , identifier[out_fhndl] )
def _write(self, lines, fname): """ Writes a intermediate temporary sorted file :param lines: The lines to write. :param fname: The name of the temporary file. :return: """ with open(fname, 'wb') as out_fhndl: for line in sorted(lines, key=self.key): pickle.dump(line, out_fhndl) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['out_fhndl']]
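The written file holds one pickled record after another; a sketch of the matching read-back loop (the file name is hypothetical).

import pickle

def read_sorted_records(fname):
    # pickle.load consumes exactly one record per call; EOFError marks the end.
    with open(fname, 'rb') as fh:
        while True:
            try:
                yield pickle.load(fh)
            except EOFError:
                return

records = list(read_sorted_records('chunk_000.tmp'))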
def reload(self): """Reload the configuration from disk returning True if the configuration has changed from the previous values. """ config = self._default_configuration() if self._file_path: config.update(self._load_config_file()) if config != self._values: self._values = config return True return False
def function[reload, parameter[self]]: constant[Reload the configuration from disk returning True if the configuration has changed from the previous values. ] variable[config] assign[=] call[name[self]._default_configuration, parameter[]] if name[self]._file_path begin[:] call[name[config].update, parameter[call[name[self]._load_config_file, parameter[]]]] if compare[name[config] not_equal[!=] name[self]._values] begin[:] name[self]._values assign[=] name[config] return[constant[True]] return[constant[False]]
keyword[def] identifier[reload] ( identifier[self] ): literal[string] identifier[config] = identifier[self] . identifier[_default_configuration] () keyword[if] identifier[self] . identifier[_file_path] : identifier[config] . identifier[update] ( identifier[self] . identifier[_load_config_file] ()) keyword[if] identifier[config] != identifier[self] . identifier[_values] : identifier[self] . identifier[_values] = identifier[config] keyword[return] keyword[True] keyword[return] keyword[False]
def reload(self): """Reload the configuration from disk returning True if the configuration has changed from the previous values. """ config = self._default_configuration() if self._file_path: config.update(self._load_config_file()) # depends on [control=['if'], data=[]] if config != self._values: self._values = config return True # depends on [control=['if'], data=['config']] return False
def config_flag(self, which, new=None): """ Get or set a config flag. 'which' can be either a string ('PACING_20MS' etc.), or an integer. You should ALWAYS use a string, unless you really know what you are doing. """ flag = _get_flag(which, ConfigFlags) if flag: if not self.capabilities.have_config_flag(flag): raise yubikey_base.YubiKeyVersionError('Config flag %s requires %s, and this is %s %d.%d' % (which, flag.req_string(self.capabilities.model), \ self.capabilities.model, self.ykver[0], self.ykver[1])) req_major, req_minor = flag.req_version() self._require_version(major=req_major, minor=req_minor) value = flag.to_integer() else: if type(which) is not int: raise yubico_exception.InputError('Unknown non-integer ConfigFlag (%s)' % which) value = which return self.config_flags.get_set(value, new)
def function[config_flag, parameter[self, which, new]]: constant[ Get or set a config flag. 'which' can be either a string ('PACING_20MS' etc.), or an integer. You should ALWAYS use a string, unless you really know what you are doing. ] variable[flag] assign[=] call[name[_get_flag], parameter[name[which], name[ConfigFlags]]] if name[flag] begin[:] if <ast.UnaryOp object at 0x7da1b08d5750> begin[:] <ast.Raise object at 0x7da1b08d45b0> <ast.Tuple object at 0x7da1b08d58a0> assign[=] call[name[flag].req_version, parameter[]] call[name[self]._require_version, parameter[]] variable[value] assign[=] call[name[flag].to_integer, parameter[]] return[call[name[self].config_flags.get_set, parameter[name[value], name[new]]]]
keyword[def] identifier[config_flag] ( identifier[self] , identifier[which] , identifier[new] = keyword[None] ): literal[string] identifier[flag] = identifier[_get_flag] ( identifier[which] , identifier[ConfigFlags] ) keyword[if] identifier[flag] : keyword[if] keyword[not] identifier[self] . identifier[capabilities] . identifier[have_config_flag] ( identifier[flag] ): keyword[raise] identifier[yubikey_base] . identifier[YubiKeyVersionError] ( literal[string] %( identifier[which] , identifier[flag] . identifier[req_string] ( identifier[self] . identifier[capabilities] . identifier[model] ), identifier[self] . identifier[capabilities] . identifier[model] , identifier[self] . identifier[ykver] [ literal[int] ], identifier[self] . identifier[ykver] [ literal[int] ])) identifier[req_major] , identifier[req_minor] = identifier[flag] . identifier[req_version] () identifier[self] . identifier[_require_version] ( identifier[major] = identifier[req_major] , identifier[minor] = identifier[req_minor] ) identifier[value] = identifier[flag] . identifier[to_integer] () keyword[else] : keyword[if] identifier[type] ( identifier[which] ) keyword[is] keyword[not] identifier[int] : keyword[raise] identifier[yubico_exception] . identifier[InputError] ( literal[string] % identifier[which] ) identifier[value] = identifier[which] keyword[return] identifier[self] . identifier[config_flags] . identifier[get_set] ( identifier[value] , identifier[new] )
def config_flag(self, which, new=None): """ Get or set a config flag. 'which' can be either a string ('PACING_20MS' etc.), or an integer. You should ALWAYS use a string, unless you really know what you are doing. """ flag = _get_flag(which, ConfigFlags) if flag: if not self.capabilities.have_config_flag(flag): raise yubikey_base.YubiKeyVersionError('Config flag %s requires %s, and this is %s %d.%d' % (which, flag.req_string(self.capabilities.model), self.capabilities.model, self.ykver[0], self.ykver[1])) # depends on [control=['if'], data=[]] (req_major, req_minor) = flag.req_version() self._require_version(major=req_major, minor=req_minor) value = flag.to_integer() # depends on [control=['if'], data=[]] else: if type(which) is not int: raise yubico_exception.InputError('Unknown non-integer ConfigFlag (%s)' % which) # depends on [control=['if'], data=[]] value = which return self.config_flags.get_set(value, new)
def interval_host(host, time, f, *args, **kwargs): ''' Creates an Event attached to the *host* for management that will execute the *f* function every *time* seconds. See example in :ref:`sample_inter` :param Proxy host: proxy of the host. Can be obtained from inside a class with ``self.host``. :param int time: seconds for the intervals. :param func f: function to be called every *time* seconds. :param list args: arguments for *f*. :return: :class:`Event` instance of the interval. ''' def wrap(*args, **kwargs): thread = getcurrent() args = list(args) stop_event = args[0] del args[0] args = tuple(args) while not stop_event.is_set(): f(*args, **kwargs) stop_event.wait(time) host.detach_interval(thread) t2_stop = Event() args = list(args) args.insert(0, t2_stop) args = tuple(args) t = spawn(wrap, *args, **kwargs) thread_id = t host.attach_interval(thread_id, t2_stop) return t2_stop
def function[interval_host, parameter[host, time, f]]: constant[ Creates an Event attached to the *host* for management that will execute the *f* function every *time* seconds. See example in :ref:`sample_inter` :param Proxy host: proxy of the host. Can be obtained from inside a class with ``self.host``. :param int time: seconds for the intervals. :param func f: function to be called every *time* seconds. :param list args: arguments for *f*. :return: :class:`Event` instance of the interval. ] def function[wrap, parameter[]]: variable[thread] assign[=] call[name[getcurrent], parameter[]] variable[args] assign[=] call[name[list], parameter[name[args]]] variable[stop_event] assign[=] call[name[args]][constant[0]] <ast.Delete object at 0x7da20c76f3d0> variable[args] assign[=] call[name[tuple], parameter[name[args]]] while <ast.UnaryOp object at 0x7da20c76da50> begin[:] call[name[f], parameter[<ast.Starred object at 0x7da20c76d030>]] call[name[stop_event].wait, parameter[name[time]]] call[name[host].detach_interval, parameter[name[thread]]] variable[t2_stop] assign[=] call[name[Event], parameter[]] variable[args] assign[=] call[name[list], parameter[name[args]]] call[name[args].insert, parameter[constant[0], name[t2_stop]]] variable[args] assign[=] call[name[tuple], parameter[name[args]]] variable[t] assign[=] call[name[spawn], parameter[name[wrap], <ast.Starred object at 0x7da20c76c850>]] variable[thread_id] assign[=] name[t] call[name[host].attach_interval, parameter[name[thread_id], name[t2_stop]]] return[name[t2_stop]]
keyword[def] identifier[interval_host] ( identifier[host] , identifier[time] , identifier[f] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[def] identifier[wrap] (* identifier[args] ,** identifier[kwargs] ): identifier[thread] = identifier[getcurrent] () identifier[args] = identifier[list] ( identifier[args] ) identifier[stop_event] = identifier[args] [ literal[int] ] keyword[del] identifier[args] [ literal[int] ] identifier[args] = identifier[tuple] ( identifier[args] ) keyword[while] keyword[not] identifier[stop_event] . identifier[is_set] (): identifier[f] (* identifier[args] ,** identifier[kwargs] ) identifier[stop_event] . identifier[wait] ( identifier[time] ) identifier[host] . identifier[detach_interval] ( identifier[thread] ) identifier[t2_stop] = identifier[Event] () identifier[args] = identifier[list] ( identifier[args] ) identifier[args] . identifier[insert] ( literal[int] , identifier[t2_stop] ) identifier[args] = identifier[tuple] ( identifier[args] ) identifier[t] = identifier[spawn] ( identifier[wrap] ,* identifier[args] ,** identifier[kwargs] ) identifier[thread_id] = identifier[t] identifier[host] . identifier[attach_interval] ( identifier[thread_id] , identifier[t2_stop] ) keyword[return] identifier[t2_stop]
def interval_host(host, time, f, *args, **kwargs): """ Creates an Event attached to the *host* for management that will execute the *f* function every *time* seconds. See example in :ref:`sample_inter` :param Proxy host: proxy of the host. Can be obtained from inside a class with ``self.host``. :param int time: seconds for the intervals. :param func f: function to be called every *time* seconds. :param list args: arguments for *f*. :return: :class:`Event` instance of the interval. """ def wrap(*args, **kwargs): thread = getcurrent() args = list(args) stop_event = args[0] del args[0] args = tuple(args) while not stop_event.is_set(): f(*args, **kwargs) stop_event.wait(time) # depends on [control=['while'], data=[]] host.detach_interval(thread) t2_stop = Event() args = list(args) args.insert(0, t2_stop) args = tuple(args) t = spawn(wrap, *args, **kwargs) thread_id = t host.attach_interval(thread_id, t2_stop) return t2_stop
def get_dict_leaves(data):
    """
    Given a nested dictionary, this returns all its leaf elements in a list.

    :param data: nested dict (or list) to traverse
    :return: list
    """
    result = []
    if isinstance(data, dict):
        for item in data.values():
            result.extend(get_dict_leaves(item))
    elif isinstance(data, list):
        result.extend(data)
    else:
        result.append(data)
    return result
def function[get_dict_leaves, parameter[data]]: constant[ Given a nested dictionary, this returns all its leave elements in a list. :param adict: :return: list ] variable[result] assign[=] list[[]] if call[name[isinstance], parameter[name[data], name[dict]]] begin[:] for taget[name[item]] in starred[call[name[data].values, parameter[]]] begin[:] call[name[result].extend, parameter[call[name[get_dict_leaves], parameter[name[item]]]]] return[name[result]]
keyword[def] identifier[get_dict_leaves] ( identifier[data] ): literal[string] identifier[result] =[] keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ): keyword[for] identifier[item] keyword[in] identifier[data] . identifier[values] (): identifier[result] . identifier[extend] ( identifier[get_dict_leaves] ( identifier[item] )) keyword[elif] identifier[isinstance] ( identifier[data] , identifier[list] ): identifier[result] . identifier[extend] ( identifier[data] ) keyword[else] : identifier[result] . identifier[append] ( identifier[data] ) keyword[return] identifier[result]
def get_dict_leaves(data): """ Given a nested dictionary, this returns all its leave elements in a list. :param adict: :return: list """ result = [] if isinstance(data, dict): for item in data.values(): result.extend(get_dict_leaves(item)) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] elif isinstance(data, list): result.extend(data) # depends on [control=['if'], data=[]] else: result.append(data) return result
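For example (note that list leaves are extended shallowly, so items inside a list are returned as-is rather than recursed into):

nested = {'a': {'b': [1, 2]}, 'c': 3}
print(get_dict_leaves(nested))  # [1, 2, 3]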
def _q_to_dcm(self, q): """ Create DCM from q :param q: array q which represents a quaternion [w, x, y, z] :returns: 3x3 dcm array """ assert(len(q) == 4) assert(np.allclose(QuaternionBase.norm_array(q), 1)) dcm = np.zeros([3, 3]) a = q[0] b = q[1] c = q[2] d = q[3] a_sq = a * a b_sq = b * b c_sq = c * c d_sq = d * d dcm[0][0] = a_sq + b_sq - c_sq - d_sq dcm[0][1] = 2 * (b * c - a * d) dcm[0][2] = 2 * (a * c + b * d) dcm[1][0] = 2 * (b * c + a * d) dcm[1][1] = a_sq - b_sq + c_sq - d_sq dcm[1][2] = 2 * (c * d - a * b) dcm[2][0] = 2 * (b * d - a * c) dcm[2][1] = 2 * (a * b + c * d) dcm[2][2] = a_sq - b_sq - c_sq + d_sq return dcm
def function[_q_to_dcm, parameter[self, q]]: constant[ Create DCM from q :param q: array q which represents a quaternion [w, x, y, z] :returns: 3x3 dcm array ] assert[compare[call[name[len], parameter[name[q]]] equal[==] constant[4]]] assert[call[name[np].allclose, parameter[call[name[QuaternionBase].norm_array, parameter[name[q]]], constant[1]]]] variable[dcm] assign[=] call[name[np].zeros, parameter[list[[<ast.Constant object at 0x7da2041da8c0>, <ast.Constant object at 0x7da2041db6d0>]]]] variable[a] assign[=] call[name[q]][constant[0]] variable[b] assign[=] call[name[q]][constant[1]] variable[c] assign[=] call[name[q]][constant[2]] variable[d] assign[=] call[name[q]][constant[3]] variable[a_sq] assign[=] binary_operation[name[a] * name[a]] variable[b_sq] assign[=] binary_operation[name[b] * name[b]] variable[c_sq] assign[=] binary_operation[name[c] * name[c]] variable[d_sq] assign[=] binary_operation[name[d] * name[d]] call[call[name[dcm]][constant[0]]][constant[0]] assign[=] binary_operation[binary_operation[binary_operation[name[a_sq] + name[b_sq]] - name[c_sq]] - name[d_sq]] call[call[name[dcm]][constant[0]]][constant[1]] assign[=] binary_operation[constant[2] * binary_operation[binary_operation[name[b] * name[c]] - binary_operation[name[a] * name[d]]]] call[call[name[dcm]][constant[0]]][constant[2]] assign[=] binary_operation[constant[2] * binary_operation[binary_operation[name[a] * name[c]] + binary_operation[name[b] * name[d]]]] call[call[name[dcm]][constant[1]]][constant[0]] assign[=] binary_operation[constant[2] * binary_operation[binary_operation[name[b] * name[c]] + binary_operation[name[a] * name[d]]]] call[call[name[dcm]][constant[1]]][constant[1]] assign[=] binary_operation[binary_operation[binary_operation[name[a_sq] - name[b_sq]] + name[c_sq]] - name[d_sq]] call[call[name[dcm]][constant[1]]][constant[2]] assign[=] binary_operation[constant[2] * binary_operation[binary_operation[name[c] * name[d]] - binary_operation[name[a] * name[b]]]] call[call[name[dcm]][constant[2]]][constant[0]] assign[=] binary_operation[constant[2] * binary_operation[binary_operation[name[b] * name[d]] - binary_operation[name[a] * name[c]]]] call[call[name[dcm]][constant[2]]][constant[1]] assign[=] binary_operation[constant[2] * binary_operation[binary_operation[name[a] * name[b]] + binary_operation[name[c] * name[d]]]] call[call[name[dcm]][constant[2]]][constant[2]] assign[=] binary_operation[binary_operation[binary_operation[name[a_sq] - name[b_sq]] - name[c_sq]] + name[d_sq]] return[name[dcm]]
keyword[def] identifier[_q_to_dcm] ( identifier[self] , identifier[q] ): literal[string] keyword[assert] ( identifier[len] ( identifier[q] )== literal[int] ) keyword[assert] ( identifier[np] . identifier[allclose] ( identifier[QuaternionBase] . identifier[norm_array] ( identifier[q] ), literal[int] )) identifier[dcm] = identifier[np] . identifier[zeros] ([ literal[int] , literal[int] ]) identifier[a] = identifier[q] [ literal[int] ] identifier[b] = identifier[q] [ literal[int] ] identifier[c] = identifier[q] [ literal[int] ] identifier[d] = identifier[q] [ literal[int] ] identifier[a_sq] = identifier[a] * identifier[a] identifier[b_sq] = identifier[b] * identifier[b] identifier[c_sq] = identifier[c] * identifier[c] identifier[d_sq] = identifier[d] * identifier[d] identifier[dcm] [ literal[int] ][ literal[int] ]= identifier[a_sq] + identifier[b_sq] - identifier[c_sq] - identifier[d_sq] identifier[dcm] [ literal[int] ][ literal[int] ]= literal[int] *( identifier[b] * identifier[c] - identifier[a] * identifier[d] ) identifier[dcm] [ literal[int] ][ literal[int] ]= literal[int] *( identifier[a] * identifier[c] + identifier[b] * identifier[d] ) identifier[dcm] [ literal[int] ][ literal[int] ]= literal[int] *( identifier[b] * identifier[c] + identifier[a] * identifier[d] ) identifier[dcm] [ literal[int] ][ literal[int] ]= identifier[a_sq] - identifier[b_sq] + identifier[c_sq] - identifier[d_sq] identifier[dcm] [ literal[int] ][ literal[int] ]= literal[int] *( identifier[c] * identifier[d] - identifier[a] * identifier[b] ) identifier[dcm] [ literal[int] ][ literal[int] ]= literal[int] *( identifier[b] * identifier[d] - identifier[a] * identifier[c] ) identifier[dcm] [ literal[int] ][ literal[int] ]= literal[int] *( identifier[a] * identifier[b] + identifier[c] * identifier[d] ) identifier[dcm] [ literal[int] ][ literal[int] ]= identifier[a_sq] - identifier[b_sq] - identifier[c_sq] + identifier[d_sq] keyword[return] identifier[dcm]
def _q_to_dcm(self, q): """ Create DCM from q :param q: array q which represents a quaternion [w, x, y, z] :returns: 3x3 dcm array """ assert len(q) == 4 assert np.allclose(QuaternionBase.norm_array(q), 1) dcm = np.zeros([3, 3]) a = q[0] b = q[1] c = q[2] d = q[3] a_sq = a * a b_sq = b * b c_sq = c * c d_sq = d * d dcm[0][0] = a_sq + b_sq - c_sq - d_sq dcm[0][1] = 2 * (b * c - a * d) dcm[0][2] = 2 * (a * c + b * d) dcm[1][0] = 2 * (b * c + a * d) dcm[1][1] = a_sq - b_sq + c_sq - d_sq dcm[1][2] = 2 * (c * d - a * b) dcm[2][0] = 2 * (b * d - a * c) dcm[2][1] = 2 * (a * b + c * d) dcm[2][2] = a_sq - b_sq - c_sq + d_sq return dcm
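A quick sanity check of the formula above, assuming `quat` is an instance of the enclosing class: the identity quaternion maps to the identity matrix, and a 90-degree z-rotation quaternion yields the standard planar rotation matrix.

import numpy as np

# Identity quaternion [w, x, y, z] = [1, 0, 0, 0] -> identity DCM.
assert np.allclose(quat._q_to_dcm([1.0, 0.0, 0.0, 0.0]), np.eye(3))

# 90-degree rotation about z: w = cos(45 deg), z = sin(45 deg).
half = np.sqrt(0.5)
dcm = quat._q_to_dcm([half, 0.0, 0.0, half])
assert np.allclose(dcm, [[0.0, -1.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 0.0, 1.0]])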
def _find_function(name, region=None, key=None, keyid=None, profile=None): ''' Given function name, find and return matching Lambda information. ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) for funcs in __utils__['boto3.paged_call'](conn.list_functions): for func in funcs['Functions']: if func['FunctionName'] == name: return func return None
def function[_find_function, parameter[name, region, key, keyid, profile]]: constant[ Given function name, find and return matching Lambda information. ] variable[conn] assign[=] call[name[_get_conn], parameter[]] for taget[name[funcs]] in starred[call[call[name[__utils__]][constant[boto3.paged_call]], parameter[name[conn].list_functions]]] begin[:] for taget[name[func]] in starred[call[name[funcs]][constant[Functions]]] begin[:] if compare[call[name[func]][constant[FunctionName]] equal[==] name[name]] begin[:] return[name[func]] return[constant[None]]
keyword[def] identifier[_find_function] ( identifier[name] , identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ): literal[string] identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] ) keyword[for] identifier[funcs] keyword[in] identifier[__utils__] [ literal[string] ]( identifier[conn] . identifier[list_functions] ): keyword[for] identifier[func] keyword[in] identifier[funcs] [ literal[string] ]: keyword[if] identifier[func] [ literal[string] ]== identifier[name] : keyword[return] identifier[func] keyword[return] keyword[None]
def _find_function(name, region=None, key=None, keyid=None, profile=None): """ Given function name, find and return matching Lambda information. """ conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) for funcs in __utils__['boto3.paged_call'](conn.list_functions): for func in funcs['Functions']: if func['FunctionName'] == name: return func # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['func']] # depends on [control=['for'], data=['funcs']] return None
def remove(self, index=None, hash=None, keepSorted=True):
    """
    Removes a particle from the simulation.

    Parameters
    ----------
    index : int, optional
        Specify particle to remove by index.
    hash : c_uint32 or string, optional
        Specify particle to remove by hash (if a string is passed, the corresponding
        hash is calculated).
    keepSorted : bool, optional
        By default, remove preserves the order of particles in the particles array.
        You might set it to False in cases with many particles and many removals
        to speed things up.
    """
    if index is not None:
        clibrebound.reb_remove(byref(self), index, keepSorted)
    if hash is not None:
        hash_types = c_uint32, c_uint, c_ulong
        PY3 = sys.version_info[0] == 3
        if PY3:
            string_types = str,
            int_types = int,
        else:
            string_types = basestring,
            int_types = int, long
        if isinstance(hash, string_types):
            clibrebound.reb_remove_by_hash(byref(self), rebhash(hash), keepSorted)
        elif isinstance(hash, int_types):
            clibrebound.reb_remove_by_hash(byref(self), c_uint32(hash), keepSorted)
        elif isinstance(hash, hash_types):
            clibrebound.reb_remove_by_hash(byref(self), hash, keepSorted)
    if hasattr(self, '_widgets'):
        self._display_heartbeat(pointer(self))

    self.process_messages()
def function[remove, parameter[self, index, hash, keepSorted]]: constant[ Removes a particle from the simulation. Parameters ---------- index : int, optional Specify particle to remove by index. hash : c_uint32 or string, optional Specifiy particle to remove by hash (if a string is passed, the corresponding hash is calculated). keepSorted : bool, optional By default, remove preserves the order of particles in the particles array. Might set it to zero in cases with many particles and many removals to speed things up. ] if compare[name[index] is_not constant[None]] begin[:] call[name[clibrebound].reb_remove, parameter[call[name[byref], parameter[name[self]]], name[index], name[keepSorted]]] if compare[name[hash] is_not constant[None]] begin[:] variable[hash_types] assign[=] tuple[[<ast.Name object at 0x7da204347b50>, <ast.Name object at 0x7da204344d90>, <ast.Name object at 0x7da2044c28c0>]] variable[PY3] assign[=] compare[call[name[sys].version_info][constant[0]] equal[==] constant[3]] if name[PY3] begin[:] variable[string_types] assign[=] tuple[[<ast.Name object at 0x7da20e961cc0>]] variable[int_types] assign[=] tuple[[<ast.Name object at 0x7da20e961690>]] if call[name[isinstance], parameter[name[hash], name[string_types]]] begin[:] call[name[clibrebound].reb_remove_by_hash, parameter[call[name[byref], parameter[name[self]]], call[name[rebhash], parameter[name[hash]]], name[keepSorted]]] if call[name[hasattr], parameter[name[self], constant[_widgets]]] begin[:] call[name[self]._display_heartbeat, parameter[call[name[pointer], parameter[name[self]]]]] call[name[self].process_messages, parameter[]]
keyword[def] identifier[remove] ( identifier[self] , identifier[index] = keyword[None] , identifier[hash] = keyword[None] , identifier[keepSorted] = keyword[True] ): literal[string] keyword[if] identifier[index] keyword[is] keyword[not] keyword[None] : identifier[clibrebound] . identifier[reb_remove] ( identifier[byref] ( identifier[self] ), identifier[index] , identifier[keepSorted] ) keyword[if] identifier[hash] keyword[is] keyword[not] keyword[None] : identifier[hash_types] = identifier[c_uint32] , identifier[c_uint] , identifier[c_ulong] identifier[PY3] = identifier[sys] . identifier[version_info] [ literal[int] ]== literal[int] keyword[if] identifier[PY3] : identifier[string_types] = identifier[str] , identifier[int_types] = identifier[int] , keyword[else] : identifier[string_types] = identifier[basestring] , identifier[int_types] = identifier[int] , identifier[long] keyword[if] identifier[isinstance] ( identifier[hash] , identifier[string_types] ): identifier[clibrebound] . identifier[reb_remove_by_hash] ( identifier[byref] ( identifier[self] ), identifier[rebhash] ( identifier[hash] ), identifier[keepSorted] ) keyword[elif] identifier[isinstance] ( identifier[hash] , identifier[int_types] ): identifier[clibrebound] . identifier[reb_remove_by_hash] ( identifier[byref] ( identifier[self] ), identifier[c_uint32] ( identifier[hash] ), identifier[keepSorted] ) keyword[elif] identifier[isinstance] ( identifier[hash] , identifier[hash_types] ): identifier[clibrebound] . identifier[reb_remove_by_hash] ( identifier[byref] ( identifier[self] ), identifier[hash] , identifier[keepSorted] ) keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_display_heartbeat] ( identifier[pointer] ( identifier[self] )) identifier[self] . identifier[process_messages] ()
def remove(self, index=None, hash=None, keepSorted=True):
    """
    Removes a particle from the simulation.

    Parameters
    ----------
    index : int, optional
        Specify particle to remove by index.
    hash : c_uint32 or string, optional
        Specify particle to remove by hash (if a string is passed, the corresponding hash is calculated).
    keepSorted : bool, optional
        By default, remove preserves the order of particles in the particles array. 
        You might set it to zero in cases with many particles and many removals to speed things up.
    """
    if index is not None:
        clibrebound.reb_remove(byref(self), index, keepSorted) # depends on [control=['if'], data=['index']]
    if hash is not None:
        hash_types = (c_uint32, c_uint, c_ulong)
        PY3 = sys.version_info[0] == 3
        if PY3:
            string_types = (str,)
            int_types = (int,) # depends on [control=['if'], data=[]]
        else:
            string_types = (basestring,)
            int_types = (int, long)
        if isinstance(hash, string_types):
            clibrebound.reb_remove_by_hash(byref(self), rebhash(hash), keepSorted) # depends on [control=['if'], data=[]]
        elif isinstance(hash, int_types):
            clibrebound.reb_remove_by_hash(byref(self), c_uint32(hash), keepSorted) # depends on [control=['if'], data=[]]
        elif isinstance(hash, hash_types):
            clibrebound.reb_remove_by_hash(byref(self), hash, keepSorted) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['hash']]
    if hasattr(self, '_widgets'):
        self._display_heartbeat(pointer(self)) # depends on [control=['if'], data=[]]
    self.process_messages()
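A hedged usage sketch, assuming the standard REBOUND Python API in which this method lives on rebound.Simulation:

import rebound

sim = rebound.Simulation()
sim.add(m=1.0)                    # central star
sim.add(m=1e-3, a=1.0, hash="b")  # planet with a string hash
sim.add(m=1e-4, a=2.0)

sim.remove(hash="b")  # the string is converted to a hash internally (rebhash)
sim.remove(index=1)   # removal by index; order is preserved since keepSorted=True
print(sim.N)          # 1 particle left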
def p_goto(p): """ statement : goto NUMBER | goto ID """ entry = check_and_make_label(p[2], p.lineno(2)) if entry is not None: p[0] = make_sentence(p[1].upper(), entry) else: p[0] = None
def function[p_goto, parameter[p]]: constant[ statement : goto NUMBER | goto ID ] variable[entry] assign[=] call[name[check_and_make_label], parameter[call[name[p]][constant[2]], call[name[p].lineno, parameter[constant[2]]]]] if compare[name[entry] is_not constant[None]] begin[:] call[name[p]][constant[0]] assign[=] call[name[make_sentence], parameter[call[call[name[p]][constant[1]].upper, parameter[]], name[entry]]]
keyword[def] identifier[p_goto] ( identifier[p] ): literal[string] identifier[entry] = identifier[check_and_make_label] ( identifier[p] [ literal[int] ], identifier[p] . identifier[lineno] ( literal[int] )) keyword[if] identifier[entry] keyword[is] keyword[not] keyword[None] : identifier[p] [ literal[int] ]= identifier[make_sentence] ( identifier[p] [ literal[int] ]. identifier[upper] (), identifier[entry] ) keyword[else] : identifier[p] [ literal[int] ]= keyword[None]
def p_goto(p): """ statement : goto NUMBER | goto ID """ entry = check_and_make_label(p[2], p.lineno(2)) if entry is not None: p[0] = make_sentence(p[1].upper(), entry) # depends on [control=['if'], data=['entry']] else: p[0] = None
def respond(self, text, sessionID="general"):
        """
        Generate a response to the user input.

        :type text: str
        :param text: The string to be mapped
        :rtype: str
        """
        text = self.__normalize(text)
        previousText = self.__normalize(self.conversation[sessionID][-2])
        text_correction = self.__correction(text)
        current_topic = self.topic[sessionID]
        current_topic_order = current_topic.split(".")
        while current_topic_order:
            try:
                return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID)
            except ValueError as e:
                pass
            current_topic_order.pop()
            current_topic = ".".join(current_topic_order)
        try:
            return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID)
        except ValueError as e:
            return "Sorry I couldn't find anything relevant"
def function[respond, parameter[self, text, sessionID]]: constant[ Generate a response to the user input. :type text: str :param text: The string to be mapped :rtype: str ] variable[text] assign[=] call[name[self].__normalize, parameter[name[text]]] variable[previousText] assign[=] call[name[self].__normalize, parameter[call[call[name[self].conversation][name[sessionID]]][<ast.UnaryOp object at 0x7da20c991ab0>]]] variable[text_correction] assign[=] call[name[self].__correction, parameter[name[text]]] variable[current_topic] assign[=] call[name[self].topic][name[sessionID]] variable[current_topic_order] assign[=] call[name[current_topic].split, parameter[constant[.]]] while name[current_topic_order] begin[:] <ast.Try object at 0x7da20c990ee0> call[name[current_topic_order].pop, parameter[]] variable[current_topic] assign[=] call[constant[.].join, parameter[name[current_topic_order]]] <ast.Try object at 0x7da20c991a20>
keyword[def] identifier[respond] ( identifier[self] , identifier[text] , identifier[sessionID] = literal[string] ): literal[string] identifier[text] = identifier[self] . identifier[__normalize] ( identifier[text] ) identifier[previousText] = identifier[self] . identifier[__normalize] ( identifier[self] . identifier[conversation] [ identifier[sessionID] ][- literal[int] ]) identifier[text_correction] = identifier[self] . identifier[__correction] ( identifier[text] ) identifier[current_topic] = identifier[self] . identifier[topic] [ identifier[sessionID] ] identifier[current_topic_order] = identifier[current_topic] . identifier[split] ( literal[string] ) keyword[while] identifier[current_topic_order] : keyword[try] : keyword[return] identifier[self] . identifier[__response_on_topic] ( identifier[text] , identifier[previousText] , identifier[text_correction] , identifier[current_topic] , identifier[sessionID] ) keyword[except] identifier[ValueError] keyword[as] identifier[e] : keyword[pass] identifier[current_topic_order] . identifier[pop] () identifier[current_topic] = literal[string] . identifier[join] ( identifier[current_topic_order] ) keyword[try] : keyword[return] identifier[self] . identifier[__response_on_topic] ( identifier[text] , identifier[previousText] , identifier[text_correction] , identifier[current_topic] , identifier[sessionID] ) keyword[except] identifier[ValueError] keyword[as] identifier[e] : keyword[return] literal[string]
def respond(self, text, sessionID='general'): """ Generate a response to the user input. :type text: str :param text: The string to be mapped :rtype: str """ text = self.__normalize(text) previousText = self.__normalize(self.conversation[sessionID][-2]) text_correction = self.__correction(text) current_topic = self.topic[sessionID] current_topic_order = current_topic.split('.') while current_topic_order: try: return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID) # depends on [control=['try'], data=[]] except ValueError as e: pass # depends on [control=['except'], data=[]] current_topic_order.pop() current_topic = '.'.join(current_topic_order) # depends on [control=['while'], data=[]] try: return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID) # depends on [control=['try'], data=[]] except ValueError as e: return "Sorry I couldn't find anything relevant" # depends on [control=['except'], data=[]]
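A small self-contained sketch of the fallback order the loop above walks: the current topic is tried first, then progressively broader parent topics, and finally the root topic outside the loop.

# Illustration only; mirrors current_topic_order.pop() / ".".join(...) above
topic = "sports.football.premier_league"
order = topic.split(".")
tried = []
while order:
    tried.append(".".join(order))
    order.pop()
tried.append("")  # the final attempt after the while loop
print(tried)  # ['sports.football.premier_league', 'sports.football', 'sports', '']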
def validate_ENUM(in_value, restriction):
    """
        Test to ensure that the given value is contained in the provided
        list. The value parameter must be either a single value or a
        1-dimensional list. All the values in this list must satisfy the
        ENUM.
    """
    value = _get_val(in_value)
    if type(value) is list:
        for subval in value:
            if type(subval) is tuple:
                subval = subval[1]
            validate_ENUM(subval, restriction)
    else:
        if value not in restriction:
            raise ValidationError("ENUM : %s"%(restriction))
def function[validate_ENUM, parameter[in_value, restriction]]:
    constant[ Test to ensure that the given value is contained in the provided list. The value parameter must be either a single value or a 1-dimensional list. All the values in this list must satisfy the ENUM. ]
    variable[value] assign[=] call[name[_get_val], parameter[name[in_value]]]
    if compare[call[name[type], parameter[name[value]]] is name[list]] begin[:]
        for taget[name[subval]] in starred[name[value]] begin[:]
            if compare[call[name[type], parameter[name[subval]]] is name[tuple]] begin[:]
                variable[subval] assign[=] call[name[subval]][constant[1]]
            call[name[validate_ENUM], parameter[name[subval], name[restriction]]]
keyword[def] identifier[validate_ENUM] ( identifier[in_value] , identifier[restriction] ): literal[string] identifier[value] = identifier[_get_val] ( identifier[in_value] ) keyword[if] identifier[type] ( identifier[value] ) keyword[is] identifier[list] : keyword[for] identifier[subval] keyword[in] identifier[value] : keyword[if] identifier[type] ( identifier[subval] ) keyword[is] identifier[tuple] : identifier[subval] = identifier[subval] [ literal[int] ] identifier[validate_ENUM] ( identifier[subval] , identifier[restriction] ) keyword[else] : keyword[if] identifier[value] keyword[not] keyword[in] identifier[restriction] : keyword[raise] identifier[ValidationError] ( literal[string] %( identifier[restriction] ))
def validate_ENUM(in_value, restriction):
    """
        Test to ensure that the given value is contained in the provided
        list. The value parameter must be either a single value or a
        1-dimensional list. All the values in this list must satisfy the
        ENUM.
    """
    value = _get_val(in_value)
    if type(value) is list:
        for subval in value:
            if type(subval) is tuple:
                subval = subval[1] # depends on [control=['if'], data=[]]
            validate_ENUM(subval, restriction) # depends on [control=['for'], data=['subval']] # depends on [control=['if'], data=[]]
    elif value not in restriction:
        raise ValidationError('ENUM : %s' % restriction) # depends on [control=['if'], data=['restriction']]
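A hedged usage sketch; validate_ENUM and ValidationError are assumed importable from the owning module (the module path is not shown in this row), and the tuple handling follows the subval[1] branch above.

validate_ENUM("red", ["red", "green", "blue"])          # passes silently
validate_ENUM(["red", (0, "green")], ["red", "green"])  # tuples contribute element [1]
try:
    validate_ENUM("purple", ["red", "green", "blue"])
except ValidationError:
    print("rejected: value outside the ENUM restriction")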
async def fromURL( cls, url, *, credentials=None, insecure=False): """Return a `SessionAPI` for a given MAAS instance.""" try: description = await helpers.fetch_api_description( url, insecure=insecure) except helpers.RemoteError as error: # For now just re-raise as SessionError. raise SessionError(str(error)) else: session = cls(description, credentials) session.insecure = insecure return session
<ast.AsyncFunctionDef object at 0x7da20c76f1f0>
keyword[async] keyword[def] identifier[fromURL] ( identifier[cls] , identifier[url] ,*, identifier[credentials] = keyword[None] , identifier[insecure] = keyword[False] ): literal[string] keyword[try] : identifier[description] = keyword[await] identifier[helpers] . identifier[fetch_api_description] ( identifier[url] , identifier[insecure] = identifier[insecure] ) keyword[except] identifier[helpers] . identifier[RemoteError] keyword[as] identifier[error] : keyword[raise] identifier[SessionError] ( identifier[str] ( identifier[error] )) keyword[else] : identifier[session] = identifier[cls] ( identifier[description] , identifier[credentials] ) identifier[session] . identifier[insecure] = identifier[insecure] keyword[return] identifier[session]
async def fromURL(cls, url, *, credentials=None, insecure=False): """Return a `SessionAPI` for a given MAAS instance.""" try: description = await helpers.fetch_api_description(url, insecure=insecure) # depends on [control=['try'], data=[]] except helpers.RemoteError as error: # For now just re-raise as SessionError. raise SessionError(str(error)) # depends on [control=['except'], data=['error']] else: session = cls(description, credentials) session.insecure = insecure return session
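A hedged usage sketch. The SessionAPI name comes from the docstring; if this is python-libmaas's bones layer the import below would be maas.client.bones, but treat both the import path and the URL as assumptions.

import asyncio
from maas.client.bones import SessionAPI  # import path is an assumption

async def main():
    # insecure=True skips TLS verification, mirroring the keyword above
    return await SessionAPI.fromURL("http://maas.example.com:5240/MAAS/", insecure=True)

session = asyncio.get_event_loop().run_until_complete(main())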
def annual_event_counts(kind='all'): """ Returns a QuerySet of dicts, each one with these keys: * year - a date object representing the year * total - the number of events of `kind` that year kind - The Event `kind`, or 'all' for all kinds (default). """ qs = Event.objects if kind != 'all': qs = qs.filter(kind=kind) qs = qs.annotate(year=TruncYear('date')) \ .values('year') \ .annotate(total=Count('id')) \ .order_by('year') return qs
def function[annual_event_counts, parameter[kind]]: constant[ Returns a QuerySet of dicts, each one with these keys: * year - a date object representing the year * total - the number of events of `kind` that year kind - The Event `kind`, or 'all' for all kinds (default). ] variable[qs] assign[=] name[Event].objects if compare[name[kind] not_equal[!=] constant[all]] begin[:] variable[qs] assign[=] call[name[qs].filter, parameter[]] variable[qs] assign[=] call[call[call[call[name[qs].annotate, parameter[]].values, parameter[constant[year]]].annotate, parameter[]].order_by, parameter[constant[year]]] return[name[qs]]
keyword[def] identifier[annual_event_counts] ( identifier[kind] = literal[string] ): literal[string] identifier[qs] = identifier[Event] . identifier[objects] keyword[if] identifier[kind] != literal[string] : identifier[qs] = identifier[qs] . identifier[filter] ( identifier[kind] = identifier[kind] ) identifier[qs] = identifier[qs] . identifier[annotate] ( identifier[year] = identifier[TruncYear] ( literal[string] )). identifier[values] ( literal[string] ). identifier[annotate] ( identifier[total] = identifier[Count] ( literal[string] )). identifier[order_by] ( literal[string] ) keyword[return] identifier[qs]
def annual_event_counts(kind='all'): """ Returns a QuerySet of dicts, each one with these keys: * year - a date object representing the year * total - the number of events of `kind` that year kind - The Event `kind`, or 'all' for all kinds (default). """ qs = Event.objects if kind != 'all': qs = qs.filter(kind=kind) # depends on [control=['if'], data=['kind']] qs = qs.annotate(year=TruncYear('date')).values('year').annotate(total=Count('id')).order_by('year') return qs
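A hedged usage sketch for the Django helper above; the 'gig' kind value is an assumption about the Event model's choices. Because of TruncYear, each row's 'year' is a date pinned to January 1.

for row in annual_event_counts(kind="gig"):  # kind value assumed
    print(row["year"].year, row["total"])    # e.g. 2017 12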
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'position') and self.position is not None: _dict['position'] = self.position if hasattr(self, 'document_id') and self.document_id is not None: _dict['document_id'] = self.document_id if hasattr(self, 'score') and self.score is not None: _dict['score'] = self.score if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence if hasattr(self, 'collection_id') and self.collection_id is not None: _dict['collection_id'] = self.collection_id return _dict
def function[_to_dict, parameter[self]]: constant[Return a json dictionary representing this model.] variable[_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da18fe907f0> begin[:] call[name[_dict]][constant[position]] assign[=] name[self].position if <ast.BoolOp object at 0x7da18fe90ca0> begin[:] call[name[_dict]][constant[document_id]] assign[=] name[self].document_id if <ast.BoolOp object at 0x7da18dc07700> begin[:] call[name[_dict]][constant[score]] assign[=] name[self].score if <ast.BoolOp object at 0x7da204620f40> begin[:] call[name[_dict]][constant[confidence]] assign[=] name[self].confidence if <ast.BoolOp object at 0x7da2046228f0> begin[:] call[name[_dict]][constant[collection_id]] assign[=] name[self].collection_id return[name[_dict]]
keyword[def] identifier[_to_dict] ( identifier[self] ): literal[string] identifier[_dict] ={} keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[position] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[position] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[document_id] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[document_id] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[score] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[score] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[confidence] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[confidence] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[collection_id] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[collection_id] keyword[return] identifier[_dict]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'position') and self.position is not None: _dict['position'] = self.position # depends on [control=['if'], data=[]] if hasattr(self, 'document_id') and self.document_id is not None: _dict['document_id'] = self.document_id # depends on [control=['if'], data=[]] if hasattr(self, 'score') and self.score is not None: _dict['score'] = self.score # depends on [control=['if'], data=[]] if hasattr(self, 'confidence') and self.confidence is not None: _dict['confidence'] = self.confidence # depends on [control=['if'], data=[]] if hasattr(self, 'collection_id') and self.collection_id is not None: _dict['collection_id'] = self.collection_id # depends on [control=['if'], data=[]] return _dict
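A self-contained sketch of the serialization pattern above; the real owning SDK class is not shown in this row, so the stand-in below is hypothetical. The rule is the same: a key appears in the dict only when the attribute is set.

import json

class ResultMetadata(object):  # hypothetical stand-in for the SDK model
    def __init__(self, score=None, confidence=None):
        self.score = score
        self.confidence = confidence

    def _to_dict(self):
        # Include a key only when the corresponding attribute is set
        return {k: v for k, v in vars(self).items() if v is not None}

print(json.dumps(ResultMetadata(score=1.23)._to_dict()))  # {"score": 1.23}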
def namedb_op_sanity_check( opcode, op_data, record ):
    """
    Sanity checks over operation and state graph data:
    * opcode and op_data must be consistent
    * record must have an opcode
    * the given opcode must be reachable from it.
    """

    assert 'address' in record, "BUG: current record has no 'address' field"

    assert op_data.has_key('op'), "BUG: operation data is missing its 'op'"
    op_data_opcode = op_get_opcode_name( op_data['op'] )

    assert record.has_key('op'), "BUG: current record is missing its 'op'"
    cur_opcode = op_get_opcode_name( record['op'] )

    assert op_data_opcode is not None, "BUG: undefined operation '%s'" % op_data['op']
    assert cur_opcode is not None, "BUG: undefined current operation '%s'" % record['op']

    if op_data_opcode != opcode:
        # only allowed if the serialized opcode is the same
        # (i.e. as is the case for register/renew)
        assert NAME_OPCODES.get( op_data_opcode, None ) is not None, "BUG: unrecognized opcode '%s'" % op_data_opcode
        assert NAME_OPCODES.get( opcode, None ) is not None, "BUG: unrecognized opcode '%s'" % opcode

        assert NAME_OPCODES[op_data_opcode] == NAME_OPCODES[opcode], "BUG: %s != %s" % (opcode, op_data_opcode)

    assert opcode in OPCODE_SEQUENCE_GRAPH, "BUG: impossible to arrive at operation '%s'" % opcode
    assert cur_opcode in OPCODE_SEQUENCE_GRAPH, "BUG: impossible to have processed operation '%s'" % cur_opcode
    assert opcode in OPCODE_SEQUENCE_GRAPH[ cur_opcode ], "BUG: impossible sequence from '%s' to '%s'" % (cur_opcode, opcode)

    return True
def function[namedb_op_sanity_check, parameter[opcode, op_data, record]]: constant[ Sanity checks over operation and state graph data: * opcode and op_data must be consistent * record must have an opcode * the given opcode must be reachable from it. ] assert[compare[constant[address] in name[record]]] assert[call[name[op_data].has_key, parameter[constant[op]]]] variable[op_data_opcode] assign[=] call[name[op_get_opcode_name], parameter[call[name[op_data]][constant[op]]]] assert[call[name[record].has_key, parameter[constant[op]]]] variable[cur_opcode] assign[=] call[name[op_get_opcode_name], parameter[call[name[record]][constant[op]]]] assert[compare[name[op_data_opcode] is_not constant[None]]] assert[compare[name[cur_opcode] is_not constant[None]]] if compare[name[op_data_opcode] not_equal[!=] name[opcode]] begin[:] assert[compare[call[name[NAME_OPCODES].get, parameter[name[op_data_opcode], constant[None]]] is_not constant[None]]] assert[compare[call[name[NAME_OPCODES].get, parameter[name[opcode], constant[None]]] is_not constant[None]]] assert[compare[call[name[NAME_OPCODES]][name[op_data_opcode]] equal[==] call[name[NAME_OPCODES]][name[opcode]]]] assert[compare[name[opcode] in name[OPCODE_SEQUENCE_GRAPH]]] assert[compare[name[cur_opcode] in name[OPCODE_SEQUENCE_GRAPH]]] assert[compare[name[opcode] in call[name[OPCODE_SEQUENCE_GRAPH]][name[cur_opcode]]]] return[constant[True]]
keyword[def] identifier[namedb_op_sanity_check] ( identifier[opcode] , identifier[op_data] , identifier[record] ): literal[string] keyword[assert] literal[string] keyword[in] identifier[record] , literal[string] keyword[assert] identifier[op_data] . identifier[has_key] ( literal[string] ), literal[string] identifier[op_data_opcode] = identifier[op_get_opcode_name] ( identifier[op_data] [ literal[string] ]) keyword[assert] identifier[record] . identifier[has_key] ( literal[string] ), literal[string] identifier[cur_opcode] = identifier[op_get_opcode_name] ( identifier[record] [ literal[string] ]) keyword[assert] identifier[op_data_opcode] keyword[is] keyword[not] keyword[None] , literal[string] % identifier[op_data] [ literal[string] ] keyword[assert] identifier[cur_opcode] keyword[is] keyword[not] keyword[None] , literal[string] % identifier[record] [ literal[string] ] keyword[if] identifier[op_data_opcode] != identifier[opcode] : keyword[assert] identifier[NAME_OPCODES] . identifier[get] ( identifier[op_data_opcode] , keyword[None] ) keyword[is] keyword[not] keyword[None] , literal[string] % identifier[op_data_opcode] keyword[assert] identifier[NAME_OPCODES] . identifier[get] ( identifier[opcode] , keyword[None] ) keyword[is] keyword[not] keyword[None] , literal[string] % identifier[opcode] keyword[assert] identifier[NAME_OPCODES] [ identifier[op_data_opcode] ]== identifier[NAME_OPCODES] [ identifier[opcode] ], literal[string] %( identifier[opcode] , identifier[op_data_opcode] ) keyword[assert] identifier[opcode] keyword[in] identifier[OPCODE_SEQUENCE_GRAPH] , literal[string] % identifier[opcode] keyword[assert] identifier[cur_opcode] keyword[in] identifier[OPCODE_SEQUENCE_GRAPH] , literal[string] % identifier[cur_opcode] keyword[assert] identifier[opcode] keyword[in] identifier[OPCODE_SEQUENCE_GRAPH] [ identifier[cur_opcode] ], literal[string] %( identifier[cur_opcode] , identifier[opcode] ) keyword[return] keyword[True]
def namedb_op_sanity_check(opcode, op_data, record):
    """
    Sanity checks over operation and state graph data:
    * opcode and op_data must be consistent
    * record must have an opcode
    * the given opcode must be reachable from it.
    """
    assert 'address' in record, "BUG: current record has no 'address' field"
    assert op_data.has_key('op'), "BUG: operation data is missing its 'op'"
    op_data_opcode = op_get_opcode_name(op_data['op'])
    assert record.has_key('op'), "BUG: current record is missing its 'op'"
    cur_opcode = op_get_opcode_name(record['op'])
    assert op_data_opcode is not None, "BUG: undefined operation '%s'" % op_data['op']
    assert cur_opcode is not None, "BUG: undefined current operation '%s'" % record['op']
    if op_data_opcode != opcode:
        # only allowed if the serialized opcode is the same
        # (i.e. as is the case for register/renew)
        assert NAME_OPCODES.get(op_data_opcode, None) is not None, "BUG: unrecognized opcode '%s'" % op_data_opcode
        assert NAME_OPCODES.get(opcode, None) is not None, "BUG: unrecognized opcode '%s'" % opcode
        assert NAME_OPCODES[op_data_opcode] == NAME_OPCODES[opcode], 'BUG: %s != %s' % (opcode, op_data_opcode) # depends on [control=['if'], data=['op_data_opcode', 'opcode']]
    assert opcode in OPCODE_SEQUENCE_GRAPH, "BUG: impossible to arrive at operation '%s'" % opcode
    assert cur_opcode in OPCODE_SEQUENCE_GRAPH, "BUG: impossible to have processed operation '%s'" % cur_opcode
    assert opcode in OPCODE_SEQUENCE_GRAPH[cur_opcode], "BUG: impossible sequence from '%s' to '%s'" % (cur_opcode, opcode)
    return True
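A minimal self-contained sketch of the state-machine idea behind the final asserts; the opcode names and edges here are illustrative, not Blockstack's real OPCODE_SEQUENCE_GRAPH.

# Toy sequence graph: which opcode may legally follow which
SEQUENCE_GRAPH = {
    'NAME_REGISTRATION': ['NAME_UPDATE', 'NAME_TRANSFER'],
    'NAME_UPDATE': ['NAME_UPDATE', 'NAME_TRANSFER'],
    'NAME_TRANSFER': ['NAME_UPDATE'],
}

def is_legal_transition(cur_opcode, new_opcode):
    # Mirrors the last three asserts: both opcodes known, and an edge must exist
    return (cur_opcode in SEQUENCE_GRAPH
            and new_opcode in SEQUENCE_GRAPH
            and new_opcode in SEQUENCE_GRAPH[cur_opcode])

assert is_legal_transition('NAME_REGISTRATION', 'NAME_UPDATE')
assert not is_legal_transition('NAME_TRANSFER', 'NAME_TRANSFER')  # no such edge in this toy graph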
def find(self, *args, **kwargs):
        """Find and return the files collection documents that match ``filter``.

        Returns a cursor that iterates across files matching
        arbitrary queries on the files collection. Can be combined
        with other modifiers for additional control. For example::

          cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
          while (yield cursor.fetch_next):
              grid_out = cursor.next_object()
              data = yield grid_out.read()

        This iterates through all versions of "lisa.txt" stored in GridFS.
        Note that setting no_cursor_timeout to True may be important to
        prevent the cursor from timing out during long multi-file processing
        work.

        As another example, the call::

          most_recent_three = fs.find().sort("uploadDate", -1).limit(3)

        would return a cursor to the three most recently uploaded files
        in GridFS.

        Follows a similar interface to
        :meth:`~motor.MotorCollection.find`
        in :class:`~motor.MotorCollection`.

        :Parameters:
          - `filter`: Search query.
          - `batch_size` (optional): The number of documents to return per
            batch.
          - `limit` (optional): The maximum number of documents to return.
          - `no_cursor_timeout` (optional): The server normally times out idle
            cursors after an inactivity period (10 minutes) to prevent excess
            memory use. Set this option to True to prevent that.
          - `skip` (optional): The number of documents to skip before
            returning.
          - `sort` (optional): The order by which to sort results. Defaults to
            None.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`, created with
            :meth:`~MotorClient.start_session`.

        If a :class:`~pymongo.client_session.ClientSession` is passed to
        :meth:`find`, all returned :class:`MotorGridOut` instances are
        associated with that session.

        .. versionchanged:: 1.2
           Added session parameter.
        """
        cursor = self.delegate.find(*args, **kwargs)
        grid_out_cursor = create_class_with_framework(
            AgnosticGridOutCursor, self._framework, self.__module__)

        return grid_out_cursor(cursor, self.collection)
def function[find, parameter[self]]:
    constant[Find and return the files collection documents that match ``filter``.

        Returns a cursor that iterates across files matching
        arbitrary queries on the files collection. Can be combined
        with other modifiers for additional control. For example::

          cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
          while (yield cursor.fetch_next):
              grid_out = cursor.next_object()
              data = yield grid_out.read()

        This iterates through all versions of "lisa.txt" stored in GridFS.
        Note that setting no_cursor_timeout to True may be important to
        prevent the cursor from timing out during long multi-file processing
        work.

        As another example, the call::

          most_recent_three = fs.find().sort("uploadDate", -1).limit(3)

        would return a cursor to the three most recently uploaded files
        in GridFS.

        Follows a similar interface to
        :meth:`~motor.MotorCollection.find`
        in :class:`~motor.MotorCollection`.

        :Parameters:
          - `filter`: Search query.
          - `batch_size` (optional): The number of documents to return per
            batch.
          - `limit` (optional): The maximum number of documents to return.
          - `no_cursor_timeout` (optional): The server normally times out idle
            cursors after an inactivity period (10 minutes) to prevent excess
            memory use. Set this option to True to prevent that.
          - `skip` (optional): The number of documents to skip before
            returning.
          - `sort` (optional): The order by which to sort results. Defaults to
            None.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`, created with
            :meth:`~MotorClient.start_session`.

        If a :class:`~pymongo.client_session.ClientSession` is passed to
        :meth:`find`, all returned :class:`MotorGridOut` instances are
        associated with that session.

        .. versionchanged:: 1.2
           Added session parameter.
        ]
    variable[cursor] assign[=] call[name[self].delegate.find, parameter[<ast.Starred object at 0x7da20c6c47c0>]]
    variable[grid_out_cursor] assign[=] call[name[create_class_with_framework], parameter[name[AgnosticGridOutCursor], name[self]._framework, name[self].__module__]]
    return[call[name[grid_out_cursor], parameter[name[cursor], name[self].collection]]]
keyword[def] identifier[find] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[cursor] = identifier[self] . identifier[delegate] . identifier[find] (* identifier[args] ,** identifier[kwargs] ) identifier[grid_out_cursor] = identifier[create_class_with_framework] ( identifier[AgnosticGridOutCursor] , identifier[self] . identifier[_framework] , identifier[self] . identifier[__module__] ) keyword[return] identifier[grid_out_cursor] ( identifier[cursor] , identifier[self] . identifier[collection] )
def find(self, *args, **kwargs):
    """Find and return the files collection documents that match ``filter``.

        Returns a cursor that iterates across files matching
        arbitrary queries on the files collection. Can be combined
        with other modifiers for additional control. For example::

          cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
          while (yield cursor.fetch_next):
              grid_out = cursor.next_object()
              data = yield grid_out.read()

        This iterates through all versions of "lisa.txt" stored in GridFS.
        Note that setting no_cursor_timeout to True may be important to
        prevent the cursor from timing out during long multi-file processing
        work.

        As another example, the call::

          most_recent_three = fs.find().sort("uploadDate", -1).limit(3)

        would return a cursor to the three most recently uploaded files
        in GridFS.

        Follows a similar interface to
        :meth:`~motor.MotorCollection.find`
        in :class:`~motor.MotorCollection`.

        :Parameters:
          - `filter`: Search query.
          - `batch_size` (optional): The number of documents to return per
            batch.
          - `limit` (optional): The maximum number of documents to return.
          - `no_cursor_timeout` (optional): The server normally times out idle
            cursors after an inactivity period (10 minutes) to prevent excess
            memory use. Set this option to True to prevent that.
          - `skip` (optional): The number of documents to skip before
            returning.
          - `sort` (optional): The order by which to sort results. Defaults to
            None.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`, created with
            :meth:`~MotorClient.start_session`.

        If a :class:`~pymongo.client_session.ClientSession` is passed to
        :meth:`find`, all returned :class:`MotorGridOut` instances are
        associated with that session.

        .. versionchanged:: 1.2
           Added session parameter.
        """
    cursor = self.delegate.find(*args, **kwargs)
    grid_out_cursor = create_class_with_framework(AgnosticGridOutCursor, self._framework, self.__module__)
    return grid_out_cursor(cursor, self.collection)
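On Python 3.5+, Motor cursors should also support async iteration, so a modern equivalent of the docstring's first example would look like the hedged sketch below (the bucket variable is assumed to be a MotorGridFSBucket):

async def read_all_versions(bucket):
    async for grid_out in bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True):
        data = await grid_out.read()  # bytes of one stored version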
def rmdir(self, paths): ''' Delete a directory :param paths: Paths to delete :type paths: list :returns: a generator that yields dictionaries .. note: directories have to be empty. ''' if not isinstance(paths, list): raise InvalidInputException("Paths should be a list") if not paths: raise InvalidInputException("rmdir: no path given") processor = lambda path, node: self._handle_rmdir(path, node) for item in self._find_items(paths, processor, include_toplevel=True): if item: yield item
def function[rmdir, parameter[self, paths]]: constant[ Delete a directory :param paths: Paths to delete :type paths: list :returns: a generator that yields dictionaries .. note: directories have to be empty. ] if <ast.UnaryOp object at 0x7da1b08f9810> begin[:] <ast.Raise object at 0x7da1b08f8460> if <ast.UnaryOp object at 0x7da1b08f9720> begin[:] <ast.Raise object at 0x7da1b08f9870> variable[processor] assign[=] <ast.Lambda object at 0x7da1b08fb1c0> for taget[name[item]] in starred[call[name[self]._find_items, parameter[name[paths], name[processor]]]] begin[:] if name[item] begin[:] <ast.Yield object at 0x7da1b08f8220>
keyword[def] identifier[rmdir] ( identifier[self] , identifier[paths] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[paths] , identifier[list] ): keyword[raise] identifier[InvalidInputException] ( literal[string] ) keyword[if] keyword[not] identifier[paths] : keyword[raise] identifier[InvalidInputException] ( literal[string] ) identifier[processor] = keyword[lambda] identifier[path] , identifier[node] : identifier[self] . identifier[_handle_rmdir] ( identifier[path] , identifier[node] ) keyword[for] identifier[item] keyword[in] identifier[self] . identifier[_find_items] ( identifier[paths] , identifier[processor] , identifier[include_toplevel] = keyword[True] ): keyword[if] identifier[item] : keyword[yield] identifier[item]
def rmdir(self, paths): """ Delete a directory :param paths: Paths to delete :type paths: list :returns: a generator that yields dictionaries .. note: directories have to be empty. """ if not isinstance(paths, list): raise InvalidInputException('Paths should be a list') # depends on [control=['if'], data=[]] if not paths: raise InvalidInputException('rmdir: no path given') # depends on [control=['if'], data=[]] processor = lambda path, node: self._handle_rmdir(path, node) for item in self._find_items(paths, processor, include_toplevel=True): if item: yield item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
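A hedged usage sketch, assuming a snakebite-style HDFS client, which is what this generator API resembles; host, port, and paths are placeholders.

from snakebite.client import Client  # assumption: snakebite's standard client

client = Client('namenode.example.com', 8020)
for result in client.rmdir(['/tmp/staging/empty_a', '/tmp/staging/empty_b']):
    print(result)  # one dict per processed path

# Note: passing a bare string instead of a list raises InvalidInputException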
def provision_system_vessel(items, database_name, overwrite=False, clear=False, skip_user_check=False): """Provisions the default system vessel""" from hfos.provisions.base import provisionList from hfos.database import objectmodels vessel = objectmodels['vessel'].find_one({'name': 'Default System Vessel'}) if vessel is not None: if overwrite is False: hfoslog('Default vessel already existing. Skipping provisions.') return else: vessel.delete() provisionList([SystemVessel], 'vessel', overwrite, clear, skip_user_check) sysconfig = objectmodels['systemconfig'].find_one({'active': True}) hfoslog('Adapting system config for default vessel:', sysconfig) sysconfig.vesseluuid = SystemVessel['uuid'] sysconfig.save() hfoslog('Provisioning: Vessel: Done.', emitter='PROVISIONS')
def function[provision_system_vessel, parameter[items, database_name, overwrite, clear, skip_user_check]]: constant[Provisions the default system vessel] from relative_module[hfos.provisions.base] import module[provisionList] from relative_module[hfos.database] import module[objectmodels] variable[vessel] assign[=] call[call[name[objectmodels]][constant[vessel]].find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0fc46a0>], [<ast.Constant object at 0x7da1b0fc6fb0>]]]] if compare[name[vessel] is_not constant[None]] begin[:] if compare[name[overwrite] is constant[False]] begin[:] call[name[hfoslog], parameter[constant[Default vessel already existing. Skipping provisions.]]] return[None] call[name[provisionList], parameter[list[[<ast.Name object at 0x7da1b0fc7bb0>]], constant[vessel], name[overwrite], name[clear], name[skip_user_check]]] variable[sysconfig] assign[=] call[call[name[objectmodels]][constant[systemconfig]].find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0fc6770>], [<ast.Constant object at 0x7da1b0fc6860>]]]] call[name[hfoslog], parameter[constant[Adapting system config for default vessel:], name[sysconfig]]] name[sysconfig].vesseluuid assign[=] call[name[SystemVessel]][constant[uuid]] call[name[sysconfig].save, parameter[]] call[name[hfoslog], parameter[constant[Provisioning: Vessel: Done.]]]
keyword[def] identifier[provision_system_vessel] ( identifier[items] , identifier[database_name] , identifier[overwrite] = keyword[False] , identifier[clear] = keyword[False] , identifier[skip_user_check] = keyword[False] ): literal[string] keyword[from] identifier[hfos] . identifier[provisions] . identifier[base] keyword[import] identifier[provisionList] keyword[from] identifier[hfos] . identifier[database] keyword[import] identifier[objectmodels] identifier[vessel] = identifier[objectmodels] [ literal[string] ]. identifier[find_one] ({ literal[string] : literal[string] }) keyword[if] identifier[vessel] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[overwrite] keyword[is] keyword[False] : identifier[hfoslog] ( literal[string] ) keyword[return] keyword[else] : identifier[vessel] . identifier[delete] () identifier[provisionList] ([ identifier[SystemVessel] ], literal[string] , identifier[overwrite] , identifier[clear] , identifier[skip_user_check] ) identifier[sysconfig] = identifier[objectmodels] [ literal[string] ]. identifier[find_one] ({ literal[string] : keyword[True] }) identifier[hfoslog] ( literal[string] , identifier[sysconfig] ) identifier[sysconfig] . identifier[vesseluuid] = identifier[SystemVessel] [ literal[string] ] identifier[sysconfig] . identifier[save] () identifier[hfoslog] ( literal[string] , identifier[emitter] = literal[string] )
def provision_system_vessel(items, database_name, overwrite=False, clear=False, skip_user_check=False): """Provisions the default system vessel""" from hfos.provisions.base import provisionList from hfos.database import objectmodels vessel = objectmodels['vessel'].find_one({'name': 'Default System Vessel'}) if vessel is not None: if overwrite is False: hfoslog('Default vessel already existing. Skipping provisions.') return # depends on [control=['if'], data=[]] else: vessel.delete() # depends on [control=['if'], data=['vessel']] provisionList([SystemVessel], 'vessel', overwrite, clear, skip_user_check) sysconfig = objectmodels['systemconfig'].find_one({'active': True}) hfoslog('Adapting system config for default vessel:', sysconfig) sysconfig.vesseluuid = SystemVessel['uuid'] sysconfig.save() hfoslog('Provisioning: Vessel: Done.', emitter='PROVISIONS')
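A hedged invocation sketch: HFOS provisioning entry points are normally driven by the provisioning manager, and the items/database_name arguments are not used in this function's body, so the values below are placeholders.

# Re-provision the default vessel, replacing any existing one:
provision_system_vessel(None, 'hfos', overwrite=True)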