| rem | add | context |
|---|---|---|
'{outbound} method %s()", not found' % (action) | '{outbound} method %s()", not found' % action | def outbound(cls, action): """ Find the handler B{outbound} method for the specified I{action}. @param action: The I{action} part of an event subject. @type action: str @return: The handler instance method. @rtype: instancemethod """ mutex.acquire() try: method = cls.outbounds.get(action) if method is None: raise Excep... |
raise threading.ThreadError('Thread is not active') | raise _ThreadInterruptionError('Thread is not active') | def _tid(thread): """ Determine a thread's id. """ if not thread.is_alive(): raise threading.ThreadError('Thread is not active') if hasattr(thread, '_thread_id'): return thread._thread_id for tid, tobj in threading._active.items(): if tobj is thread: thread._thread_id = tid return tid raise AssertionError('Could not de... |
raise AssertionError('Could not determine thread id') | raise _ThreadInterruptionError('Could not determine thread id') | def _tid(thread): """ Determine a thread's id. """ if not thread.is_alive(): raise threading.ThreadError('Thread is not active') if hasattr(thread, '_thread_id'): return thread._thread_id for tid, tobj in threading._active.items(): if tobj is thread: thread._thread_id = tid return tid raise AssertionError('Could not de... |
raise ValueError('Invalid thread id') | raise _ThreadInterruptionError('Invalid thread id') | def _raise_exception_in_thread(tid, exc_type): """ Raises an exception in the threads with id tid. """ assert inspect.isclass(exc_type) # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) long_tid = ctypes.c_long(tid) exc_ptr = ctypes.py_object(exc_t... |
raise SystemError('PyThreadState_SetAsyncExc failed') | raise _ThreadInterruptionError('PyThreadState_SetAsyncExc failed') | def _raise_exception_in_thread(tid, exc_type): """ Raises an exception in the threads with id tid. """ assert inspect.isclass(exc_type) # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) long_tid = ctypes.c_long(tid) exc_ptr = ctypes.py_object(exc_t... |
_raise_exception_in_thread(_tid(self), exc_type) | _raise_exception_in_thread(_tid(thread), exc_type) | def raise_exception(self, exc_type): """ Raise and exception in this thread. NOTE this is executed in the context of the calling thread and blocks until the exception has been delivered to this thread and this thread exists. """ # first, kill off all the descendants for thread in get_descendants(self): while thread.is... |
except (threading.ThreadError, AssertionError, ValueError, SystemError), e: | except _ThreadInterruptionError, e: | def raise_exception(self, exc_type): """ Raise and exception in this thread. NOTE this is executed in the context of the calling thread and blocks until the exception has been delivered to this thread and this thread exists. """ # first, kill off all the descendants for thread in get_descendants(self): while thread.is... |
repo = self.pconn.repository(id) | repo = self.get_repo(id) | def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) syncs = self.pconn.sync_list(id) print_header(_('Status for %s') % id) print _('Repository: %s') % repo['id'] print _('Number of Packages: %d') % repo['package_count'] last_sync = repo['last_sync'] if last_sync is None: last_sync = 'nev... |
repo = self.pconn.repository(id) | repo = self.get_repo(id) | def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) files = repo['files'] packages = self.pconn.packages(id) print_header(_('Contents of %s') % id) print _('files in %s:') % id if not files: print _(' none') else: for f in sorted(repo['files']): print ' ' + f print _('packages in %s:') %... |
repo = self.pconn.repository(id) if not repo: system_exit(os.EX_DATAERR, _("Repository with id: [%s] not found") % id) | repo = self.get_repo(id) | def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) if not repo: system_exit(os.EX_DATAERR, _("Repository with id: [%s] not found") % id) optdict = vars(self.opts) for k, v in optdict.items(): if not v: continue method = self.find(k) if method: # special method stale = method(repo, v) if... |
if relative_path is None and r['source'] is not None : url_parse = urlparse(str(r['source']["url"])) r['relative_path'] = url_parse.path | if relative_path is None: if r['source'] is not None : url_parse = urlparse(str(r['source']["url"])) r['relative_path'] = url_parse.path else: r['relative_path'] = r['id'] | def create(self, id, name, arch, feed=None, symlinks=False, sync_schedule=None, cert_data=None, groupid=None, relative_path=None): """ Create a new Repository object and return it """ repo = self.repository(id) if repo is not None: raise PulpException("A Repo with id %s already exists" % id) self._validate_schedule(syn... |
self.objectdb.remove(repo, safe=True) | self.objectdb.remove({'id' : id}, safe=True) | def delete(self, id): repo = self._get_existing_repo(id) repo_sync.delete_schedule(repo) repo_location = "%s/%s" % (config.config.get('paths', 'local_storage'), "repos") #delete any data associated to this repo for field in ['relative_path', 'cert', 'key', 'ca']: if field == 'relative_path' and repo[field]: fpath = os.... |
default_to_publish = config.config.get('repos', 'default_to_published') | default_to_publish = \ config.config.getboolean('repos', 'default_to_published') | def create(self, id, name, arch, feed=None, symlinks=False, sync_schedule=None, cert_data=None, groupid=None, relative_path=None, gpgkeys=[]): """ Create a new Repository object and return it """ repo = self.repository(id) if repo is not None: raise PulpException("A Repo with id %s already exists" % id) self._validate_... |
log.debug("Skipping update of groups metadata since missing repomd file: '%s'" % | log.warn("Skipping update of groups metadata since missing repomd file: '%s'" % | def _update_groups_metadata(self, repoid): """ Updates the groups metadata (example: comps.xml) for a given repo @param repoid: repo id @return: True if metadata was successfully updated, otherwise False """ repo = self._get_existing_repo(repoid) try: # If the repomd file is not valid, or if we are missingg # a group m... |
log.debug("_update_groups_metadata exception caught: %s" % (e)) log.debug("Traceback: %s" % (traceback.format_exc())) | log.warn("_update_groups_metadata exception caught: %s" % (e)) log.warn("Traceback: %s" % (traceback.format_exc())) | def _update_groups_metadata(self, repoid): """ Updates the groups metadata (example: comps.xml) for a given repo @param repoid: repo id @return: True if metadata was successfully updated, otherwise False """ repo = self._get_existing_repo(repoid) try: # If the repomd file is not valid, or if we are missingg # a group m... |
help=_("Package filename to remove to this repository")) | help=_("Package filename to remove from this repository")) | def setup_parser(self): super(RemovePackages, self).setup_parser() self.parser.add_option("-p", "--package", action="append", dest="pkgname", help=_("Package filename to remove to this repository")) |
help=_("Errata Id to delete to this repository")) | help=_("Errata Id to delete from this repository")) | def setup_parser(self): super(RemoveErrata, self).setup_parser() self.parser.add_option("-e", "--errata", action="append", dest="errataid", help=_("Errata Id to delete to this repository")) |
self.assertTrue(False) | def test_update_delete_schedule(self): ''' Tests multiple updates to a repo's sync schedule and the case where multiple updates are created with no schedules. ''' | |
rmd = yum.repoMDObject.RepoMD("temp_pulp", repomd_path) | rmd = yum.repoMDObject.RepoMD("temp_pulp", path) | def get_repomd_filetype_path(path, filetype): """ @param path: path to repo @param filetype: metadata type to query, example "group", "primary", etc @return: Path for filetype, or None """ rmd = yum.repoMDObject.RepoMD("temp_pulp", repomd_path) if rmd: data = rmd.getData(filetype) return data.location[1] return None |
def POST(self): | def POST(self, id): | def POST(self): """ @return: True on successful update or repository meta data """ repo = self.input() API.update(repo) return self.output(True) |
@return: True on successful update or repository meta data | @return: True on successful update of repository meta data | def POST(self): """ @return: True on successful update or repository meta data """ repo = self.input() API.update(repo) return self.output(True) |
user = api.create(login=user_data['login'], password=user_data['password'], name=user_data['name']) | user = api.create(user_data['login'], user_data['password'], user_data['name']) | def PUT(self): """ Create a new user @return: user that was created """ user_data = self.params() user = api.create(login=user_data['login'], password=user_data['password'], name=user_data['name']) return self.created(user['id'], user) |
def _raise_exception_in_thread(tid, exctype): | def _raise_exception_in_thread(tid, exc_type): | def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) exc... |
if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') | assert inspect.isclass(exc_type) | def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) exc... |
excptr = ctypes.py_object(exctype) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, excptr) | long_tid = ctypes.c_long(tid) exc_ptr = ctypes.py_object(exc_type) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, exc_ptr) | def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) exc... |
nullptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, nullptr) | null_ptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, null_ptr) | def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) exc... |
def raise_exception(self, exctype): | def raise_exception(self, exc_type): | def _tid(self): """ Determine this thread's id. |
Raises the given exception type in the context of this thread. If the thread is busy in a system call (time.sleep(), socket.accept(), ...) the exception is simply ignored. If you are sure that your exception should terminate the thread, one way to ensure that it works is: t = InterruptableThread(...) ... t.raise_exce... | def raise_exception(self, exctype): """ Raises the given exception type in the context of this thread. | |
_raise_exception_in_thread(self._tid, exctype) | try: while self.is_alive(): _raise_exception_in_thread(self._tid, exc_type) self.join(self._default_timeout) except threading.ThreadError: pass | def raise_exception(self, exctype): """ Raises the given exception type in the context of this thread. |
_default_sleep = 0.0005 def _ensure_exception(self, exctype): """ Ensure that the exception gets raised in the thread or that the thread is already dead. @type exctype: type or class @param exctype: type or class of exception to raise in the tread """ try: while self.is_alive(): self.raise_exception(exctype) time.slee... | def raise_exception(self, exctype): """ Raises the given exception type in the context of this thread. | |
self._ensure_exception(TimeoutException) | self.raise_exception(TimeoutException) | def timeout(self): """ Raise a TimeoutException in the thread. """ self._ensure_exception(TimeoutException) |
help="common repository name") | help="Common repository name") | def generate_options(self): |
help="package arch the repo should support.") | help="Package arch the repo should support.") | def generate_options(self): |
if "updateinfo" in ftypes and not skip.has_key('errata') or skip['errata'] != 1: | if "updateinfo" in ftypes and (not skip.has_key('errata') or skip['errata'] != 1): | def add_packages_from_dir(self, dir, repo, skip={}): added_packages = {} added_errataids = [] if not skip.has_key('packages') or skip['packages'] != 1: startTime = time.time() log.debug("Begin to add packages from %s into %s" % (dir, repo['id'])) package_list = pulp.server.util.get_repo_packages(dir) log.debug("Process... |
log.debug("%s" % (traceback.format_exc())) | log.error("%s" % (traceback.format_exc())) | def import_package(self, package, repo): try: retval = None file_name = package.relativepath hashtype = "sha256" checksum = package.checksum found = self.package_api.packages(name=package.name, epoch=package.epoch, version=package.version, release=package.release, arch=package.arch, filename=file_name, checksum_type=ha... |
self.parser.add_option("--name", dest="name", help="Consumer group name") | self.parser.add_option("--id", dest="id", help="Consumer group id") | def generate_options(self): |
self.options.label) | self.options.id) | def _delete(self): (self.options, self.args) = self.parser.parse_args() if not self.options.id: print("Group id required. Try --help") sys.exit(0) try: self.cgconn.delete(id=self.options.id) print _(" Successful deleted Consumer Group [ %s ] " % self.options.id) except RestlibException, re: print _(" Deleted operation ... |
repo_source.url) | repo_source['url']) | def sync(self, repo, repo_source): # Parse the repo source for necessary pieces # Expected format: <server>/<channel> pieces = repo_source['url'].split('/') if len(pieces) < 2: raise PulpException('Feed format for RHN type must be <server>/<channel>. Feed: %s', repo_source.url) |
self.repo_api.create('repo-1', 'repo-1', 'i386', 'yum:localhost', '1 * * * *') self.repo_api.create('repo-2', 'repo-2', 'i386', 'yum:localhost', '2 * * * *') self.repo_api.create('repo-3', 'repo-3', 'i386', 'yum:localhost', None) | self.repo_api.create('repo-1', 'repo-1', 'i386', 'yum:localhost', sync_schedule='1 * * * *') self.repo_api.create('repo-2', 'repo-2', 'i386', 'yum:localhost', sync_schedule='2 * * * *') self.repo_api.create('repo-3', 'repo-3', 'i386', 'yum:localhost', sync_schedule=None) | def test_all_schedules(self): # Setup self.repo_api.create('repo-1', 'repo-1', 'i386', 'yum:localhost', '1 * * * *') self.repo_api.create('repo-2', 'repo-2', 'i386', 'yum:localhost', '2 * * * *') self.repo_api.create('repo-3', 'repo-3', 'i386', 'yum:localhost', None) self.repo_api.create('repo-4', 'repo-4', 'i386', 'yu... |
if now - task.start_time < self.timeout: | if task.start_time is None or now - task.start_time < self.timeout: | def _timeout_tasks(self): """ Stop tasks that have met or exceeded the queue's timeout length. """ if self.timeout is None: return running_tasks = self.__storage.running_tasks() if not running_tasks: return now = datetime.now() for task in running_tasks: if now - task.start_time < self.timeout: continue thread = self._... |
while task.state not in task_complete_states: time.sleep(self._default_sleep) | self._wait_for_task(task) | def _timeout_tasks(self): """ Stop tasks that have met or exceeded the queue's timeout length. """ if self.timeout is None: return running_tasks = self.__storage.running_tasks() if not running_tasks: return now = datetime.now() for task in running_tasks: if now - task.start_time < self.timeout: continue thread = self._... |
while task.state not in task_complete_states: time.sleep(self._default_sleep) | self._wait_for_task(task) | def cancel(self, task): self.__lock.acquire() try: thread = self.__threads[task] thread.cancel() while task.state not in task_complete_states: time.sleep(self._default_sleep) task.cancel() finally: self.__lock.release() |
(con["id"], con["description"], con["repoids"], con["package_profile"], con["key_value_pairs"]) | (con["id"], con["description"], \ con["repoids"],con["key_value_pairs"]) | def run(self): key = self.opts.key value = self.opts.value cons = self.cconn.consumers() baseurl = "%s://%s:%s" % (_cfg.server.scheme, _cfg.server.host, _cfg.server.port) for con in cons: con['package_profile'] = urlparse.urljoin(baseurl, con['package_profile']) if key is None: print_header(_("Consumer Information")) f... |
super(Create, self).setup_parser() | self.parser.add_option('--id', dest='id', help=_("consumer identifier eg: foo.example.com (required)")) | def setup_parser(self): super(Create, self).setup_parser() self.parser.add_option("--description", dest="description", help=_("consumer description eg: foo's web server")) |
if os.path.isdir(fn): continue | def keyfiles(self): """ Get a list of GPG key files at the specified I{path}. @param path: An absolute path to a file containing a GPG key. @type path: str @return: A list of tuples: (key-path, key-content) @rtype: list """ keys = [] pattern = '----BEGIN PGP PUBLIC KEY BLOCK-----' path = os.path.join(keydir(), self.pat... | |
self.parser.add_option('--username', dest='username', | self.parser.add_option('-u', '--username', dest='username', | def setup_parser(self): self.parser.add_option('--username', dest='username', help=_('pulp account username')) self.parser.add_option('--password', dest='password', help=_('pulp account password')) |
self.parser.add_option('--password', dest='password', | self.parser.add_option('-p', '--password', dest='password', | def setup_parser(self): self.parser.add_option('--username', dest='username', help=_('pulp account username')) self.parser.add_option('--password', dest='password', help=_('pulp account password')) |
def _print_sync_finsih(self, state, progress): | def _print_sync_finish(self, state, progress): | def _print_sync_finsih(self, state, progress): self._print_sync_progress(progress) print '' print _('Sync: %s') % state.title() |
def test_loadConfig(self): | def test_load_config(self): | def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp') |
origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' | orig_file = os.path.abspath(os.path.dirname(__file__)) + '/../../etc/pulp/pulp.ini' override_file = os.path.abspath(os.path.dirname(__file__)) + '/../common/test-override-pulp.ini' | def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp') |
config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp') | config = loadConfig(orig_file) self.assertEqual(config.get('paths', 'local_storage'), '/var/lib/pulp') | def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp') |
config = loadConfig(overrideFile, config=config) assert(config.get('paths', 'http_mount') == '/tmp/pulp') | config = loadConfig(override_file, config=config) self.assertEqual(config.get('paths', 'local_storage'), '/tmp/pulp') | def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp') |
help=_("schedule for automatically synchronizing the repository")) | help=_("cron entry date and time syntax for scheduling automatic repository synchronizations")) | def setup_parser(self): super(Create, self).setup_parser() self.parser.add_option("--name", dest="name", help=_("common repository name")) self.parser.add_option("--arch", dest="arch", help=_("package arch the repository should support")) self.parser.add_option("--feed", dest="feed", help=_("url feed to populate the re... |
help=_("schedule for automatically synchronizing the repository")) | help=_("cron entry date and time syntax for scheduling automatic repository synchronizations")) | def setup_parser(self): super(Update, self).setup_parser() self.parser.add_option("--name", dest="name", help=_("common repository name")) self.parser.add_option("--arch", dest="arch", help=_("package arch the repository should support")) self.parser.add_option("--feed", dest="feed", help=_("url feed to populate the re... |
while not isinstance(task.exception, TimeoutException): | while task.state not in task_complete_states: | def _timeout_tasks(self): """ """ if self.timeout is None: return running_tasks = self.__storage.running_tasks() if not running_tasks: return now = datetime.now() for task in running_tasks: if now - task.start_time < self.timeout: continue thread = self.__threads[task] # this will cause a deadlock because we are holdin... |
while not isinstance(task.exception, CancelException): | while task.state not in task_complete_states: | def cancel(self, task): self.__lock.acquire() try: thread = self.__threads[task] thread.cancel() while not isinstance(task.exception, CancelException): time.sleep(0.0005) task.cancel() finally: self.__lock.release() |
self.rapi.add_package_to_group(repo["id"], pkggroup["id"], | self.rapi.add_packages_to_group(repo["id"], pkggroup["id"], | def test_repo_package_groups(self): repo = self.rapi.create('some-id','some name', \ 'i386', 'yum:http://example.com') pkggroup = self.rapi.create_packagegroup(repo["id"], 'test-group-id', 'test-group-name', 'test-group-description') package = self.create_package('test_repo_packages') self.rapi.add_package(repo["id"], ... |
print _("\nPackage install failed") | system_exit(-1, _("\nPackage install failed")) | def run(self): consumerid = self.opts.consumerid consumergroupid = self.opts.consumergroupid if not (consumerid or consumergroupid): system_exit(os.EX_USAGE, _("Consumer or consumer group id required. try --help")) pnames = self.opts.pnames if not pnames: system_exit(os.EX_DATAERR, _("Nothing to upload.")) if consumerg... |
return self.objectdb.find_one({'id': id}, fields=fields) | consumers = list(self.objectdb.find(spec={'id': id}, fields=fields)) if not consumers: return None return consumers[0] | def consumer(self, id, fields=None): """ Return a single Consumer object """ return self.objectdb.find_one({'id': id}, fields=fields) |
pulp_handler = logging.handlers.RotatingFileHandler(pulp_file, maxBytes=max_size, backupCount=backups) | pulp_handler = handlers.RotatingFileHandler(pulp_file, maxBytes=max_size, backupCount=backups) | def configure_pulp_grinder_logging(): """ Pull the log file configurations from the global config and/or default config and initialize the top-level logging for both pulp and grinder. """ level_name = config.config.get('logs', 'level').upper() level = getattr(logging, level_name, logging.INFO) max_size = config.config.... |
grinder_handler = logging.handlers.RotatingFileHandler(grinder_file, maxBytes=max_size, backupCount=backups) | grinder_handler = handlers.RotatingFileHandler(grinder_file, maxBytes=max_size, backupCount=backups) | def configure_pulp_grinder_logging(): """ Pull the log file configurations from the global config and/or default config and initialize the top-level logging for both pulp and grinder. """ level_name = config.config.get('logs', 'level').upper() level = getattr(logging, level_name, logging.INFO) max_size = config.config.... |
handler = logging.handlers.TimedRotatingFileHandler(file, when=units, interval=lifetime, backupCount=backups) | handler = handlers.TimedRotatingFileHandler(file, when=units, interval=lifetime, backupCount=backups) | def configure_audit_logging(): """ Pull the audit logging configuration from the global config and/or default config and initialize pulp's audit logging. """ file = config.config.get('auditing', 'events_file') check_log_file(file) units = 'D' backups = config.config.getint('auditing', 'backups') lifetime = config.confi... |
if not started: configure_pulp_grinder_logging() configure_audit_logging() started = True | if started: return configure_pulp_grinder_logging() configure_audit_logging() started = True | def start_logging(): """ Convenience function to start pulp's different logging mechanisms. """ assert config.config is not None global started if not started: configure_pulp_grinder_logging() configure_audit_logging() started = True |
if started: logging.shutdown() started = False | if not started: return logging.shutdown() logging.Logger.manager.loggerDict = {} started = False | def stop_logging(): """ Convenience function to stop pulp's different logging mechanisms. """ global started if started: logging.shutdown() started = False |
print _('repository: %s') % repo['id'] print _('number of packages: %d') % repo['package_count'] last_sync = 'never' if repo['last_sync'] is None else str(repo['last_sync']) print _('last sync: %s') % last_sync | print _(' repository: %s') % repo['id'] print _(' number of packages: %d') % repo['package_count'] last_sync = repo['last_sync'] if last_sync is None: last_sync = 'never' else: last_sync = str(parse_date(last_sync)) print _(' last sync: %s') % last_sync | def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) syncs = self.pconn.sync_list(id) print _('repository: %s') % repo['id'] print _('number of packages: %d') % repo['package_count'] last_sync = 'never' if repo['last_sync'] is None else str(repo['last_sync']) print _('last sync: %s') % la... |
"system-config-boot", gtype="default") | "pulp-test-package", gtype="default") | def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out |
["newPackage"], gtype="default") | ["pulp-test-package"], gtype="default") | def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out |
["test_package_name"], gtype="default") | ["pulp-test-package"], gtype="default") | def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out |
self.assertTrue("test_package_name" in found["default_package_names"]) | self.assertTrue("pulp-test-package" in found["default_package_names"]) | def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out |
"test_package_name", gtype="default") | "pulp-test-package", gtype="default") | def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out |
self.assertTrue("test_package_name" not in found["default_package_names"]) | self.assertTrue("pulp-test-package" not in found["default_package_names"]) | def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out |
log.info("No valid server found, default to localhost") | LOG.info("No valid server found, default to localhost") | def check_user_pass_on_ldap(self, username, password=None): ''' verify the credentials for user on ldap server. @param username: Userid to be validated on ldap server @param password: password credentials for userid @return: user instance of the authenticated user if valid credentials were specified; None otherwise @rt... |
log.info("No valid base found, default to localhost") | LOG.info("No valid base found, default to localhost") | def check_user_pass_on_ldap(self, username, password=None): ''' verify the credentials for user on ldap server. @param username: Userid to be validated on ldap server @param password: password credentials for userid @return: user instance of the authenticated user if valid credentials were specified; None otherwise @rt... |
repo = self.input() API.update(repo) | repo_data = self.input() repo_data['id'] = id API.update(repo_data) | def POST(self, id): """ @return: True on successful update of repository meta data """ repo = self.input() API.update(repo) return self.output(True) |
return super_user_role in user.roles | return super_user_role in user['roles'] | def is_superuser(user): """ Return True if the user is a super user @type user: L{pulp.server.db.model.User} instance @param user: user to check @rtype: bool @return: True if the user is a super user, False otherwise """ return super_user_role in user.roles |
path = http.extend_uri_path(repo.id) | path = http.extend_uri_path(repo["id"]) | def PUT(self): """ Create a new repository. @return: repository meta data on successful creation of repository """ repo_data = self.params() |
return - ('no description available') | return _('no description available') | def description(self): """ Return a string showing the command's description """ return - ('no description available') |
""" | def description(self): """ Return a string for this action's description return _('no description available') """ | |
return web.ctx.path | return web.http.url(web.ctx.path) | def _status_path(self, id): """ Construct a URL path that can be used to poll a task's status A status path is constructed as follows: /<collection>/<object id>/<action>/<action id>/ A GET request sent to this path will get a JSON encoded status object """ parts = web.ctx.path.split('/') if parts[-2] == id: return web.... |
user = self.userconn.create(login=self.options.newusername, password=self.options.newpassword, name=self.options.name) | user = self.userconn.create(self.options.newusername, self.options.newpassword, self.options.name) | def _create(self): if not self.options.newusername: print("newusername required. Try --help") sys.exit(0) if not self.options.name: self.options.name = "" if not self.options.newpassword: self.options.newpassword = "" try: user = self.userconn.create(login=self.options.newusername, password=self.options.newpassword, na... |
data = self.input() | data = self.params() | def installpackages(self, id): """ Install packages. Body contains a list of package names. """ data = self.input() names = data.get('packagenames', []) return self.ok(API.installpackages(id, names)) |
def test_query_invalid_consumer_id(self): | def disabled_query_invalid_consumer_id(self): | def test_query_invalid_consumer_id(self): # Test self.assertRaises(PulpException, self.consumer_history_api.query, consumer_id='foo') |
self.consumer_history_api.consumer_created(1) self.consumer_history_api.consumer_created(2, originator='admin1') | def _populate_for_queries(self): ''' Populates the history store with a number of entries to help test the query functionality. ''' | |
def __init__(self, host, port, apihandler, apiprefix='/pulp/api', cert_file=None, key_file=None, username=None, password=None): | def __init__(self, host, port, apihandler, cert_file=None, key_file=None, username=None, password=None): | def __init__(self, host, port, apihandler, apiprefix='/pulp/api', cert_file=None, key_file=None, username=None, password=None): self.host = host # ensure we have an integer, httpslib is picky about the type # passed in for the port self.port = int(port) self.apihandler = ''.join((apiprefix, apihandler)) self.username =... |
self.apihandler = ''.join((apiprefix, apihandler)) | self.apihandler = apihandler | def __init__(self, host, port, apihandler, apiprefix='/pulp/api', cert_file=None, key_file=None, username=None, password=None): self.host = host # ensure we have an integer, httpslib is picky about the type # passed in for the port self.port = int(port) self.apihandler = ''.join((apiprefix, apihandler)) self.username =... |
def __init__(self, host='localhost', port=443, handler="", cert_file=None, key_file=None, | def __init__(self, host='localhost', port=443, handler="/pulp/api", cert_file=None, key_file=None, | def __init__(self, host='localhost', port=443, handler="", cert_file=None, key_file=None, username=None, password=None): self.host = host self.port = port self.handler = handler self.conn = None self.cert_file = cert_file self.key_file = key_file self.username = username self.password = password # initialize connection... |
self.conn = Restlib(self.host, self.port, self.handler, cert_file=self.cert_file, key_file=self.key_file, username=self.username, password=self.password) | self.conn = Restlib(self.host, self.port, self.handler, self.cert_file, self.key_file, self.username, self.password) | def setUp(self): self.conn = Restlib(self.host, self.port, self.handler, cert_file=self.cert_file, key_file=self.key_file, username=self.username, password=self.password) log.info("Connection Established for cli: Host: %s, Port: %s, handler: %s" % (self.host, self.port, self.handler)) log.info("Using cert_file: %s and ... |
return json.dumps("Authorization Failure. Check your username and password or your Certificate", | return json.dumps("Authorization Failure. Check your username and password or your certificate", | def check_roles(*fargs, **kw): ''' Strip off the decorator arguments so we can use those to check the Roles of the current caller. |
dirList = os.listdir(self.config.get('paths', 'local_storage') + '/' + repo['id']) | dirList = os.listdir(self.config.get('paths', 'local_storage') + '/repos/' + repo['id']) | def test_sync(self): repo = self.rapi.create('some-id','some name', 'i386', 'yum:http://mmccune.fedorapeople.org/pulp/') failed = False try: self.rapi.sync('invalid-id-not-found') except Exception: failed = True assert(failed) self.rapi.sync(repo['id']) # Check that local storage has dir and rpms dirList = os.listdir... |
key = self.opts.key | key = getattr(self.opts, 'key', None) | def _get_cert_options(self): cacert = self.opts.cacert cert = self.opts.cert key = self.opts.key if not (cacert and cert and key): return None return {"ca": utils.readFile(cacert), "cert": utils.readFile(cert), "key": utils.readFile(key)} |
msgFile = os.fdopen(fd, 'w') | msgFile = open(filename, 'w') | def writeToFile(filename, message, overwrite=True): dir_name = os.path.dirname(filename) if not os.access(dir_name, os.W_OK): os.mkdir(dir_name) if os.access(filename, os.F_OK) and not overwrite: # already have file there; let's back it up try: os.rename(filename, filename + '.save') except: return False fd = os.open(... |
usage = "usage: %prog consumer [OPTIONS]" | usage = "consumer [OPTIONS]" | def __init__(self, is_admin=True, actions=None): usage = "usage: %prog consumer [OPTIONS]" shortdesc = "consumer specific actions to pulp server." desc = "" self.name = "consumer" self.actions = actions or {"delete" : "Delete the consumer", "update" : "Update consumer profile", "list" : "List of ... |
spe.call('GiveNamedItem', pPlayer, str(item_name)) return True | return spe.call('GiveNamedItem', pPlayer, str(item_name), 0) | def giveNamedItem( userid, item_name ): # Get the player instance pPlayer = spe.getPlayer(int(userid)) # Is the player instance valid? if not pPlayer: # Return False since the player instance was not valid return False # Give the player the item spe.call('GiveNamedItem', pPlayer, str(item_name)) return True |
return False | return None | def giveNamedItem( userid, item_name ): # Get the player instance pPlayer = spe.getPlayer(int(userid)) # Is the player instance valid? if not pPlayer: # Return False since the player instance was not valid return False # Give the player the item return spe.call('GiveNamedItem', pPlayer, str(item_name), 0) |
pEntity = entityByIndex( int(entity_index) ) | pEntity = spe.getEntityOfIndex( int(entity_index) ) | def removeEntityByIndex( entity_index ): # Get entity instance pEntity = entityByIndex( int(entity_index) ) # Make sure it's valid if not pEntity: # Return false if the entity was None. return False # Remove it! spe.call("Remove", pEntity) return True |
pEntity = entityByIndex( int(entity_index) ) | pEntity = spe.getEntityOfIndex( int(entity_index) ) | def setStringKeyvalue( entity_index, keyvalue_name, new_value ): # Get entity instance pEntity = entityByIndex( int(entity_index) ) # Make sure the entity is valid if not pEntity: # Return False if the entity was None. return False # Set the keyvalue spe.call("setkv_string", pEntity, keyvalue_name, new_value) return... |
state = 'IDLE' | IDLE, START, ADDRESS, DATA = range(4) state = IDLE | def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/S... |
state = 'ADDRESS' | state = ADDRESS | def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/S... |
o += "%d\t\t%s: " % (samplenum, state) | o += "%d\t\tTODO:STATE: " % samplenum | def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/S... |
d = (state == 'ADDRESS') and (data & 0xfe) or data | d = (state == ADDRESS) and (data & 0xfe) or data | def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/S... |
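
Several of the rows above (the `_tid`, `_raise_exception_in_thread`, and `raise_exception` diffs) converge on one technique: asking CPython to deliver an exception to another thread via `ctypes.pythonapi.PyThreadState_SetAsyncExc`, with every failure mode funneled into a single `_ThreadInterruptionError`. The sketch below is a minimal, self-contained illustration of that pattern assembled from those rows, assuming Python 3 (so `Thread.ident` replaces the scan of `threading._active` seen in the original); the `join_timeout` default and the standalone-function layout are illustrative assumptions, not part of the dataset.

```python
import ctypes
import inspect
import threading


class _ThreadInterruptionError(Exception):
    """Single error type for all thread-interruption failures (as in the rows above)."""


def _tid(thread):
    """Resolve a thread's id, raising _ThreadInterruptionError if it cannot be found."""
    if not thread.is_alive():
        raise _ThreadInterruptionError('Thread is not active')
    if thread.ident is not None:  # Python 3: ident is set once the thread has started
        return thread.ident
    raise _ThreadInterruptionError('Could not determine thread id')


def _raise_exception_in_thread(tid, exc_type):
    """Ask CPython to raise exc_type asynchronously in the thread with id tid."""
    assert inspect.isclass(exc_type)
    long_tid = ctypes.c_long(tid)
    exc_ptr = ctypes.py_object(exc_type)
    # Returns the number of thread states modified; 0 means the id was not found.
    num = ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, exc_ptr)
    if num == 0:
        raise _ThreadInterruptionError('Invalid thread id')
    if num != 1:
        # More than one thread state was touched: clear the pending request and fail.
        null_ptr = ctypes.py_object()
        ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, null_ptr)
        raise _ThreadInterruptionError('PyThreadState_SetAsyncExc failed')


def raise_exception(thread, exc_type, join_timeout=0.5):
    """Keep re-raising exc_type in `thread` until it exits.

    join_timeout is an illustrative default, not taken from the dataset.
    """
    try:
        while thread.is_alive():
            _raise_exception_in_thread(_tid(thread), exc_type)
            thread.join(join_timeout)
    except _ThreadInterruptionError:
        # The thread exited (or its id became unresolvable) between checks.
        pass
```

The asynchronous exception is only delivered when the target thread next executes Python bytecode, which is why the rows above retry in a loop rather than calling `PyThreadState_SetAsyncExc` once.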