id
stringlengths
28
33
content
stringlengths
14
265k
max_stars_repo_path
stringlengths
49
55
crossvul-python_data_good_1622_2
from configparser import RawConfigParser from attic.remote import cache_if_remote import msgpack import os import sys from binascii import hexlify import shutil from .key import PlaintextKey from .helpers import Error, get_cache_dir, decode_dict, st_mtime_ns, unhexlify, UpgradableLock, int_to_bigint, \ bigint_to_int from .hashindex import ChunkIndex class Cache(object): """Client Side cache """ class RepositoryReplay(Error): """Cache is newer than repository, refusing to continue""" class CacheInitAbortedError(Error): """Cache initialization aborted""" class EncryptionMethodMismatch(Error): """Repository encryption method changed since last acccess, refusing to continue """ def __init__(self, repository, key, manifest, path=None, sync=True, warn_if_unencrypted=True): self.lock = None self.timestamp = None self.txn_active = False self.repository = repository self.key = key self.manifest = manifest self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii')) if not os.path.exists(self.path): if warn_if_unencrypted and isinstance(key, PlaintextKey): if 'ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK' not in os.environ: print("""Warning: Attempting to access a previously unknown unencrypted repository\n""", file=sys.stderr) answer = input('Do you want to continue? 
[yN] ') if not (answer and answer in 'Yy'): raise self.CacheInitAbortedError() self.create() self.open() if sync and self.manifest.id != self.manifest_id: # If repository is older than the cache something fishy is going on if self.timestamp and self.timestamp > manifest.timestamp: raise self.RepositoryReplay() # Make sure an encrypted repository has not been swapped for an unencrypted repository if self.key_type is not None and self.key_type != str(key.TYPE): raise self.EncryptionMethodMismatch() self.sync() self.commit() def __del__(self): self.close() def create(self): """Create a new empty cache at `path` """ os.makedirs(self.path) with open(os.path.join(self.path, 'README'), 'w') as fd: fd.write('This is an Attic cache') config = RawConfigParser() config.add_section('cache') config.set('cache', 'version', '1') config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii')) config.set('cache', 'manifest', '') with open(os.path.join(self.path, 'config'), 'w') as fd: config.write(fd) ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8')) with open(os.path.join(self.path, 'files'), 'w') as fd: pass # empty file def open(self): if not os.path.isdir(self.path): raise Exception('%s Does not look like an Attic cache' % self.path) self.lock = UpgradableLock(os.path.join(self.path, 'config'), exclusive=True) self.rollback() self.config = RawConfigParser() self.config.read(os.path.join(self.path, 'config')) if self.config.getint('cache', 'version') != 1: raise Exception('%s Does not look like an Attic cache') self.id = self.config.get('cache', 'repository') self.manifest_id = unhexlify(self.config.get('cache', 'manifest')) self.timestamp = self.config.get('cache', 'timestamp', fallback=None) self.key_type = self.config.get('cache', 'key_type', fallback=None) self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8')) self.files = None def close(self): if self.lock: self.lock.release() def _read_files(self): self.files = 
{} self._newest_mtime = 0 with open(os.path.join(self.path, 'files'), 'rb') as fd: u = msgpack.Unpacker(use_list=True) while True: data = fd.read(64 * 1024) if not data: break u.feed(data) for path_hash, item in u: item[0] += 1 self.files[path_hash] = msgpack.packb(item) def begin_txn(self): # Initialize transaction snapshot txn_dir = os.path.join(self.path, 'txn.tmp') os.mkdir(txn_dir) shutil.copy(os.path.join(self.path, 'config'), txn_dir) shutil.copy(os.path.join(self.path, 'chunks'), txn_dir) shutil.copy(os.path.join(self.path, 'files'), txn_dir) os.rename(os.path.join(self.path, 'txn.tmp'), os.path.join(self.path, 'txn.active')) self.txn_active = True def commit(self): """Commit transaction """ if not self.txn_active: return if self.files is not None: with open(os.path.join(self.path, 'files'), 'wb') as fd: for path_hash, item in self.files.items(): # Discard cached files with the newest mtime to avoid # issues with filesystem snapshots and mtime precision item = msgpack.unpackb(item) if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime: msgpack.pack((path_hash, item), fd) self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii')) self.config.set('cache', 'timestamp', self.manifest.timestamp) self.config.set('cache', 'key_type', str(self.key.TYPE)) with open(os.path.join(self.path, 'config'), 'w') as fd: self.config.write(fd) self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8')) os.rename(os.path.join(self.path, 'txn.active'), os.path.join(self.path, 'txn.tmp')) shutil.rmtree(os.path.join(self.path, 'txn.tmp')) self.txn_active = False def rollback(self): """Roll back partial and aborted transactions """ # Remove partial transaction if os.path.exists(os.path.join(self.path, 'txn.tmp')): shutil.rmtree(os.path.join(self.path, 'txn.tmp')) # Roll back active transaction txn_dir = os.path.join(self.path, 'txn.active') if os.path.exists(txn_dir): shutil.copy(os.path.join(txn_dir, 'config'), self.path) 
shutil.copy(os.path.join(txn_dir, 'chunks'), self.path) shutil.copy(os.path.join(txn_dir, 'files'), self.path) os.rename(txn_dir, os.path.join(self.path, 'txn.tmp')) if os.path.exists(os.path.join(self.path, 'txn.tmp')): shutil.rmtree(os.path.join(self.path, 'txn.tmp')) self.txn_active = False def sync(self): """Initializes cache by fetching and reading all archive indicies """ def add(id, size, csize): try: count, size, csize = self.chunks[id] self.chunks[id] = count + 1, size, csize except KeyError: self.chunks[id] = 1, size, csize self.begin_txn() print('Initializing cache...') self.chunks.clear() unpacker = msgpack.Unpacker() repository = cache_if_remote(self.repository) for name, info in self.manifest.archives.items(): archive_id = info[b'id'] cdata = repository.get(archive_id) data = self.key.decrypt(archive_id, cdata) add(archive_id, len(data), len(cdata)) archive = msgpack.unpackb(data) if archive[b'version'] != 1: raise Exception('Unknown archive metadata version') decode_dict(archive, (b'name',)) print('Analyzing archive:', archive[b'name']) for key, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])): data = self.key.decrypt(key, chunk) add(key, len(data), len(chunk)) unpacker.feed(data) for item in unpacker: if b'chunks' in item: for chunk_id, size, csize in item[b'chunks']: add(chunk_id, size, csize) def add_chunk(self, id, data, stats): if not self.txn_active: self.begin_txn() if self.seen_chunk(id): return self.chunk_incref(id, stats) size = len(data) data = self.key.encrypt(data) csize = len(data) self.repository.put(id, data, wait=False) self.chunks[id] = (1, size, csize) stats.update(size, csize, True) return id, size, csize def seen_chunk(self, id): return self.chunks.get(id, (0, 0, 0))[0] def chunk_incref(self, id, stats): if not self.txn_active: self.begin_txn() count, size, csize = self.chunks[id] self.chunks[id] = (count + 1, size, csize) stats.update(size, csize, False) return id, size, csize def chunk_decref(self, id, 
stats): if not self.txn_active: self.begin_txn() count, size, csize = self.chunks[id] if count == 1: del self.chunks[id] self.repository.delete(id, wait=False) stats.update(-size, -csize, True) else: self.chunks[id] = (count - 1, size, csize) stats.update(-size, -csize, False) def file_known_and_unchanged(self, path_hash, st): if self.files is None: self._read_files() entry = self.files.get(path_hash) if not entry: return None entry = msgpack.unpackb(entry) if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino: # reset entry age entry[0] = 0 self.files[path_hash] = msgpack.packb(entry) return entry[4] else: return None def memorize_file(self, path_hash, st, ids): # Entry: Age, inode, size, mtime, chunk ids mtime_ns = st_mtime_ns(st) self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids)) self._newest_mtime = max(self._newest_mtime, mtime_ns)
./CrossVul/dataset_final_sorted/CWE-264/py/good_1622_2
crossvul-python_data_bad_3772_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities used in testing""" import errno import functools import os import random import socket import StringIO import subprocess import unittest import nose.plugins.skip from glance.common import config from glance.common import utils from glance.common import wsgi from glance import context from glance.openstack.common import cfg CONF = cfg.CONF def get_isolated_test_env(): """ Returns a tuple of (test_id, test_dir) that is unique for an isolated test environment. Also ensure the test_dir is created. """ test_id = random.randint(0, 100000) test_dir = os.path.join("/", "tmp", "test.%d" % test_id) utils.safe_mkdirs(test_dir) return test_id, test_dir class BaseTestCase(unittest.TestCase): def setUp(self): super(BaseTestCase, self).setUp() #NOTE(bcwaldon): parse_args has to be called to register certain # command-line options - specifically we need config_dir for # the following policy tests config.parse_args(args=[]) def tearDown(self): super(BaseTestCase, self).tearDown() CONF.reset() def config(self, **kw): """ Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. 
All overrides are automatically cleared at the end of the current test by the tearDown() method. """ group = kw.pop('group', None) for k, v in kw.iteritems(): CONF.set_override(k, v, group) class skip_test(object): """Decorator that skips a test.""" def __init__(self, msg): self.message = msg def __call__(self, func): def _skipper(*args, **kw): """Wrapped skipper function.""" raise nose.SkipTest(self.message) _skipper.__name__ = func.__name__ _skipper.__doc__ = func.__doc__ return _skipper class skip_if(object): """Decorator that skips a test if condition is true.""" def __init__(self, condition, msg): self.condition = condition self.message = msg def __call__(self, func): def _skipper(*args, **kw): """Wrapped skipper function.""" if self.condition: raise nose.SkipTest(self.message) func(*args, **kw) _skipper.__name__ = func.__name__ _skipper.__doc__ = func.__doc__ return _skipper class skip_unless(object): """Decorator that skips a test if condition is not true.""" def __init__(self, condition, msg): self.condition = condition self.message = msg def __call__(self, func): def _skipper(*args, **kw): """Wrapped skipper function.""" if not self.condition: raise nose.SkipTest(self.message) func(*args, **kw) _skipper.__name__ = func.__name__ _skipper.__doc__ = func.__doc__ return _skipper class requires(object): """Decorator that initiates additional test setup/teardown.""" def __init__(self, setup=None, teardown=None): self.setup = setup self.teardown = teardown def __call__(self, func): def _runner(*args, **kw): if self.setup: self.setup(args[0]) func(*args, **kw) if self.teardown: self.teardown(args[0]) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner class depends_on_exe(object): """Decorator to skip test if an executable is unavailable""" def __init__(self, exe): self.exe = exe def __call__(self, func): def _runner(*args, **kw): cmd = 'which %s' % self.exe exitcode, out, err = execute(cmd, raise_error=False) if exitcode != 0: 
args[0].disabled_message = 'test requires exe: %s' % self.exe args[0].disabled = True func(*args, **kw) _runner.__name__ = func.__name__ _runner.__doc__ = func.__doc__ return _runner def skip_if_disabled(func): """Decorator that skips a test if test case is disabled.""" @functools.wraps(func) def wrapped(*a, **kwargs): func.__test__ = False test_obj = a[0] message = getattr(test_obj, 'disabled_message', 'Test disabled') if getattr(test_obj, 'disabled', False): raise nose.SkipTest(message) func(*a, **kwargs) return wrapped def execute(cmd, raise_error=True, no_venv=False, exec_env=None, expect_exit=True, expected_exitcode=0, context=None): """ Executes a command in a subprocess. Returns a tuple of (exitcode, out, err), where out is the string output from stdout and err is the string output from stderr when executing the command. :param cmd: Command string to execute :param raise_error: If returncode is not 0 (success), then raise a RuntimeError? Default: True) :param no_venv: Disable the virtual environment :param exec_env: Optional dictionary of additional environment variables; values may be callables, which will be passed the current value of the named environment variable :param expect_exit: Optional flag true iff timely exit is expected :param expected_exitcode: expected exitcode from the launcher :param context: additional context for error message """ env = os.environ.copy() if exec_env is not None: for env_name, env_val in exec_env.items(): if callable(env_val): env[env_name] = env_val(env.get(env_name)) else: env[env_name] = env_val # If we're asked to omit the virtualenv, and if one is set up, # restore the various environment variables if no_venv and 'VIRTUAL_ENV' in env: # Clip off the first element of PATH env['PATH'] = env['PATH'].split(os.pathsep, 1)[-1] del env['VIRTUAL_ENV'] # Make sure that we use the programs in the # current source directory's bin/ directory. 
path_ext = [os.path.join(os.getcwd(), 'bin')] # Also jack in the path cmd comes from, if it's absolute executable = cmd.split()[0] if os.path.isabs(executable): path_ext.append(os.path.dirname(executable)) env['PATH'] = ':'.join(path_ext) + ':' + env['PATH'] process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) if expect_exit: result = process.communicate() (out, err) = result exitcode = process.returncode else: out = '' err = '' exitcode = 0 if exitcode != expected_exitcode and raise_error: msg = "Command %(cmd)s did not succeed. Returned an exit "\ "code of %(exitcode)d."\ "\n\nSTDOUT: %(out)s"\ "\n\nSTDERR: %(err)s" % locals() if context: msg += "\n\nCONTEXT: %s" % context raise RuntimeError(msg) return exitcode, out, err def find_executable(cmdname): """ Searches the path for a given cmdname. Returns an absolute filename if an executable with the given name exists in the path, or None if one does not. :param cmdname: The bare name of the executable to search for """ # Keep an eye out for the possibility of an absolute pathname if os.path.isabs(cmdname): return cmdname # Get a list of the directories to search path = ([os.path.join(os.getcwd(), 'bin')] + os.environ['PATH'].split(os.pathsep)) # Search through each in turn for elem in path: full_path = os.path.join(elem, cmdname) if os.access(full_path, os.X_OK): return full_path # No dice... return None def get_unused_port(): """ Returns an unused port on localhost. """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('localhost', 0)) addr, port = s.getsockname() s.close() return port def xattr_writes_supported(path): """ Returns True if the we can write a file to the supplied path and subsequently write a xattr to that file. 
""" try: import xattr except ImportError: return False def set_xattr(path, key, value): xattr.setxattr(path, "user.%s" % key, str(value)) # We do a quick attempt to write a user xattr to a temporary file # to check that the filesystem is even enabled to support xattrs fake_filepath = os.path.join(path, 'testing-checkme') result = True with open(fake_filepath, 'wb') as fake_file: fake_file.write("XXX") fake_file.flush() try: set_xattr(fake_filepath, 'hits', '1') except IOError, e: if e.errno == errno.EOPNOTSUPP: result = False else: # Cleanup after ourselves... if os.path.exists(fake_filepath): os.unlink(fake_filepath) return result def minimal_headers(name, public=True): headers = {'Content-Type': 'application/octet-stream', 'X-Image-Meta-Name': name, 'X-Image-Meta-disk_format': 'raw', 'X-Image-Meta-container_format': 'ovf', } if public: headers['X-Image-Meta-Is-Public'] = 'True' return headers def minimal_add_command(port, name, suffix='', public=True): visibility = 'is_public=True' if public else '' return ("bin/glance --port=%d add %s" " disk_format=raw container_format=ovf" " name=%s %s" % (port, visibility, name, suffix)) class FakeAuthMiddleware(wsgi.Middleware): def __init__(self, app, is_admin=False): super(FakeAuthMiddleware, self).__init__(app) self.is_admin = is_admin def process_request(self, req): auth_tok = req.headers.get('X-Auth-Token') user = None tenant = None roles = [] if auth_tok: user, tenant, role = auth_tok.split(':') roles = [role] req.headers['X-User-Id'] = user req.headers['X-Tenant-Id'] = tenant req.headers['X-Roles'] = role req.headers['X-Identity-Status'] = 'Confirmed' kwargs = { 'user': user, 'tenant': tenant, 'roles': roles, 'is_admin': self.is_admin, } req.context = context.RequestContext(**kwargs) class FakeHTTPResponse(object): def __init__(self, status=200, headers=None, data=None, *args, **kwargs): data = data or 'I am a teapot, short and stout\n' self.data = StringIO.StringIO(data) self.read = self.data.read self.status = 
status self.headers = headers or {'content-length': len(data)} def getheader(self, name, default=None): return self.headers.get(name.lower(), default)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3772_3
crossvul-python_data_bad_3690_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import routes from keystone import catalog from keystone import exception from keystone import identity from keystone import policy from keystone import token from keystone.common import logging from keystone.common import utils from keystone.common import wsgi LOG = logging.getLogger(__name__) class AdminRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token', conditions=dict(method=['GET'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token_head', conditions=dict(method=['HEAD'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='delete_token', conditions=dict(method=['DELETE'])) mapper.connect('/tokens/{token_id}/endpoints', controller=auth_controller, action='endpoints', conditions=dict(method=['GET'])) # Miscellaneous Operations extensions_controller = AdminExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', 
conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.AdminRouter() routers = [identity_router] super(AdminRouter, self).__init__(mapper, routers) class PublicRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) # Miscellaneous extensions_controller = PublicExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.PublicRouter() routers = [identity_router] super(PublicRouter, self).__init__(mapper, routers) class PublicVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(PublicVersionRouter, self).__init__(mapper, routers) class AdminVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(AdminVersionRouter, self).__init__(mapper, routers) class VersionController(wsgi.Application): def __init__(self, version_type): self.catalog_api = catalog.Manager() self.url_key = "%sURL" % version_type super(VersionController, self).__init__() def _get_identity_url(self, context): catalog_ref = self.catalog_api.get_catalog( 
context=context, user_id=None, tenant_id=None) for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): if service == 'identity': return service_ref[self.url_key] raise exception.NotImplemented() def _get_versions_list(self, context): """The list of versions is dependent on the context.""" identity_url = self._get_identity_url(context) if not identity_url.endswith('/'): identity_url = identity_url + '/' versions = {} versions['v2.0'] = { "id": "v2.0", "status": "beta", "updated": "2011-11-19T00:00:00Z", "links": [ { "rel": "self", "href": identity_url, }, { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/content/" }, { "rel": "describedby", "type": "application/pdf", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/identity-dev-guide-" "2.0.pdf" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0" "+json" }, { "base": "application/xml", "type": "application/vnd.openstack.identity-v2.0" "+xml" } ] } return versions def get_versions(self, context): versions = self._get_versions_list(context) return wsgi.render_response(status=(300, 'Multiple Choices'), body={ "versions": { "values": versions.values() } }) def get_version(self, context): versions = self._get_versions_list(context) return wsgi.render_response(body={ "version": versions['v2.0'] }) class NoopController(wsgi.Application): def __init__(self): super(NoopController, self).__init__() def noop(self, context): return {} class TokenController(wsgi.Application): def __init__(self): self.catalog_api = catalog.Manager() self.identity_api = identity.Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(TokenController, self).__init__() def authenticate(self, context, auth=None): """Authenticate credentials and return a token. 
Accept auth as a dict that looks like:: { "auth":{ "passwordCredentials":{ "username":"test_user", "password":"mypass" }, "tenantName":"customer-x" } } In this case, tenant is optional, if not provided the token will be considered "unscoped" and can later be used to get a scoped token. Alternatively, this call accepts auth with only a token and tenant that will return a token that is scoped to that tenant. """ token_id = uuid.uuid4().hex if 'passwordCredentials' in auth: username = auth['passwordCredentials'].get('username', '') password = auth['passwordCredentials'].get('password', '') tenant_name = auth.get('tenantName', None) user_id = auth['passwordCredentials'].get('userId', None) if username: user_ref = self.identity_api.get_user_by_name( context=context, user_name=username) if user_ref: user_id = user_ref['id'] # more compat tenant_id = auth.get('tenantId', None) if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) if tenant_ref: tenant_id = tenant_ref['id'] try: auth_info = self.identity_api.authenticate(context=context, user_id=user_id, password=password, tenant_id=tenant_id) (user_ref, tenant_ref, metadata_ref) = auth_info # If the user is disabled don't allow them to authenticate if not user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() except AssertionError as e: raise exception.Unauthorized(e.message) token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: catalog_ref = {} elif 'token' in auth: token = auth['token'].get('id', None) tenant_name = auth.get('tenantName') # more compat if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = 
tenant_ref['id'] else: tenant_id = auth.get('tenantId', None) try: old_token_ref = self.token_api.get_token(context=context, token_id=token) except exception.NotFound: raise exception.Unauthorized() user_ref = old_token_ref['user'] # If the user is disabled don't allow them to authenticate current_user_ref = self.identity_api.get_user( context=context, user_id=user_ref['id']) if not current_user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_ref['id']) raise exception.Unauthorized() tenants = self.identity_api.get_tenants_for_user(context, user_ref['id']) if tenant_id: assert tenant_id in tenants tenant_ref = self.identity_api.get_tenant(context=context, tenant_id=tenant_id) if tenant_ref: metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: metadata_ref = {} catalog_ref = {} token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) logging.debug('TOKEN_REF %s', token_ref) return self._format_authenticate(token_ref, roles_ref, catalog_ref) def _get_token_ref(self, context, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. 
""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) token_ref = self.token_api.get_token(context=context, token_id=token_id) if belongs_to: assert token_ref['tenant']['id'] == belongs_to return token_ref # admin only def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. """ belongs_to = context['query_string'].get("belongsTo") assert self._get_token_ref(context, token_id, belongs_to) # admin only def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Returns metadata about the token along any associated roles. """ belongs_to = context['query_string'].get("belongsTo") token_ref = self._get_token_ref(context, token_id, belongs_to) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata metadata_ref = token_ref['metadata'] roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) # Get a service catalog if belongs_to is not none # This is needed for on-behalf-of requests catalog_ref = None if belongs_to is not None: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=metadata_ref) return self._format_token(token_ref, roles_ref, catalog_ref) def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_api.delete_token(context=context, token_id=token_id) def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" raise exception.NotImplemented() def _format_authenticate(self, 
token_ref, roles_ref, catalog_ref): o = self._format_token(token_ref, roles_ref) o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_token(self, token_ref, roles_ref, catalog_ref=None): user_ref = token_ref['user'] metadata_ref = token_ref['metadata'] expires = token_ref['expires'] if expires is not None: expires = utils.isotime(expires) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. 
self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': ('https://github.com/openstack/' 'identity-api'), } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3690_0
crossvul-python_data_bad_3634_2
# Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The security groups extension."""

import urllib
from xml.dom import minidom

from webob import exc
import webob

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils


LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
# Policy check helper bound to the "compute:security_groups" extension scope.
authorize = extensions.extension_authorizer('compute', 'security_groups')


def make_rule(elem):
    """Attach the XML sub-template describing one security group rule.

    Populates `elem` with the id/parent_group_id attributes and the
    ip_protocol/from_port/to_port/group/ip_range child elements used by
    the XML serializers below.
    """
    elem.set('id')
    elem.set('parent_group_id')

    proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
    proto.text = 'ip_protocol'

    from_port = xmlutil.SubTemplateElement(elem, 'from_port')
    from_port.text = 'from_port'

    to_port = xmlutil.SubTemplateElement(elem, 'to_port')
    to_port.text = 'to_port'

    group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
    name = xmlutil.SubTemplateElement(group, 'name')
    name.text = 'name'
    tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
    tenant_id.text = 'tenant_id'

    ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
                                          selector='ip_range')
    cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
    cidr.text = 'cidr'


def make_sg(elem):
    """Attach the XML sub-template describing one security group.

    Adds id/tenant_id/name attributes, a description element, and a
    nested <rules> list built with make_rule().
    """
    elem.set('id')
    elem.set('tenant_id')
    elem.set('name')

    desc = xmlutil.SubTemplateElement(elem, 'description')
    desc.text = 'description'

    rules = xmlutil.SubTemplateElement(elem, 'rules')
    rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
    make_rule(rule)

# Default XML namespace map for all security group templates.
sg_nsmap = {None: wsgi.XMLNS_V11}


class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a single security group rule."""

    def construct(self):
        root = xmlutil.TemplateElement('security_group_rule',
                                       selector='security_group_rule')
        make_rule(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a single security group."""

    def construct(self):
        root = xmlutil.TemplateElement('security_group',
                                       selector='security_group')
        make_sg(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
    """Serializer template for a list of security groups."""

    def construct(self):
        root = xmlutil.TemplateElement('security_groups')
        elem = xmlutil.SubTemplateElement(root, 'security_group',
                                          selector='security_groups')
        make_sg(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)


class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """
    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group = {}
        sg_node = self.find_first_child_named(dom,
                                               'security_group')
        if sg_node is not None:
            if sg_node.hasAttribute('name'):
                security_group['name'] = sg_node.getAttribute('name')
            desc_node = self.find_first_child_named(sg_node,
                                                     "description")
            if desc_node:
                security_group['description'] = self.extract_text(desc_node)
        return {'body': {'security_group': security_group}}


class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
    """
    Deserializer to handle xml-formatted security group requests.
    """

    def default(self, string):
        """Deserialize an xml-formatted security group create request"""
        dom = minidom.parseString(string)
        security_group_rule = self._extract_security_group_rule(dom)
        return {'body': {'security_group_rule': security_group_rule}}

    def _extract_security_group_rule(self, node):
        """Marshal the security group rule attribute of a parsed request"""
        # Each child element is optional; only present nodes are copied into
        # the resulting dict, so downstream code must tolerate missing keys.
        sg_rule = {}
        sg_rule_node = self.find_first_child_named(node,
                                                   'security_group_rule')
        if sg_rule_node is not None:
            ip_protocol_node = self.find_first_child_named(sg_rule_node,
                                                           "ip_protocol")
            if ip_protocol_node is not None:
                sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)

            from_port_node = self.find_first_child_named(sg_rule_node,
                                                         "from_port")
            if from_port_node is not None:
                sg_rule['from_port'] = self.extract_text(from_port_node)

            to_port_node = self.find_first_child_named(sg_rule_node,
                                                       "to_port")
            if to_port_node is not None:
                sg_rule['to_port'] = self.extract_text(to_port_node)

            parent_group_id_node = self.find_first_child_named(
                                                       sg_rule_node,
                                                       "parent_group_id")
            if parent_group_id_node is not None:
                sg_rule['parent_group_id'] = self.extract_text(
                                                       parent_group_id_node)

            group_id_node = self.find_first_child_named(sg_rule_node,
                                                        "group_id")
            if group_id_node is not None:
                sg_rule['group_id'] = self.extract_text(group_id_node)

            cidr_node = self.find_first_child_named(sg_rule_node,
                                                    "cidr")
            if cidr_node is not None:
                sg_rule['cidr'] = self.extract_text(cidr_node)

        return sg_rule


class SecurityGroupControllerBase(object):
    """Base class for Security Group controllers."""

    def __init__(self):
        self.compute_api = compute.API()
        # Pluggable handler that propagates group changes (configured via
        # FLAGS.security_group_handler).
        self.sgh = utils.import_object(FLAGS.security_group_handler)

    def _format_security_group_rule(self, context, rule):
        """Convert a DB rule object into the API response dict."""
        sg_rule = {}
        sg_rule['id'] = rule.id
        sg_rule['parent_group_id'] = rule.parent_group_id
        sg_rule['ip_protocol'] = rule.protocol
        sg_rule['from_port'] = rule.from_port
        sg_rule['to_port'] = rule.to_port
        sg_rule['group'] = {}
        sg_rule['ip_range'] = {}
        if rule.group_id:
            # Group-sourced rule: expose the source group's name/tenant.
            source_group = db.security_group_get(context, rule.group_id)
            sg_rule['group'] = {'name': source_group.name,
                                'tenant_id': source_group.project_id}
        else:
            # CIDR-sourced rule.
            sg_rule['ip_range'] = {'cidr': rule.cidr}
        return sg_rule

    def _format_security_group(self, context, group):
        """Convert a DB group object (plus its rules) into a response dict."""
        security_group = {}
        security_group['id'] = group.id
        security_group['description'] = group.description
        security_group['name'] = group.name
        security_group['tenant_id'] = group.project_id
        security_group['rules'] = []
        for rule in group.rules:
            security_group['rules'] += [self._format_security_group_rule(
                    context, rule)]
        return security_group


class SecurityGroupController(SecurityGroupControllerBase):
    """The Security group API controller for the OpenStack API."""

    def _get_security_group(self, context, id):
        """Fetch a security group by integer id or raise an HTTP error.

        Raises HTTPBadRequest for a non-integer id and HTTPNotFound when
        the group does not exist.
        """
        try:
            id = int(id)
            # NOTE(review): db.security_group_get is keyed only by id here —
            # there is no visible check that the group belongs to
            # context.project_id, so cross-tenant reads may be possible.
            # Confirm whether the DB layer scopes this by project (CWE-264).
            security_group = db.security_group_get(context, id)
        except ValueError:
            msg = _("Security group id should be integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        return security_group

    @wsgi.serializers(xml=SecurityGroupTemplate)
    def show(self, req, id):
        """Return data about the given security group."""
        context = req.environ['nova.context']
        authorize(context)
        security_group = self._get_security_group(context, id)
        return {'security_group': self._format_security_group(context,
                                                              security_group)}

    def delete(self, req, id):
        """Delete a security group."""
        context = req.environ['nova.context']
        authorize(context)
        security_group = self._get_security_group(context, id)
        # Refuse deletion while any instance still references the group.
        if db.security_group_in_use(context, security_group.id):
            msg = _("Security group is still in use")
            raise exc.HTTPBadRequest(explanation=msg)
        LOG.audit(_("Delete security group %s"), id, context=context)
        db.security_group_destroy(context, security_group.id)
        self.sgh.trigger_security_group_destroy_refresh(
            context, security_group.id)
        return webob.Response(status_int=202)

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req):
        """Returns a list of security groups"""
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)
        groups = db.security_group_get_by_project(context,
                                                  context.project_id)
        limited_list = common.limited(groups, req)
        result = [self._format_security_group(context, group)
                  for group in limited_list]

        # Sort by (tenant_id, name) for a stable listing order.
        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}

    @wsgi.serializers(xml=SecurityGroupTemplate)
    @wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
    def create(self, req, body):
        """Creates a new security group."""
        context = req.environ['nova.context']
        authorize(context)
        if not body:
            raise exc.HTTPUnprocessableEntity()

        security_group = body.get('security_group', None)

        if security_group is None:
            raise exc.HTTPUnprocessableEntity()

        group_name = security_group.get('name', None)
        group_description = security_group.get('description', None)

        # Both fields must be non-empty strings of at most 255 chars.
        self._validate_security_group_property(group_name, "name")
        self._validate_security_group_property(group_description,
                                               "description")
        group_name = group_name.strip()
        group_description = group_description.strip()

        LOG.audit(_("Create Security Group %s"), group_name, context=context)
        self.compute_api.ensure_default_security_group(context)
        # Group names are unique per project.
        if db.security_group_exists(context, context.project_id, group_name):
            msg = _('Security group %s already exists') % group_name
            raise exc.HTTPBadRequest(explanation=msg)

        group = {'user_id': context.user_id,
                 'project_id': context.project_id,
                 'name': group_name,
                 'description': group_description}
        group_ref = db.security_group_create(context, group)
        self.sgh.trigger_security_group_create_refresh(context, group)

        return {'security_group': self._format_security_group(context,
                                                              group_ref)}

    def _validate_security_group_property(self, value, typ):
        """ typ will be either 'name' or 'description',
            depending on the caller
        """
        try:
            # .strip() also doubles as a duck-type check for str/unicode.
            val = value.strip()
        except AttributeError:
            msg = _("Security group %s is not a string or unicode") % typ
            raise exc.HTTPBadRequest(explanation=msg)
        if not val:
            msg = _("Security group %s cannot be empty.") % typ
            raise exc.HTTPBadRequest(explanation=msg)
        if len(val) > 255:
            msg = _("Security group %s should not be greater "
                    "than 255 characters.") % typ
            raise exc.HTTPBadRequest(explanation=msg)


class SecurityGroupRulesController(SecurityGroupControllerBase):
    """Controller for creating and deleting security group rules."""

    @wsgi.serializers(xml=SecurityGroupRuleTemplate)
    @wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
    def create(self, req, body):
        """Add an ingress rule to the parent security group."""
        context = req.environ['nova.context']
        authorize(context)

        if not body:
            raise exc.HTTPUnprocessableEntity()

        if not 'security_group_rule' in body:
            raise exc.HTTPUnprocessableEntity()

        self.compute_api.ensure_default_security_group(context)

        sg_rule = body['security_group_rule']
        parent_group_id = sg_rule.get('parent_group_id', None)
        try:
            parent_group_id = int(parent_group_id)
            # NOTE(review): same unscoped-by-project lookup as
            # SecurityGroupController._get_security_group — verify tenant
            # ownership of parent_group_id is enforced somewhere.
            security_group = db.security_group_get(context, parent_group_id)
        except ValueError:
            msg = _("Parent group id is not integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound as exp:
            msg = _("Security group (%s) not found") % parent_group_id
            raise exc.HTTPNotFound(explanation=msg)

        msg = _("Authorize security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        try:
            values = self._rule_args_to_dict(context,
                              to_port=sg_rule.get('to_port'),
                              from_port=sg_rule.get('from_port'),
                              parent_group_id=sg_rule.get('parent_group_id'),
                              ip_protocol=sg_rule.get('ip_protocol'),
                              cidr=sg_rule.get('cidr'),
                              group_id=sg_rule.get('group_id'))
        except Exception as exp:
            # Any validation failure is surfaced as a 400 with the
            # exception text as the explanation.
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        if values is None:
            msg = _("Not enough parameters to build a "
                    "valid rule.")
            raise exc.HTTPBadRequest(explanation=msg)

        values['parent_group_id'] = security_group.id

        if self._security_group_rule_exists(security_group, values):
            msg = _('This rule already exists in group %s') % parent_group_id
            raise exc.HTTPBadRequest(explanation=msg)

        security_group_rule = db.security_group_rule_create(context, values)
        self.sgh.trigger_security_group_rule_create_refresh(
            context, [security_group_rule['id']])

        self.compute_api.trigger_security_group_rules_refresh(context,
                                    security_group_id=security_group['id'])

        return {"security_group_rule": self._format_security_group_rule(
                                                        context,
                                                        security_group_rule)}

    def _security_group_rule_exists(self, security_group, values):
        """Indicates whether the specified rule values are already
           defined in the given security group.
        """
        for rule in security_group.rules:
            is_duplicate = True
            # A rule is a duplicate only if all of these keys match.
            keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    is_duplicate = False
                    break
            if is_duplicate:
                return True
        return False

    def _rule_args_to_dict(self, context, to_port=None, from_port=None,
                           parent_group_id=None, ip_protocol=None,
                           cidr=None, group_id=None):
        """Validate raw rule arguments and build the DB values dict.

        Returns None when a CIDR-based rule lacks the mandatory
        protocol/port information; raises InvalidInput/InvalidCidr/
        InvalidIpProtocol/InvalidPortRange on bad values.
        """
        values = {}

        if group_id is not None:
            try:
                parent_group_id = int(parent_group_id)
                group_id = int(group_id)
            except ValueError:
                msg = _("Parent or group id is not integer")
                raise exception.InvalidInput(reason=msg)

            values['group_id'] = group_id
            # Check if groupId exists (raises NotFound otherwise).
            db.security_group_get(context, group_id)
        elif cidr:
            # If this fails, it throws an exception. This is what we want.
            try:
                cidr = urllib.unquote(cidr).decode()
            except Exception:
                raise exception.InvalidCidr(cidr=cidr)

            if not utils.is_valid_cidr(cidr):
                # Raise exception for non-valid address
                raise exception.InvalidCidr(cidr=cidr)

            values['cidr'] = cidr
        else:
            # Neither a source group nor a CIDR given: default to any source.
            values['cidr'] = '0.0.0.0/0'

        if group_id:
            # Open everything if an explicit port range or type/code are not
            # specified, but only if a source group was specified.
            ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
            if (ip_proto_upper == 'ICMP' and
                from_port is None and to_port is None):
                from_port = -1
                to_port = -1
            elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
                  and to_port is None):
                from_port = 1
                to_port = 65535

        if ip_protocol and from_port is not None and to_port is not None:

            ip_protocol = str(ip_protocol)
            try:
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                         " Code must be integers for ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                          "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                from_port > to_port):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Former value cannot"
                                            " be greater than the later")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="Valid TCP ports should"
                                           " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                (from_port < -1 or from_port > 255 or
                to_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                      to_port=to_port, msg="For ICMP, the"
                                           " type:code must be valid")

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def delete(self, req, id):
        """Delete a single security group rule by integer id."""
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)
        try:
            id = int(id)
            # NOTE(review): rule lookup is by id only, with no visible check
            # that the rule's parent group belongs to the caller's project —
            # confirm scoping before relying on this for isolation.
            rule = db.security_group_rule_get(context, id)
        except ValueError:
            msg = _("Rule id is not integer")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            raise exc.HTTPNotFound(explanation=msg)

        group_id = rule.parent_group_id
        self.compute_api.ensure_default_security_group(context)
        security_group = db.security_group_get(context, group_id)

        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)

        db.security_group_rule_destroy(context, rule['id'])
        self.sgh.trigger_security_group_rule_destroy_refresh(
            context, [rule['id']])
        self.compute_api.trigger_security_group_rules_refresh(context,
                                    security_group_id=security_group['id'])

        return webob.Response(status_int=202)


class ServerSecurityGroupController(SecurityGroupControllerBase):
    """Read-only controller: security groups attached to one server."""

    @wsgi.serializers(xml=SecurityGroupsTemplate)
    def index(self, req, server_id):
        """Returns a list of security groups for the given instance."""
        context = req.environ['nova.context']
        authorize(context)

        self.compute_api.ensure_default_security_group(context)

        try:
            instance = self.compute_api.get(context, server_id)
            groups = db.security_group_get_by_instance(context,
                                                       instance['id'])
        except exception.ApiError, e:
            raise webob.exc.HTTPBadRequest(explanation=e.message)
        except exception.NotAuthorized, e:
            raise webob.exc.HTTPUnauthorized()

        result = [self._format_security_group(context, group)
                  for group in groups]

        return {'security_groups':
                list(sorted(result,
                            key=lambda k: (k['tenant_id'], k['name'])))}


class SecurityGroupActionController(wsgi.Controller):
    """Server-action controller for add/removeSecurityGroup actions."""

    def __init__(self, *args, **kwargs):
        super(SecurityGroupActionController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.sgh = utils.import_object(FLAGS.security_group_handler)

    @wsgi.action('addSecurityGroup')
    def _addSecurityGroup(self, req, id, body):
        """Attach the named security group to server `id`."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            body = body['addSecurityGroup']
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.add_security_group(context, instance,
                                                group_name)
            self.sgh.trigger_instance_add_security_group_refresh(
                context, instance, group_name)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        return webob.Response(status_int=202)

    @wsgi.action('removeSecurityGroup')
    def _removeSecurityGroup(self, req, id, body):
        """Detach the named security group from server `id`."""
        context = req.environ['nova.context']
        authorize(context)

        try:
            body = body['removeSecurityGroup']
            group_name = body['name']
        except TypeError:
            msg = _("Missing parameter dict")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        except KeyError:
            msg = _("Security group not specified")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        if not group_name or group_name.strip() == '':
            msg = _("Security group name cannot be empty")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.remove_security_group(context, instance,
                                                   group_name)
            self.sgh.trigger_instance_remove_security_group_refresh(
                context, instance, group_name)
        except exception.SecurityGroupNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.InstanceNotFound as exp:
            raise exc.HTTPNotFound(explanation=unicode(exp))
        except exception.Invalid as exp:
            raise exc.HTTPBadRequest(explanation=unicode(exp))

        return webob.Response(status_int=202)


class Security_groups(extensions.ExtensionDescriptor):
    """Security group support"""

    name = "SecurityGroups"
    alias = "security_groups"
    namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
    updated = "2011-07-21T00:00:00+00:00"

    def get_controller_extensions(self):
        """Register the add/remove security group server actions."""
        controller = SecurityGroupActionController()
        extension = extensions.ControllerExtension(self, 'servers',
                                                   controller)
        return [extension]

    def get_resources(self):
        """Register the three REST resources exposed by this extension."""
        resources = []

        res = extensions.ResourceExtension('os-security-groups',
                                controller=SecurityGroupController())

        resources.append(res)

        res = extensions.ResourceExtension('os-security-group-rules',
                        controller=SecurityGroupRulesController())
        resources.append(res)

        res = extensions.ResourceExtension(
            'os-security-groups',
            controller=ServerSecurityGroupController(),
            parent=dict(member_name='server', collection_name='servers'))
        resources.append(res)

        return resources
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3634_2
crossvul-python_data_bad_3771_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Stubouts, mocks and fixtures for the test suite"""

import os

# sendfile is optional; when absent, sendfile-based code paths are skipped.
try:
    import sendfile
    SENDFILE_SUPPORTED = True
except ImportError:
    SENDFILE_SUPPORTED = False

import routes
import webob

from glance.api.middleware import context
from glance.api.v1 import router
import glance.common.client
from glance.registry.api import v1 as rserver
from glance.tests import utils


VERBOSE = False
DEBUG = False


class FakeRegistryConnection(object):
    """In-process stand-in for an httplib connection to the registry.

    request() builds a webob.Request; getresponse() routes it straight
    into the registry WSGI app instead of over the network.
    """

    def __init__(self, *args, **kwargs):
        pass

    def connect(self):
        return True

    def close(self):
        return True

    def request(self, method, url, body=None, headers=None):
        self.req = webob.Request.blank("/" + url.lstrip("/"))
        self.req.method = method
        if headers:
            self.req.headers = headers
        if body:
            self.req.body = body

    def getresponse(self):
        mapper = routes.Mapper()
        api = context.UnauthenticatedContextMiddleware(rserver.API(mapper))
        webob_res = self.req.get_response(api)

        return utils.FakeHTTPResponse(status=webob_res.status_int,
                                      headers=webob_res.headers,
                                      data=webob_res.body)


def stub_out_registry_and_store_server(stubs, base_dir):
    """
    Mocks calls to 127.0.0.1 on 9191 and 9292 for testing so
    that a real Glance server does not need to be up and
    running
    """

    class FakeSocket(object):
        # Minimal socket stand-in; only fileno() is ever consulted.

        def __init__(self, *args, **kwargs):
            pass

        def fileno(self):
            return 42

    class FakeSendFile(object):
        # Emulates sendfile() by copying bytes from the fd into req.body.

        def __init__(self, req):
            self.req = req

        def sendfile(self, o, i, offset, nbytes):
            os.lseek(i, offset, os.SEEK_SET)
            prev_len = len(self.req.body)
            self.req.body += os.read(i, nbytes)
            # Return the number of bytes actually appended, mirroring
            # sendfile()'s byte-count return value.
            return len(self.req.body) - prev_len

    class FakeGlanceConnection(object):
        """httplib-style connection that dispatches into the API WSGI app."""

        def __init__(self, *args, **kwargs):
            self.sock = FakeSocket()
            self.stub_force_sendfile = kwargs.get('stub_force_sendfile',
                                                  SENDFILE_SUPPORTED)

        def connect(self):
            return True

        def close(self):
            return True

        def _clean_url(self, url):
            #TODO(bcwaldon): Fix the hack that strips off v1
            return url.replace('/v1', '', 1) if url.startswith('/v1') else url

        def putrequest(self, method, url):
            self.req = webob.Request.blank(self._clean_url(url))
            if self.stub_force_sendfile:
                fake_sendfile = FakeSendFile(self.req)
                stubs.Set(sendfile, 'sendfile', fake_sendfile.sendfile)
            self.req.method = method

        def putheader(self, key, value):
            self.req.headers[key] = value

        def endheaders(self):
            hl = [i.lower() for i in self.req.headers.keys()]
            assert not ('content-length' in hl and
                        'transfer-encoding' in hl), \
                'Content-Length and Transfer-Encoding are mutually exclusive'

        def send(self, data):
            # send() is called during chunked-transfer encoding, and
            # data is of the form %x\r\n%s\r\n. Strip off the %x and
            # only write the actual data in tests.
            self.req.body += data.split("\r\n")[1]

        def request(self, method, url, body=None, headers=None):
            self.req = webob.Request.blank(self._clean_url(url))
            self.req.method = method
            if headers:
                self.req.headers = headers
            if body:
                self.req.body = body

        def getresponse(self):
            mapper = routes.Mapper()
            api = context.UnauthenticatedContextMiddleware(router.API(mapper))
            res = self.req.get_response(api)

            # httplib.Response has a read() method...fake it out
            def fake_reader():
                return res.body

            setattr(res, 'read', fake_reader)
            return res

    def fake_get_connection_type(client):
        """
        Returns the proper connection type
        """
        DEFAULT_REGISTRY_PORT = 9191
        DEFAULT_API_PORT = 9292

        # Dispatch on the port the client was configured with.
        if (client.port == DEFAULT_API_PORT and
            client.host == '0.0.0.0'):
            return FakeGlanceConnection
        elif (client.port == DEFAULT_REGISTRY_PORT and
              client.host == '0.0.0.0'):
            return FakeRegistryConnection

    def fake_image_iter(self):
        for i in self.source.app_iter:
            yield i

    def fake_sendable(self, body):
        # force is tri-state: None -> defer to the original _sendable,
        # True/False -> force the sendfile decision for this test.
        force = getattr(self, 'stub_force_sendfile', None)
        if force is None:
            return self._stub_orig_sendable(body)
        else:
            if force:
                assert glance.common.client.SENDFILE_SUPPORTED
            return force

    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
    # Keep a handle on the real _sendable so fake_sendable can fall back.
    setattr(glance.common.client.BaseClient, '_stub_orig_sendable',
            glance.common.client.BaseClient._sendable)
    stubs.Set(glance.common.client.BaseClient, '_sendable', fake_sendable)


def stub_out_registry_server(stubs, **kwargs):
    """
    Mocks calls to 127.0.0.1 on 9191 for testing so
    that a real Glance Registry server does not need to be up and
    running
    """
    def fake_get_connection_type(client):
        """
        Returns the proper connection type
        """
        DEFAULT_REGISTRY_PORT = 9191

        if (client.port == DEFAULT_REGISTRY_PORT and
            client.host == '0.0.0.0'):
            return FakeRegistryConnection

    def fake_image_iter(self):
        for i in self.response.app_iter:
            yield i

    stubs.Set(glance.common.client.BaseClient, 'get_connection_type',
              fake_get_connection_type)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3771_1
crossvul-python_data_bad_5539_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests relating to compute resources (e.g. guest VMs, networking and storage of VMs, and compute hosts on which they run).""" import base64 import functools import re import string import time import urllib from nova import block_device from nova.compute import instance_types from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.consoleauth import rpcapi as consoleauth_rpcapi from nova import crypto from nova.db import base from nova import exception from nova import flags from nova.image import glance from nova import network from nova import notifications from nova.openstack.common import excutils from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils import nova.policy from nova import quota from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova import volume LOG = logging.getLogger(__name__) FLAGS = flags.FLAGS 
flags.DECLARE('consoleauth_topic', 'nova.consoleauth') MAX_USERDATA_SIZE = 65535 QUOTAS = quota.QUOTAS def check_instance_state(vm_state=None, task_state=(None,)): """Decorator to check VM and/or task state before entry to API functions. If the instance is in the wrong state, the wrapper will raise an exception. """ if vm_state is not None and not isinstance(vm_state, set): vm_state = set(vm_state) if task_state is not None and not isinstance(task_state, set): task_state = set(task_state) def outer(f): @functools.wraps(f) def inner(self, context, instance, *args, **kw): if vm_state is not None and instance['vm_state'] not in vm_state: raise exception.InstanceInvalidState( attr='vm_state', instance_uuid=instance['uuid'], state=instance['vm_state'], method=f.__name__) if (task_state is not None and instance['task_state'] not in task_state): raise exception.InstanceInvalidState( attr='task_state', instance_uuid=instance['uuid'], state=instance['task_state'], method=f.__name__) return f(self, context, instance, *args, **kw) return inner return outer def check_instance_lock(function): @functools.wraps(function) def inner(self, context, instance, *args, **kwargs): if instance['locked'] and not context.is_admin: raise exception.InstanceIsLocked(instance_uuid=instance['uuid']) return function(self, context, instance, *args, **kwargs) return inner def policy_decorator(scope): """Check corresponding policy prior of wrapped method to execution""" def outer(func): @functools.wraps(func) def wrapped(self, context, target, *args, **kwargs): check_policy(context, func.__name__, target, scope) return func(self, context, target, *args, **kwargs) return wrapped return outer wrap_check_policy = policy_decorator(scope='compute') wrap_check_security_groups_policy = policy_decorator( scope='compute:security_groups') def check_policy(context, action, target, scope='compute'): _action = '%s:%s' % (scope, action) nova.policy.enforce(context, _action, target) class API(base.Base): """API 
for interacting with the compute manager."""

    def __init__(self, image_service=None, network_api=None, volume_api=None,
                 security_group_api=None, **kwargs):
        """Wire up the service APIs this facade delegates to.

        Each collaborator may be injected (for testing) or defaults to the
        project-wide implementation.
        """
        self.image_service = (image_service or
                              glance.get_default_image_service())
        self.network_api = network_api or network.API()
        self.volume_api = volume_api or volume.API()
        self.security_group_api = security_group_api or SecurityGroupAPI()
        # Security-group handler class is configurable via FLAGS.
        self.sgh = importutils.import_object(FLAGS.security_group_handler)
        self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        super(API, self).__init__(**kwargs)

    def _instance_update(self, context, instance_uuid, **kwargs):
        """Update an instance in the database using kwargs as value."""
        (old_ref, instance_ref) = self.db.instance_update_and_get_original(
                context, instance_uuid, kwargs)
        # Emit an update notification so listeners observe the transition
        # from old_ref to instance_ref.
        notifications.send_update(context, old_ref, instance_ref)
        return instance_ref

    def _check_injected_file_quota(self, context, injected_files):
        """Enforce quota limits on injected files.

        Raises a QuotaError if any limit is exceeded.
        """
        if injected_files is None:
            return

        # Check number of files first
        try:
            QUOTAS.limit_check(context,
                               injected_files=len(injected_files))
        except exception.OverQuota:
            raise exception.OnsetFileLimitExceeded()

        # OK, now count path and content lengths; we're looking for
        # the max...
        max_path = 0
        max_content = 0
        for path, content in injected_files:
            max_path = max(max_path, len(path))
            max_content = max(max_content, len(content))

        # Only the largest path/content are checked; if the max passes,
        # every file passes.
        try:
            QUOTAS.limit_check(context,
                               injected_file_path_bytes=max_path,
                               injected_file_content_bytes=max_content)
        except exception.OverQuota as exc:
            # Favor path limit over content limit for reporting
            # purposes
            if 'injected_file_path_bytes' in exc.kwargs['overs']:
                raise exception.OnsetFilePathLimitExceeded()
            else:
                raise exception.OnsetFileContentLimitExceeded()

    def _check_num_instances_quota(self, context, instance_type, min_count,
                                   max_count):
        """Enforce quota limits on number of instances created.

        Returns (num_instances, reservations): how many instances may be
        built and the quota reservations backing them.  Raises
        TooManyInstances when even min_count cannot be satisfied.
        """

        # Determine requested cores and ram
        req_cores = max_count * instance_type['vcpus']
        req_ram = max_count * instance_type['memory_mb']

        # Check the quota
        try:
            reservations = QUOTAS.reserve(context, instances=max_count,
                                          cores=req_cores, ram=req_ram)
        except exception.OverQuota as exc:
            # OK, we exceeded quota; let's figure out why...
            quotas = exc.kwargs['quotas']
            usages = exc.kwargs['usages']
            overs = exc.kwargs['overs']

            # Remaining capacity per resource = quota - (in_use + reserved).
            headroom = dict((res, quotas[res] -
                             (usages[res]['in_use'] + usages[res]['reserved']))
                            for res in quotas.keys())
            allowed = headroom['instances']
            # Reduce 'allowed' instances in line with the cores & ram headroom
            if instance_type['vcpus']:
                allowed = min(allowed,
                              headroom['cores'] // instance_type['vcpus'])
            if instance_type['memory_mb']:
                allowed = min(allowed,
                              headroom['ram'] // instance_type['memory_mb'])

            # Convert to the appropriate exception message
            if allowed <= 0:
                msg = _("Cannot run any more instances of this type.")
                allowed = 0
            elif min_count <= allowed <= max_count:
                # We're actually OK, but still need reservations
                return self._check_num_instances_quota(context, instance_type,
                                                       min_count, allowed)
            else:
                msg = (_("Can only run %s more instances of this type.") %
                       allowed)

            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = used + headroom[resource]
            overs = ','.join(overs)

            pid = context.project_id
            LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
                       " tried to run %(min_count)s instances. %(msg)s"),
                     locals())
            requested = dict(instances=min_count, cores=req_cores,
                             ram=req_ram)
            raise exception.TooManyInstances(overs=overs,
                                             req=requested[resource],
                                             used=used,
                                             allowed=total_allowed,
                                             resource=resource)

        return max_count, reservations

    def _check_metadata_properties_quota(self, context, metadata=None):
        """Enforce quota limits on metadata properties."""
        if not metadata:
            metadata = {}
        num_metadata = len(metadata)
        try:
            QUOTAS.limit_check(context, metadata_items=num_metadata)
        except exception.OverQuota as exc:
            pid = context.project_id
            LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
                       "%(num_metadata)s metadata properties") % locals())
            quota_metadata = exc.kwargs['quotas']['metadata_items']
            raise exception.MetadataLimitExceeded(allowed=quota_metadata)

        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        # as if this is quota-controlled for forwards compatibility
        for k, v in metadata.iteritems():
            if len(k) == 0:
                msg = _("Metadata property key blank")
                LOG.warn(msg)
                raise exception.InvalidMetadata(reason=msg)
            if len(k) > 255:
                msg = _("Metadata property key greater than 255 characters")
                LOG.warn(msg)
                raise exception.InvalidMetadataSize(reason=msg)
            if len(v) > 255:
                msg = _("Metadata property value greater than 255 characters")
                LOG.warn(msg)
                raise exception.InvalidMetadataSize(reason=msg)

    def _check_requested_networks(self, context, requested_networks):
        """ Check if the networks requested belongs to the project
            and the fixed IP address for each network provided is within
            same the network block
        """
        if requested_networks is None:
            return

        # Delegates validation entirely to the network API.
        self.network_api.validate_networks(context, requested_networks)

    @staticmethod
    def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image,
                                   image_service):
        """Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:

            1. Passed in with create-instance request.

            2. Inherited from image.

            3. Forced to None by using `null_kernel` FLAG.
        """
        # Inherit from image if not specified
        if kernel_id is None:
            kernel_id = image['properties'].get('kernel_id')

        if ramdisk_id is None:
            ramdisk_id = image['properties'].get('ramdisk_id')

        # Force to None if using null_kernel
        if kernel_id == str(FLAGS.null_kernel):
            kernel_id = None
            ramdisk_id = None

        # Verify kernel and ramdisk exist (fail-fast)
        if kernel_id is not None:
            image_service.show(context, kernel_id)

        if ramdisk_id is not None:
            image_service.show(context, ramdisk_id)

        return kernel_id, ramdisk_id

    @staticmethod
    def _handle_availability_zone(availability_zone):
        # NOTE(vish): We have a legacy hack to allow admins to specify hosts
        #             via az using az:host. It might be nice to expose an
        #             api to specify specific hosts to force onto, but for
        #             now it just supports this legacy hack.
        forced_host = None
        if availability_zone and ':' in availability_zone:
            availability_zone, forced_host = availability_zone.split(':')

        if not availability_zone:
            availability_zone = FLAGS.default_schedule_zone

        # Returns (availability_zone, forced_host); forced_host is None
        # unless the legacy az:host syntax was used.
        return availability_zone, forced_host

    @staticmethod
    def _inherit_properties_from_image(image, auto_disk_config):
        """Derive per-instance options (os_type, architecture, vm_mode,
        auto_disk_config) from the image's properties."""
        def prop(prop_, prop_type=None):
            """Return the value of an image property."""
            value = image['properties'].get(prop_)
            if value is not None:
                if prop_type == 'bool':
                    value = utils.bool_from_str(value)
            return value

        options_from_image = {'os_type': prop('os_type'),
                              'architecture': prop('arch'),
                              'vm_mode': prop('vm_mode')}

        # If instance doesn't have auto_disk_config overridden by request, use
        # whatever the image indicates
        if auto_disk_config is None:
            auto_disk_config = prop('auto_disk_config', prop_type='bool')

        options_from_image['auto_disk_config'] = auto_disk_config
        return options_from_image

    def _create_instance(self, context, instance_type,
               image_href, kernel_id, ramdisk_id,
               min_count, max_count,
               display_name, display_description,
               key_name, key_data, security_group,
               availability_zone, user_data, metadata,
               injected_files, admin_password,
               access_ip_v4, access_ip_v6,
               requested_networks, config_drive,
               block_device_mapping, auto_disk_config,
               reservation_id=None, scheduler_hints=None):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed and schedule the instance(s) for
        creation."""

        # Normalize defaults for optional arguments.
        if not metadata:
            metadata = {}
        if not security_group:
            security_group = 'default'

        if not instance_type:
            instance_type = instance_types.get_default_instance_type()
        if not min_count:
            min_count = 1
        if not max_count:
            max_count = min_count

        block_device_mapping = block_device_mapping or []

        if instance_type['disabled']:
            raise exception.InstanceTypeNotFound(
                    instance_type_id=instance_type['id'])

        # Reserve quotas
        num_instances, quota_reservations = self._check_num_instances_quota(
                context, instance_type, min_count, max_count)

        # Try to create the instance
        try:
            instances = []
            instance_uuids = []
            self._check_metadata_properties_quota(context, metadata)
            self._check_injected_file_quota(context, injected_files)
            self._check_requested_networks(context, requested_networks)

            (image_service, image_id) = glance.get_remote_image_service(
                    context, image_href)
            image = image_service.show(context, image_id)

            # Fail fast if the flavor is too small for the image.
            if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
                raise exception.InstanceTypeMemoryTooSmall()
            if instance_type['root_gb'] < int(image.get('min_disk') or 0):
                raise exception.InstanceTypeDiskTooSmall()

            # Handle config_drive
            config_drive_id = None
            if config_drive and config_drive is not True:
                # config_drive is volume id
                config_drive_id = config_drive
                config_drive = None

                # Ensure config_drive image exists
                image_service.show(context, config_drive_id)

            kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
                    context, kernel_id, ramdisk_id, image, image_service)

            if key_data is None and key_name:
                key_pair = self.db.key_pair_get(context, context.user_id,
                                                key_name)
                key_data = key_pair['public_key']

            if reservation_id is None:
                reservation_id = utils.generate_uid('r')

            # grab the architecture from glance
            architecture = image['properties'].get('architecture', 'Unknown')

            root_device_name = block_device.properties_root_device_name(
                image['properties'])

            availability_zone, forced_host = self._handle_availability_zone(
                    availability_zone)

            # Template of DB column values shared by all instances in this
            # request; per-instance fields are filled in later.
            base_options = {
                'reservation_id': reservation_id,
                'image_ref': image_href,
                'kernel_id': kernel_id or '',
                'ramdisk_id': ramdisk_id or '',
                'power_state': power_state.NOSTATE,
                'vm_state': vm_states.BUILDING,
                'config_drive_id': config_drive_id or '',
                'config_drive': config_drive or '',
                'user_id': context.user_id,
                'project_id': context.project_id,
                'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ',
                                             time.gmtime()),
                'instance_type_id': instance_type['id'],
                'memory_mb': instance_type['memory_mb'],
                'vcpus': instance_type['vcpus'],
                'root_gb': instance_type['root_gb'],
                'ephemeral_gb': instance_type['ephemeral_gb'],
                'display_name': display_name,
                'display_description': display_description or '',
                'user_data': user_data,
                'key_name': key_name,
                'key_data': key_data,
                'locked': False,
                'metadata': metadata,
                'access_ip_v4': access_ip_v4,
                'access_ip_v6': access_ip_v6,
                'availability_zone': availability_zone,
                'root_device_name': root_device_name,
                'architecture': architecture,
                'progress': 0}

            if user_data:
                l = len(user_data)
                if l > MAX_USERDATA_SIZE:
                    # NOTE(mikal): user_data is stored in a text column, and
                    # the database might silently truncate if its over length.
                    raise exception.InstanceUserDataTooLarge(
                        length=l, maxsize=MAX_USERDATA_SIZE)
                # Validate that user_data is well-formed base64.
                # NOTE(review): base64.decodestring is deprecated on
                # Python 3 (decodebytes) — fine under Python 2, revisit
                # on migration.
                try:
                    base64.decodestring(user_data)
                except base64.binascii.Error:
                    raise exception.InstanceUserDataMalformed()

            options_from_image = self._inherit_properties_from_image(
                    image, auto_disk_config)

            base_options.update(options_from_image)

            LOG.debug(_("Going to run %s instances...") % num_instances)

            filter_properties = dict(scheduler_hints=scheduler_hints)
            # Only admins may force a specific host (legacy az:host syntax).
            if context.is_admin and forced_host:
                filter_properties['force_hosts'] = [forced_host]

            for i in xrange(num_instances):
                options = base_options.copy()
                instance = self.create_db_entry_for_new_instance(
                        context, instance_type, image, options,
                        security_group, block_device_mapping)
                instances.append(instance)
                instance_uuids.append(instance['uuid'])

        # In the case of any exceptions, attempt DB cleanup and rollback the
        # quota reservations.
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    for instance_uuid in instance_uuids:
                        self.db.instance_destroy(context, instance_uuid)
                finally:
                    QUOTAS.rollback(context, quota_reservations)

        # Commit the reservations
        QUOTAS.commit(context, quota_reservations)

        request_spec = {
            'image': jsonutils.to_primitive(image),
            'instance_properties': base_options,
            'instance_type': instance_type,
            'instance_uuids': instance_uuids,
            'block_device_mapping': block_device_mapping,
            'security_group': security_group,
        }

        # Hand off to the scheduler; it picks hosts and casts to compute.
        self.scheduler_rpcapi.run_instance(context,
                request_spec=request_spec,
                admin_password=admin_password, injected_files=injected_files,
                requested_networks=requested_networks, is_first_time=True,
                filter_properties=filter_properties)

        return (instances, reservation_id)

    @staticmethod
    def _volume_size(instance_type, virtual_name):
        """Return the size in GB for the swap/ephemeral device named by
        virtual_name, as dictated by the instance type."""
        size = 0
        if virtual_name == 'swap':
            size = instance_type.get('swap', 0)
        elif block_device.is_ephemeral(virtual_name):
            num = block_device.ephemeral_num(virtual_name)

            # TODO(yamahata): ephemeralN where N > 0
            # Only ephemeral0 is allowed for now because InstanceTypes
            # table only allows single
            # local disk, ephemeral_gb.
            # In order to enhance it, we need to add a new columns to
            # instance_types table.
            if num > 0:
                return 0

            size = instance_type.get('ephemeral_gb')

        return size

    def _update_image_block_device_mapping(self, elevated_context,
                                           instance_type, instance_uuid,
                                           mappings):
        """tell vm driver to create ephemeral/swap device at boot time by
        updating BlockDeviceMapping
        """
        for bdm in block_device.mappings_prepend_dev(mappings):
            LOG.debug(_("bdm %s"), bdm, instance_uuid=instance_uuid)

            virtual_name = bdm['virtual']
            # 'ami'/'root' entries describe the root disk, not swap/ephemeral.
            if virtual_name == 'ami' or virtual_name == 'root':
                continue

            if not block_device.is_swap_or_ephemeral(virtual_name):
                continue

            size = self._volume_size(instance_type, virtual_name)
            if size == 0:
                continue

            values = {
                'instance_uuid': instance_uuid,
                'device_name': bdm['device'],
                'virtual_name': virtual_name,
                'volume_size': size}
            self.db.block_device_mapping_update_or_create(elevated_context,
                                                          values)

    def _update_block_device_mapping(self, elevated_context, instance_type,
                                     instance_uuid, block_device_mapping):
        """tell vm driver to attach volume at boot time by updating
        BlockDeviceMapping
        """
        LOG.debug(_("block_device_mapping %s"), block_device_mapping,
                  instance_uuid=instance_uuid)
        for bdm in block_device_mapping:
            assert 'device_name' in bdm

            values = {'instance_uuid': instance_uuid}
            for key in ('device_name', 'delete_on_termination', 'virtual_name',
                        'snapshot_id', 'volume_id', 'volume_size',
                        'no_device'):
                values[key] = bdm.get(key)

            virtual_name = bdm.get('virtual_name')
            if (virtual_name is not None and
                block_device.is_swap_or_ephemeral(virtual_name)):
                size = self._volume_size(instance_type, virtual_name)
                if size == 0:
                    continue
                values['volume_size'] = size

            # NOTE(yamahata): NoDevice eliminates devices defined in image
            #                 files by command line option.
            #                 (--block-device-mapping)
            if virtual_name == 'NoDevice':
                values['no_device'] = True
                # Clear everything else so the row only records suppression.
                for k in ('delete_on_termination', 'virtual_name',
                          'snapshot_id', 'volume_id', 'volume_size'):
                    values[k] = None

            self.db.block_device_mapping_update_or_create(elevated_context,
                                                          values)

    def _populate_instance_for_bdm(self, context, instance, instance_type,
            image, block_device_mapping):
        """Populate instance block device mapping information."""
        # FIXME(comstud): Why do the block_device_mapping DB calls
        # require elevated context?
        elevated = context.elevated()
        instance_uuid = instance['uuid']
        mappings = image['properties'].get('mappings', [])
        if mappings:
            self._update_image_block_device_mapping(elevated,
                    instance_type, instance_uuid, mappings)

        image_bdm = image['properties'].get('block_device_mapping', [])
        # Request-supplied mappings are applied after image-supplied ones,
        # so they take precedence on conflicts.
        for mapping in (image_bdm, block_device_mapping):
            if not mapping:
                continue
            self._update_block_device_mapping(elevated,
                    instance_type, instance_uuid, mapping)

    def _populate_instance_shutdown_terminate(self, instance, image,
                                              block_device_mapping):
        """Populate instance shutdown_terminate information."""
        # Instances with any block device mapping are not terminated on
        # shutdown.
        if (block_device_mapping or
            image['properties'].get('mappings') or
            image['properties'].get('block_device_mapping')):
            instance['shutdown_terminate'] = False

    def _populate_instance_names(self, instance):
        """Populate instance display_name and hostname."""
        display_name = instance.get('display_name')
        hostname = instance.get('hostname')

        if display_name is None:
            display_name = self._default_display_name(instance['uuid'])
            instance['display_name'] = display_name
        if hostname is None:
            # Missing hostname defaults to the display name, sanitized.
            hostname = display_name
        instance['hostname'] = utils.sanitize_hostname(hostname)

    def _default_display_name(self, instance_uuid):
        # Fallback display name when the caller supplied none.
        return "Server %s" % instance_uuid

    def _populate_instance_for_create(self, base_options, image,
                                      security_groups):
        """Build the beginning of a new instance."""
        # NOTE: mutates and returns base_options rather than copying it.
        instance = base_options
        if not instance.get('uuid'):
            # Generate the instance_uuid here so we can use it
            # for additional setup before creating the DB entry.
            instance['uuid'] = str(utils.gen_uuid())

        instance['launch_index'] = 0
        instance['vm_state'] = vm_states.BUILDING
        instance['task_state'] = task_states.SCHEDULING
        instance['architecture'] = image['properties'].get('architecture')
        instance['info_cache'] = {'network_info': '[]'}

        # Store image properties so we can use them later
        # (for notifications, etc).  Only store what we can.
        instance.setdefault('system_metadata', {})
        for key, value in image['properties'].iteritems():
            # Values are truncated to the 255-char system_metadata column.
            new_value = str(value)[:255]
            instance['system_metadata']['image_%s' % key] = new_value

        # Keep a record of the original base image that this
        # image's instance is derived from:
        base_image_ref = image['properties'].get('base_image_ref')
        if not base_image_ref:
            # base image ref property not previously set through a snapshot.
            # default to using the image ref as the base:
            base_image_ref = base_options['image_ref']

        instance['system_metadata']['image_base_image_ref'] = base_image_ref

        # Use 'default' security_group if none specified.
        if security_groups is None:
            security_groups = ['default']
        elif not isinstance(security_groups, list):
            security_groups = [security_groups]
        instance['security_groups'] = security_groups

        return instance

    #NOTE(bcwaldon): No policy check since this is only used by scheduler and
    #                the compute api. That should probably be cleaned up,
    #                though.
    def create_db_entry_for_new_instance(self, context, instance_type, image,
            base_options, security_group, block_device_mapping):
        """Create an entry in the DB for this new instance,
        including any related table updates (such as security group,
        etc).

        This is called by the scheduler after a location for the
        instance has been determined.
        """
        instance = self._populate_instance_for_create(base_options,
                image, security_group)

        self._populate_instance_names(instance)

        self._populate_instance_shutdown_terminate(instance, image,
                                                   block_device_mapping)

        # ensure_default security group is called before the instance
        # is created so the creation of the default security group is
        # proxied to the sgh.
        self.security_group_api.ensure_default(context)
        instance = self.db.instance_create(context, instance)

        self._populate_instance_for_bdm(context, instance,
                instance_type, image, block_device_mapping)

        # send a state update notification for the initial create to
        # show it going from non-existent to BUILDING
        notifications.send_update_with_states(context, instance, None,
                vm_states.BUILDING, None, None, service="api")

        return instance

    def _check_create_policies(self, context, availability_zone,
            requested_networks, block_device_mapping):
        """Check policies for create()."""
        target = {'project_id': context.project_id,
                  'user_id': context.user_id,
                  'availability_zone': availability_zone}
        check_policy(context, 'create', target)

        # Extra policy checks only when the optional features are requested.
        if requested_networks:
            check_policy(context, 'create:attach_network', target)

        if block_device_mapping:
            check_policy(context, 'create:attach_volume', target)

    def create(self, context, instance_type,
               image_href, kernel_id=None, ramdisk_id=None,
               min_count=None, max_count=None,
               display_name=None, display_description=None,
               key_name=None, key_data=None, security_group=None,
               availability_zone=None, user_data=None, metadata=None,
               injected_files=None, admin_password=None,
               block_device_mapping=None, access_ip_v4=None,
               access_ip_v6=None, requested_networks=None, config_drive=None,
               auto_disk_config=None, scheduler_hints=None):
        """
        Provision instances, sending instance information to the
        scheduler.  The scheduler will determine where the instance(s)
        go and will handle creating the DB entries.
        Returns a tuple of (instances, reservation_id)
        """
        self._check_create_policies(context, availability_zone,
                requested_networks, block_device_mapping)

        # All validation/scheduling work is delegated to _create_instance.
        return self._create_instance(
                               context, instance_type,
                               image_href, kernel_id, ramdisk_id,
                               min_count, max_count,
                               display_name, display_description,
                               key_name, key_data, security_group,
                               availability_zone, user_data, metadata,
                               injected_files, admin_password,
                               access_ip_v4, access_ip_v6,
                               requested_networks, config_drive,
                               block_device_mapping, auto_disk_config,
                               scheduler_hints=scheduler_hints)

    def trigger_provider_fw_rules_refresh(self, context):
        """Called when a rule is added/removed from a provider firewall"""

        # Fan the refresh out to every compute host.
        hosts = [x['host'] for (x, idx)
                           in self.db.service_get_all_compute_sorted(context)]
        for host in hosts:
            self.compute_rpcapi.refresh_provider_fw_rules(context, host)

    @wrap_check_policy
    def update(self, context, instance, **kwargs):
        """Updates the instance in the datastore.

        :param context: The security context
        :param instance: The instance to update
        :param kwargs: All additional keyword args are treated
                       as data fields of the instance to be
                       updated

        :returns: None
        """
        _, updated = self._update(context, instance, **kwargs)
        return updated

    def _update(self, context, instance, **kwargs):
        """Apply kwargs to the instance row and notify.

        Returns (old, new) as plain dicts of the pre/post-update rows.
        """
        # Update the instance record and send a state update notification
        # if task or vm state changed
        old_ref, instance_ref = self.db.instance_update_and_get_original(
                context, instance['uuid'], kwargs)
        notifications.send_update(context, old_ref, instance_ref,
                service="api")

        return dict(old_ref.iteritems()), dict(instance_ref.iteritems())

    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=None, task_state=None)
    def soft_delete(self, context, instance):
        """Terminate an instance."""
        LOG.debug(_('Going to try to soft delete instance'),
                  instance=instance)

        if instance['disable_terminate']:
            return

        # NOTE(jerdfelt): The compute daemon handles reclaiming instances
        # that are in soft delete. If there is no host assigned, there is
        # no daemon to reclaim, so delete it immediately.
        if instance['host']:
            instance = self.update(context, instance,
                                   task_state=task_states.POWERING_OFF,
                                   expected_task_state=None,
                                   deleted_at=timeutils.utcnow())
            self.compute_rpcapi.power_off_instance(context, instance)
        else:
            LOG.warning(_('No host for instance, deleting immediately'),
                        instance=instance)
            try:
                self.db.instance_destroy(context, instance['uuid'])
            except exception.InstanceNotFound:
                # NOTE(comstud): Race condition. Instance already gone.
                pass

    def _delete(self, context, instance):
        """Terminate an instance, adjusting quota usage and handling the
        host-less, resized, and compute-down cases."""
        host = instance['host']
        reservations = None
        try:
            #Note(maoy): no expected_task_state needs to be set
            old, updated = self._update(context, instance,
                                        task_state=task_states.DELETING,
                                        progress=0)

            # Avoid double-counting the quota usage reduction
            # where delete is already in progress
            if old['task_state'] != task_states.DELETING:
                reservations = QUOTAS.reserve(context,
                                              instances=-1,
                                              cores=-instance['vcpus'],
                                              ram=-instance['memory_mb'])

            if not host:
                # Just update database, nothing else we can do
                # NOTE(review): host is falsy here, so equal_any(host)
                # constrains on None/'' — confirm the DB layer treats
                # that as "still unscheduled" as intended.
                constraint = self.db.constraint(host=self.db.equal_any(host))
                try:
                    result = self.db.instance_destroy(context,
                                                      instance['uuid'],
                                                      constraint)
                    if reservations:
                        QUOTAS.commit(context, reservations)
                    return result
                except exception.ConstraintNotMet:
                    # Refresh to get new host information
                    instance = self.get(context, instance['uuid'])

            if instance['vm_state'] == vm_states.RESIZED:
                # If in the middle of a resize, use confirm_resize to
                # ensure the original instance is cleaned up too
                get_migration = self.db.migration_get_by_instance_and_status
                try:
                    migration_ref = get_migration(context.elevated(),
                            instance['uuid'], 'finished')
                except exception.MigrationNotFoundByStatus:
                    migration_ref = None
                if migration_ref:
                    src_host = migration_ref['source_compute']
                    # Call since this can race with the terminate_instance.
                    # The resize is done but awaiting confirmation/reversion,
                    # so there are two cases:
                    # 1. up-resize: here -instance['vcpus'/'memory_mb'] match
                    #    the quota usages accounted for this instance,
                    #    so no further quota adjustment is needed
                    # 2. down-resize: here -instance['vcpus'/'memory_mb'] are
                    #    shy by delta(old, new) from the quota usages accounted
                    #    for this instance, so we must adjust
                    deltas = self._downsize_quota_delta(context,
                                                        migration_ref)
                    downsize_reservations = self._reserve_quota_delta(context,
                                                                      deltas)
                    self.compute_rpcapi.confirm_resize(context.elevated(),
                            instance, migration_ref['id'],
                            host=src_host, cast=False,
                            reservations=downsize_reservations)

            is_up = False
            bdms = self.db.block_device_mapping_get_all_by_instance(
                context, instance["uuid"])
            #Note(jogo): db allows for multiple compute services per host
            try:
                services = self.db.service_get_all_compute_by_host(
                        context.elevated(), instance['host'])
            except exception.ComputeHostNotFound:
                services = []
            for service in services:
                if utils.service_is_up(service):
                    is_up = True
                    # Compute is alive: let it tear the instance down.
                    self.compute_rpcapi.terminate_instance(context, instance)
                    break
            if not is_up:
                # If compute node isn't up, just delete from DB
                self._local_delete(context, instance, bdms)
                if reservations:
                    QUOTAS.commit(context, reservations)
        except exception.InstanceNotFound:
            # NOTE(comstud): Race condition. Instance already gone.
            if reservations:
                QUOTAS.rollback(context, reservations)
        except Exception:
            with excutils.save_and_reraise_exception():
                if reservations:
                    QUOTAS.rollback(context, reservations)

    def _local_delete(self, context, instance, bdms):
        """Clean up an instance purely via the DB/API services when its
        compute host is unreachable."""
        LOG.warning(_('host for instance is down, deleting from '
                      'database'), instance=instance)
        instance_uuid = instance['uuid']
        self.db.instance_info_cache_delete(context, instance_uuid)
        compute_utils.notify_about_instance_usage(
            context, instance, "delete.start")

        elevated = context.elevated()
        self.network_api.deallocate_for_instance(elevated, instance)
        system_meta = self.db.instance_system_metadata_get(context,
            instance_uuid)

        # cleanup volumes
        for bdm in bdms:
            if bdm['volume_id']:
                volume = self.volume_api.get(context, bdm['volume_id'])
                # NOTE(vish): We don't have access to correct volume
                #             connector info, so just pass a fake
                #             connector. This can be improved when we
                #             expose get_volume_connector to rpc.
                connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
                self.volume_api.terminate_connection(context,
                                                     volume,
                                                     connector)
                self.volume_api.detach(elevated, volume)
                if bdm['delete_on_termination']:
                    self.volume_api.delete(context, volume)
            self.db.block_device_mapping_destroy(context, bdm['id'])
        instance = self._instance_update(context,
                                         instance_uuid,
                                         vm_state=vm_states.DELETED,
                                         task_state=None,
                                         terminated_at=timeutils.utcnow())
        self.db.instance_destroy(context, instance_uuid)
        compute_utils.notify_about_instance_usage(
            context, instance, "delete.end", system_metadata=system_meta)

    # NOTE(maoy): we allow delete to be called no matter what vm_state says.
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=None, task_state=None)
    def delete(self, context, instance):
        """Terminate an instance."""
        LOG.debug(_("Going to try to terminate instance"), instance=instance)

        if instance['disable_terminate']:
            return

        self._delete(context, instance)

    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
    def restore(self, context, instance):
        """Restore a previously deleted (but not reclaimed) instance."""
        if instance['host']:
            # Hosted instance: power it back on via the compute daemon.
            instance = self.update(context, instance,
                                   task_state=task_states.POWERING_ON,
                                   expected_task_state=None,
                                   deleted_at=None)
            self.compute_rpcapi.power_on_instance(context, instance)
        else:
            # No host: a DB state flip is all that is needed.
            self.update(context,
                        instance,
                        vm_state=vm_states.ACTIVE,
                        task_state=None,
                        expected_task_state=None,
                        deleted_at=None)

    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SOFT_DELETED])
    def force_delete(self, context, instance):
        """Force delete a previously deleted (but not reclaimed) instance."""
        self._delete(context, instance)

    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
                                    vm_states.ERROR, vm_states.STOPPED],
                          task_state=[None])
    def stop(self, context, instance, do_cast=True):
        """Stop an instance."""
        LOG.debug(_("Going to try to stop instance"), instance=instance)

        instance = self.update(context, instance,
                    task_state=task_states.STOPPING,
                    expected_task_state=None,
                    progress=0)

        self.compute_rpcapi.stop_instance(context, instance, cast=do_cast)

    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.STOPPED])
    def start(self, context, instance):
        """Start an instance."""
        LOG.debug(_("Going to try to start instance"), instance=instance)

        instance = self.update(context, instance,
                               task_state=task_states.STARTING,
                               expected_task_state=None)

        # TODO(yamahata): injected_files isn't supported right now.
        #                 It is used only for osapi. not for ec2 api.
        # availability_zone isn't used by run_instance.
        self.compute_rpcapi.start_instance(context, instance)

    #NOTE(bcwaldon): no policy check here since it should be rolled in to
    # search_opts in get_all
    def get_active_by_window(self, context, begin, end=None, project_id=None):
        """Get instances that were continuously active over a window."""
        return self.db.instance_get_active_by_window(context, begin, end,
                                                     project_id)

    #NOTE(bcwaldon): this doesn't really belong in this class
    def get_instance_type(self, context, instance_type_id):
        """Get an instance type by instance type id."""
        return instance_types.get_instance_type(instance_type_id)

    def get(self, context, instance_id):
        """Get a single instance with the given instance_id."""
        # NOTE(ameade): we still need to support integer ids for ec2
        if utils.is_uuid_like(instance_id):
            instance = self.db.instance_get_by_uuid(context, instance_id)
        else:
            instance = self.db.instance_get(context, instance_id)

        check_policy(context, 'get', instance)

        inst = dict(instance.iteritems())
        # NOTE(comstud): Doesn't get returned with iteritems
        inst['name'] = instance['name']
        return inst

    def get_all(self, context, search_opts=None, sort_key='created_at',
                sort_dir='desc', limit=None, marker=None):
        """Get all instances filtered by one of the given parameters.

        If there is no filter and the context is an admin, it will retrieve
        all instances in the system.

        Deleted instances will be returned by default, unless there is a
        search option that says otherwise.

        The results will be returned sorted in the order specified by the
        'sort_dir' parameter using the key specified in the 'sort_key'
        parameter.
        """

        #TODO(bcwaldon): determine the best argument for target here
        target = {
            'project_id': context.project_id,
            'user_id': context.user_id,
        }

        check_policy(context, "get_all", target)

        if search_opts is None:
            search_opts = {}

        LOG.debug(_("Searching by: %s") % str(search_opts))

        # Fixups for the DB call
        filters = {}

        def _remap_flavor_filter(flavor_id):
            # Translate a flavor id into the internal instance_type_id
            # filter; ValueError signals "no possible match".
            try:
                instance_type = instance_types.get_instance_type_by_flavor_id(
                        flavor_id)
            except exception.FlavorNotFound:
                raise ValueError()

            filters['instance_type_id'] = instance_type['id']

        def _remap_fixed_ip_filter(fixed_ip):
            # Turn fixed_ip into a regexp match. Since '.' matches
            # any character, we need to use regexp escaping for it.
            filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')

        # search_option to filter_name mapping.
        filter_mapping = {
                'image': 'image_ref',
                'name': 'display_name',
                'tenant_id': 'project_id',
                'flavor': _remap_flavor_filter,
                'fixed_ip': _remap_fixed_ip_filter}

        # copy from search_opts, doing various remappings as necessary
        for opt, value in search_opts.iteritems():
            # Do remappings.
            # Values not in the filter_mapping table are copied as-is.
            # If remapping is None, option is not copied
            # If the remapping is a string, it is the filter_name to use
            try:
                remap_object = filter_mapping[opt]
            except KeyError:
                filters[opt] = value
            else:
                # Remaps are strings to translate to, or functions to call
                # to do the translating as defined by the table above.
                if isinstance(remap_object, basestring):
                    filters[remap_object] = value
                else:
                    try:
                        remap_object(value)
                    # We already know we can't match the filter, so
                    # return an empty list
                    except ValueError:
                        return []

        inst_models = self._get_instances_by_filters(context, filters,
                                                     sort_key, sort_dir,
                                                     limit=limit,
                                                     marker=marker)

        # Convert the models to dictionaries
        instances = []
        for inst_model in inst_models:
            instance = dict(inst_model.iteritems())
            # NOTE(comstud): Doesn't get returned by iteritems
            instance['name'] = inst_model['name']
            instances.append(instance)

        return instances

    def _get_instances_by_filters(self, context, filters,
                                  sort_key, sort_dir,
                                  limit=None,
                                  marker=None):
        """Run the instance DB query, resolving ip/ip6 filters through the
        network API into a uuid filter first."""
        if 'ip6' in filters or 'ip' in filters:
            res = self.network_api.get_instance_uuids_by_ip_filter(context,
                                                                   filters)
            # NOTE(jkoelker) It is possible that we will get the same
            #                instance uuid twice (one for ipv4 and ipv6)
            uuids = set([r['instance_uuid'] for r in res])
            filters['uuid'] = uuids

        return self.db.instance_get_all_by_filters(context, filters,
                                                   sort_key, sort_dir,
                                                   limit=limit, marker=marker)

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
    def backup(self, context, instance, name, backup_type, rotation,
               extra_properties=None):
        """Backup the given instance

        :param instance: nova.db.sqlalchemy.models.Instance
        :param name: name of the backup or snapshot
            name = backup_type  # daily backups are called 'daily'
        :param rotation: int representing how many backups to keep around;
            None if rotation shouldn't be used (as in the case of snapshots)
        :param extra_properties: dict of extra image properties to include
        """
        recv_meta = self._create_image(context, instance, name, 'backup',
                backup_type=backup_type, rotation=rotation,
                extra_properties=extra_properties)
        return recv_meta

    @wrap_check_policy
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
    def snapshot(self, context, instance, name, extra_properties=None):
        """Snapshot the given instance.
        :param instance: nova.db.sqlalchemy.models.Instance
        :param name: name of the backup or snapshot
        :param extra_properties: dict of extra image properties to include

        :returns: A dict containing image metadata
        """
        return self._create_image(context, instance, name, 'snapshot',
                                  extra_properties=extra_properties)

    def _create_image(self, context, instance, name, image_type,
                      backup_type=None, rotation=None, extra_properties=None):
        """Create snapshot or backup for an instance on this host.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param name: string for name of the snapshot
        :param image_type: snapshot | backup
        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around;
            None if rotation shouldn't be used (as in the case of snapshots)
        :param extra_properties: dict of extra image properties to include
        """
        instance_uuid = instance['uuid']

        if image_type == "snapshot":
            task_state = task_states.IMAGE_SNAPSHOT
        elif image_type == "backup":
            task_state = task_states.IMAGE_BACKUP
        else:
            raise Exception(_('Image type not recognized %s') % image_type)

        # change instance state and notify
        old_vm_state = instance["vm_state"]
        old_task_state = instance["task_state"]

        # Atomic set: only transition task_state from None to avoid
        # racing with another in-flight operation.
        self.db.instance_test_and_set(
                context, instance_uuid, 'task_state', [None], task_state)

        notifications.send_update_with_states(context, instance,
                old_vm_state, instance["vm_state"],
                old_task_state, instance["task_state"],
                service="api", verify_states=True)

        properties = {
            'instance_uuid': instance_uuid,
            'user_id': str(context.user_id),
            'image_type': image_type,
        }

        # Persist base image ref as a Glance image property
        system_meta = self.db.instance_system_metadata_get(
                context, instance_uuid)
        base_image_ref = system_meta.get('image_base_image_ref')
        if base_image_ref:
            properties['base_image_ref'] = base_image_ref

        sent_meta = {'name': name, 'is_public': False}

        if image_type == 'backup':
            properties['backup_type'] = backup_type

        elif image_type == 'snapshot':
            min_ram, min_disk = self._get_minram_mindisk_params(context,
                                                                instance)
            if min_ram is not None:
                sent_meta['min_ram'] = min_ram
            if min_disk is not None:
                sent_meta['min_disk'] = min_disk

        # Caller-supplied properties override the defaults above.
        properties.update(extra_properties or {})
        sent_meta['properties'] = properties

        # Register the (empty) image first, then cast to compute to
        # upload the actual snapshot data.
        recv_meta = self.image_service.create(context, sent_meta)
        self.compute_rpcapi.snapshot_instance(context, instance=instance,
                image_id=recv_meta['id'], image_type=image_type,
                backup_type=backup_type, rotation=rotation)
        return recv_meta

    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
    def snapshot_volume_backed(self, context, instance, image_meta, name,
                               extra_properties=None):
        """Snapshot the given volume-backed instance.

        :param instance: nova.db.sqlalchemy.models.Instance
        :param image_meta: metadata for the new image
        :param name: name of the backup or snapshot
        :param extra_properties: dict of extra image properties to include

        :returns: the new image metadata
        """
        image_meta['name'] = name
        properties = image_meta['properties']
        if instance['root_device_name']:
            properties['root_device_name'] = instance['root_device_name']
        properties.update(extra_properties or {})

        bdms = self.get_instance_bdms(context, instance)

        mapping = []
        for bdm in bdms:
            if bdm.no_device:
                continue
            m = {}
            for attr in ('device_name', 'snapshot_id', 'volume_id',
                         'volume_size', 'delete_on_termination', 'no_device',
                         'virtual_name'):
                val = getattr(bdm, attr)
                if val is not None:
                    m[attr] = val

            volume_id = m.get('volume_id')
            if volume_id:
                # create snapshot based on volume_id
                volume = self.volume_api.get(context, volume_id)
                # NOTE(yamahata): Should we wait for snapshot creation?
                #                 Linux LVM snapshot creation completes in
                #                 short time, it doesn't matter for now.
                name = _('snapshot for %s') % image_meta['name']
                snapshot = self.volume_api.create_snapshot_force(
                        context, volume, name, volume['display_description'])
                # The image records the snapshot, not the live volume.
                m['snapshot_id'] = snapshot['id']
                del m['volume_id']

            if m:
                mapping.append(m)

        for m in block_device.mappings_prepend_dev(properties.get('mappings',
                                                                  [])):
            virtual_name = m['virtual']
            if virtual_name in ('ami', 'root'):
                continue

            assert block_device.is_swap_or_ephemeral(virtual_name)
            device_name = m['device']
            if device_name in [b['device_name'] for b in mapping
                               if not b.get('no_device', False)]:
                continue

            # NOTE(yamahata): swap and ephemeral devices are specified in
            #                 AMI, but disabled for this instance by user.
            #                 So disable those device by no_device.
            mapping.append({'device_name': device_name, 'no_device': True})

        if mapping:
            properties['block_device_mapping'] = mapping

        # Strip fields that belong to the source image record.
        for attr in ('status', 'location', 'id'):
            image_meta.pop(attr, None)

        # the new image is simply a bucket of properties (particularly the
        # block device mapping, kernel and ramdisk IDs) with no image data,
        # hence the zero size
        image_meta['size'] = 0

        return self.image_service.create(context, image_meta, data='')

    def _get_minram_mindisk_params(self, context, instance):
        """Return (min_ram, min_disk) for a snapshot, derived from the
        instance's source image, or (None, None) if the image is gone."""
        try:
            #try to get source image of the instance
            orig_image = self.image_service.show(context,
                                                 instance['image_ref'])
        except exception.ImageNotFound:
            return None, None

        #disk format of vhd is non-shrinkable
        if orig_image.get('disk_format') == 'vhd':
            min_ram = instance['instance_type']['memory_mb']
            min_disk = instance['instance_type']['root_gb']
        else:
            #set new image values to the original image values
            min_ram = orig_image.get('min_ram')
            min_disk = orig_image.get('min_disk')

        return min_ram, min_disk

    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.RESCUED],
                          task_state=[None, task_states.REBOOTING])
    def reboot(self, context, instance, reboot_type):
        """Reboot the given instance."""
        if (reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING): raise exception.InstanceInvalidState( attr='task_state', instance_uuid=instance['uuid'], state=instance['task_state']) state = {'SOFT': task_states.REBOOTING, 'HARD': task_states.REBOOTING_HARD}[reboot_type] instance = self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=state, expected_task_state=[None, task_states.REBOOTING]) self.compute_rpcapi.reboot_instance(context, instance=instance, reboot_type=reboot_type) def _get_image(self, context, image_href): """Throws an ImageNotFound exception if image_href does not exist.""" (image_service, image_id) = glance.get_remote_image_service(context, image_href) return image_service.show(context, image_id) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED], task_state=[None]) def rebuild(self, context, instance, image_href, admin_password, **kwargs): """Rebuild the given instance with the provided attributes.""" orig_image_ref = instance['image_ref'] image = self._get_image(context, image_href) files_to_inject = kwargs.pop('files_to_inject', []) self._check_injected_file_quota(context, files_to_inject) metadata = kwargs.get('metadata', {}) self._check_metadata_properties_quota(context, metadata) instance_type = instance['instance_type'] if instance_type['memory_mb'] < int(image.get('min_ram') or 0): raise exception.InstanceTypeMemoryTooSmall() if instance_type['root_gb'] < int(image.get('min_disk') or 0): raise exception.InstanceTypeDiskTooSmall() (image_service, image_id) = glance.get_remote_image_service(context, image_href) image = image_service.show(context, image_id) kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk( context, None, None, image, image_service) def _reset_image_metadata(): """ Remove old image properties that we're storing as instance system metadata. These properties start with 'image_'. Then add the properites for the new image. 
""" # FIXME(comstud): There's a race condition here in that # if the system_metadata for this instance is updated # after we do the get and before we update.. those other # updates will be lost. Since this problem exists in a lot # of other places, I think it should be addressed in a DB # layer overhaul. sys_metadata = self.db.instance_system_metadata_get(context, instance['uuid']) orig_sys_metadata = dict(sys_metadata) # Remove the old keys for key in sys_metadata.keys(): if key.startswith('image_'): del sys_metadata[key] # Add the new ones for key, value in image['properties'].iteritems(): new_value = str(value)[:255] sys_metadata['image_%s' % key] = new_value self.db.instance_system_metadata_update(context, instance['uuid'], sys_metadata, True) return orig_sys_metadata instance = self.update(context, instance, task_state=task_states.REBUILDING, expected_task_state=None, # Unfortunately we need to set image_ref early, # so API users can see it. image_ref=image_href, kernel_id=kernel_id or "", ramdisk_id=ramdisk_id or "", progress=0, **kwargs) # On a rebuild, since we're potentially changing images, we need to # wipe out the old image properties that we're storing as instance # system metadata... and copy in the properties for the new image. 
orig_sys_metadata = _reset_image_metadata() self.compute_rpcapi.rebuild_instance(context, instance=instance, new_pass=admin_password, injected_files=files_to_inject, image_ref=image_href, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.RESIZED]) def revert_resize(self, context, instance): """Reverts a resize, deleting the 'new' instance in the process.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') # reverse quota reservation for increased resource usage deltas = self._reverse_upsize_quota_delta(context, migration_ref) reservations = self._reserve_quota_delta(context, deltas) instance = self.update(context, instance, task_state=task_states.RESIZE_REVERTING, expected_task_state=None) self.compute_rpcapi.revert_resize(context, instance=instance, migration_id=migration_ref['id'], host=migration_ref['dest_compute'], reservations=reservations) self.db.migration_update(context, migration_ref['id'], {'status': 'reverted'}) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.RESIZED]) def confirm_resize(self, context, instance): """Confirms a migration/resize and deletes the 'old' instance.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance['uuid'], 'finished') if not migration_ref: raise exception.MigrationNotFoundByStatus( instance_id=instance['uuid'], status='finished') # reserve quota only for any decrease in resource usage deltas = self._downsize_quota_delta(context, migration_ref) reservations = self._reserve_quota_delta(context, deltas) instance = self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=None, expected_task_state=None) 
self.compute_rpcapi.confirm_resize(context, instance=instance, migration_id=migration_ref['id'], host=migration_ref['source_compute'], reservations=reservations) self.db.migration_update(context, migration_ref['id'], {'status': 'confirmed'}) @staticmethod def _resize_quota_delta(context, new_instance_type, old_instance_type, sense, compare): """ Calculate any quota adjustment required at a particular point in the resize cycle. :param context: the request context :param new_instance_type: the target instance type :param old_instance_type: the original instance type :param sense: the sense of the adjustment, 1 indicates a forward adjustment, whereas -1 indicates a reversal of a prior adjustment :param compare: the direction of the comparison, 1 indicates we're checking for positive deltas, whereas -1 indicates negative deltas """ def _quota_delta(resource): return sense * (new_instance_type[resource] - old_instance_type[resource]) deltas = {} if compare * _quota_delta('vcpus') > 0: deltas['cores'] = _quota_delta('vcpus') if compare * _quota_delta('memory_mb') > 0: deltas['ram'] = _quota_delta('memory_mb') return deltas @staticmethod def _upsize_quota_delta(context, new_instance_type, old_instance_type): """ Calculate deltas required to adjust quota for an instance upsize. """ return API._resize_quota_delta(context, new_instance_type, old_instance_type, 1, 1) @staticmethod def _reverse_upsize_quota_delta(context, migration_ref): """ Calculate deltas required to reverse a prior upsizing quota adjustment. """ old_instance_type = instance_types.get_instance_type( migration_ref['old_instance_type_id']) new_instance_type = instance_types.get_instance_type( migration_ref['new_instance_type_id']) return API._resize_quota_delta(context, new_instance_type, old_instance_type, -1, -1) @staticmethod def _downsize_quota_delta(context, migration_ref): """ Calculate deltas required to adjust quota for an instance downsize. 
""" old_instance_type = instance_types.get_instance_type( migration_ref['old_instance_type_id']) new_instance_type = instance_types.get_instance_type( migration_ref['new_instance_type_id']) return API._resize_quota_delta(context, new_instance_type, old_instance_type, 1, -1) @staticmethod def _reserve_quota_delta(context, deltas): return QUOTAS.reserve(context, **deltas) if deltas else None @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED], task_state=[None]) def resize(self, context, instance, flavor_id=None, **kwargs): """Resize (ie, migrate) a running instance. If flavor_id is None, the process is considered a migration, keeping the original flavor_id. If flavor_id is not None, the instance should be migrated to a new host and resized to the new flavor_id. """ current_instance_type = instance['instance_type'] # If flavor_id is not provided, only migrate the instance. if not flavor_id: LOG.debug(_("flavor_id is None. Assuming migration."), instance=instance) new_instance_type = current_instance_type else: new_instance_type = instance_types.get_instance_type_by_flavor_id( flavor_id) current_instance_type_name = current_instance_type['name'] new_instance_type_name = new_instance_type['name'] LOG.debug(_("Old instance type %(current_instance_type_name)s, " " new instance type %(new_instance_type_name)s"), locals(), instance=instance) # FIXME(sirp): both of these should raise InstanceTypeNotFound instead if not new_instance_type: raise exception.FlavorNotFound(flavor_id=flavor_id) same_instance_type = (current_instance_type['id'] == new_instance_type['id']) # NOTE(sirp): We don't want to force a customer to change their flavor # when Ops is migrating off of a failed host. 
if new_instance_type['disabled'] and not same_instance_type: raise exception.FlavorNotFound(flavor_id=flavor_id) # NOTE(markwash): look up the image early to avoid auth problems later image = self.image_service.show(context, instance['image_ref']) if same_instance_type and flavor_id: raise exception.CannotResizeToSameFlavor() # ensure there is sufficient headroom for upsizes deltas = self._upsize_quota_delta(context, new_instance_type, current_instance_type) try: reservations = self._reserve_quota_delta(context, deltas) except exception.OverQuota as exc: quotas = exc.kwargs['quotas'] usages = exc.kwargs['usages'] overs = exc.kwargs['overs'] headroom = dict((res, quotas[res] - (usages[res]['in_use'] + usages[res]['reserved'])) for res in quotas.keys()) resource = overs[0] used = quotas[resource] - headroom[resource] total_allowed = used + headroom[resource] overs = ','.join(overs) pid = context.project_id LOG.warn(_("%(overs)s quota exceeded for %(pid)s," " tried to resize instance. %(msg)s"), locals()) raise exception.TooManyInstances(overs=overs, req=deltas[resource], used=used, allowed=total_allowed, resource=resource) instance = self.update(context, instance, task_state=task_states.RESIZE_PREP, expected_task_state=None, progress=0, **kwargs) request_spec = { 'instance_type': new_instance_type, 'instance_uuids': [instance['uuid']], 'instance_properties': instance} filter_properties = {'ignore_hosts': []} if not FLAGS.allow_resize_to_same_host: filter_properties['ignore_hosts'].append(instance['host']) args = { "instance": instance, "instance_type": new_instance_type, "image": image, "request_spec": jsonutils.to_primitive(request_spec), "filter_properties": filter_properties, "reservations": reservations, } self.scheduler_rpcapi.prep_resize(context, **args) @wrap_check_policy @check_instance_lock def add_fixed_ip(self, context, instance, network_id): """Add fixed_ip from specified network to given instance.""" self.compute_rpcapi.add_fixed_ip_to_instance(context, 
instance=instance, network_id=network_id) @wrap_check_policy @check_instance_lock def remove_fixed_ip(self, context, instance, address): """Remove fixed_ip from specified network to given instance.""" self.compute_rpcapi.remove_fixed_ip_from_instance(context, instance=instance, address=address) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED]) def pause(self, context, instance): """Pause the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.PAUSING, expected_task_state=None) self.compute_rpcapi.pause_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.PAUSED]) def unpause(self, context, instance): """Unpause the given instance.""" self.update(context, instance, vm_state=vm_states.PAUSED, task_state=task_states.UNPAUSING, expected_task_state=None) self.compute_rpcapi.unpause_instance(context, instance=instance) @wrap_check_policy def get_diagnostics(self, context, instance): """Retrieve diagnostics for the given instance.""" return self.compute_rpcapi.get_diagnostics(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED]) def suspend(self, context, instance): """Suspend the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.SUSPENDING, expected_task_state=None) self.compute_rpcapi.suspend_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.SUSPENDED]) def resume(self, context, instance): """Resume the given instance.""" self.update(context, instance, vm_state=vm_states.SUSPENDED, task_state=task_states.RESUMING, expected_task_state=None) self.compute_rpcapi.resume_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE, 
vm_states.STOPPED]) def rescue(self, context, instance, rescue_password=None): """Rescue the given instance.""" self.update(context, instance, vm_state=vm_states.ACTIVE, task_state=task_states.RESCUING, expected_task_state=None) self.compute_rpcapi.rescue_instance(context, instance=instance, rescue_password=rescue_password) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.RESCUED]) def unrescue(self, context, instance): """Unrescue the given instance.""" self.update(context, instance, vm_state=vm_states.RESCUED, task_state=task_states.UNRESCUING, expected_task_state=None) self.compute_rpcapi.unrescue_instance(context, instance=instance) @wrap_check_policy @check_instance_lock @check_instance_state(vm_state=[vm_states.ACTIVE]) def set_admin_password(self, context, instance, password=None): """Set the root/admin password for the given instance.""" self.update(context, instance, task_state=task_states.UPDATING_PASSWORD, expected_task_state=None) self.compute_rpcapi.set_admin_password(context, instance=instance, new_pass=password) @wrap_check_policy @check_instance_lock def inject_file(self, context, instance, path, file_contents): """Write a file to the given instance.""" self.compute_rpcapi.inject_file(context, instance=instance, path=path, file_contents=file_contents) @wrap_check_policy def get_vnc_console(self, context, instance, console_type): """Get a url to an instance Console.""" if not instance['host']: raise exception.InstanceNotReady(instance=instance) connect_info = self.compute_rpcapi.get_vnc_console(context, instance=instance, console_type=console_type) self.consoleauth_rpcapi.authorize_console(context, connect_info['token'], console_type, connect_info['host'], connect_info['port'], connect_info['internal_access_path']) return {'url': connect_info['access_url']} @wrap_check_policy def get_console_output(self, context, instance, tail_length=None): """Get console output for an instance.""" return 
self.compute_rpcapi.get_console_output(context, instance=instance, tail_length=tail_length) @wrap_check_policy def lock(self, context, instance): """Lock the given instance.""" context = context.elevated() instance_uuid = instance['uuid'] LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, locked=True) @wrap_check_policy def unlock(self, context, instance): """Unlock the given instance.""" context = context.elevated() instance_uuid = instance['uuid'] LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, locked=False) @wrap_check_policy def get_lock(self, context, instance): """Return the boolean state of given instance's lock.""" return self.get(context, instance['uuid'])['locked'] @wrap_check_policy @check_instance_lock def reset_network(self, context, instance): """Reset networking on the instance.""" self.compute_rpcapi.reset_network(context, instance=instance) @wrap_check_policy @check_instance_lock def inject_network_info(self, context, instance): """Inject network info for the instance.""" self.compute_rpcapi.inject_network_info(context, instance=instance) @wrap_check_policy @check_instance_lock def attach_volume(self, context, instance, volume_id, device=None): """Attach an existing volume to an existing instance.""" # NOTE(vish): Fail fast if the device is not going to pass. This # will need to be removed along with the test if we # change the logic in the manager for what constitutes # a valid device. if device and not block_device.match_device(device): raise exception.InvalidDevicePath(path=device) # NOTE(vish): This is done on the compute host because we want # to avoid a race where two devices are requested at # the same time. When db access is removed from # compute, the bdm will be created here and we will # have to make sure that they are assigned atomically. 
device = self.compute_rpcapi.reserve_block_device_name( context, device=device, instance=instance) try: volume = self.volume_api.get(context, volume_id) self.volume_api.check_attach(context, volume) self.volume_api.reserve_volume(context, volume) self.compute_rpcapi.attach_volume(context, instance=instance, volume_id=volume_id, mountpoint=device) except Exception: with excutils.save_and_reraise_exception(): self.db.block_device_mapping_destroy_by_instance_and_device( context, instance['uuid'], device) return device @check_instance_lock def _detach_volume(self, context, instance, volume_id): check_policy(context, 'detach_volume', instance) volume = self.volume_api.get(context, volume_id) self.volume_api.check_detach(context, volume) self.volume_api.begin_detaching(context, volume) self.compute_rpcapi.detach_volume(context, instance=instance, volume_id=volume_id) return instance # FIXME(comstud): I wonder if API should pull in the instance from # the volume ID via volume API and pass it and the volume object here def detach_volume(self, context, volume_id): """Detach a volume from an instance.""" volume = self.volume_api.get(context, volume_id) if volume['attach_status'] == 'detached': msg = _("Volume must be attached in order to detach.") raise exception.InvalidVolume(reason=msg) instance_uuid = volume['instance_uuid'] instance = self.db.instance_get_by_uuid(context.elevated(), instance_uuid) if not instance: raise exception.VolumeUnattached(volume_id=volume_id) self._detach_volume(context, instance, volume_id) @wrap_check_policy def get_instance_metadata(self, context, instance): """Get all metadata associated with an instance.""" rv = self.db.instance_metadata_get(context, instance['uuid']) return dict(rv.iteritems()) @wrap_check_policy @check_instance_lock def delete_instance_metadata(self, context, instance, key): """Delete the given metadata item from an instance.""" self.db.instance_metadata_delete(context, instance['uuid'], key) instance['metadata'] = {} 
notifications.send_update(context, instance, instance) self.compute_rpcapi.change_instance_metadata(context, instance=instance, diff={key: ['-']}) @wrap_check_policy @check_instance_lock def update_instance_metadata(self, context, instance, metadata, delete=False): """Updates or creates instance metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ orig = self.get_instance_metadata(context, instance) if delete: _metadata = metadata else: _metadata = orig.copy() _metadata.update(metadata) self._check_metadata_properties_quota(context, _metadata) metadata = self.db.instance_metadata_update(context, instance['uuid'], _metadata, True) instance['metadata'] = metadata notifications.send_update(context, instance, instance) diff = utils.diff_dict(orig, _metadata) self.compute_rpcapi.change_instance_metadata(context, instance=instance, diff=diff) return _metadata def get_instance_faults(self, context, instances): """Get all faults for a list of instance uuids.""" if not instances: return {} for instance in instances: check_policy(context, 'get_instance_faults', instance) uuids = [instance['uuid'] for instance in instances] return self.db.instance_fault_get_by_instance_uuids(context, uuids) def get_instance_bdms(self, context, instance): """Get all bdm tables for specified instance.""" return self.db.block_device_mapping_get_all_by_instance(context, instance['uuid']) def is_volume_backed_instance(self, context, instance, bdms): bdms = bdms or self.get_instance_bdms(context, instance) for bdm in bdms: if (block_device.strip_dev(bdm.device_name) == block_device.strip_dev(instance['root_device_name'])): return True else: return False @check_instance_state(vm_state=[vm_states.ACTIVE]) def live_migrate(self, context, instance, block_migration, disk_over_commit, host): """Migrate a server lively to a new host.""" LOG.debug(_("Going to try to live migrate instance to %s"), host, instance=instance) instance = 
self.update(context, instance, task_state=task_states.MIGRATING, expected_task_state=None) self.scheduler_rpcapi.live_migration(context, block_migration, disk_over_commit, instance, host) class HostAPI(base.Base): def __init__(self): self.compute_rpcapi = compute_rpcapi.ComputeAPI() super(HostAPI, self).__init__() """Sub-set of the Compute Manager API for managing host operations.""" def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self.compute_rpcapi.set_host_enabled(context, enabled=enabled, host=host) def get_host_uptime(self, context, host): """Returns the result of calling "uptime" on the target host.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self.compute_rpcapi.get_host_uptime(context, host=host) def host_power_action(self, context, host, action): """Reboots, shuts down or powers up the host.""" # NOTE(comstud): No instance_uuid argument to this compute manager # call return self.compute_rpcapi.host_power_action(context, action=action, host=host) def set_host_maintenance(self, context, host, mode): """Start/Stop host maintenance window. 
On start, it triggers guest VMs evacuation.""" return self.compute_rpcapi.host_maintenance_mode(context, host_param=host, mode=mode, host=host) class AggregateAPI(base.Base): """Sub-set of the Compute Manager API for managing host aggregates.""" def __init__(self, **kwargs): self.compute_rpcapi = compute_rpcapi.ComputeAPI() super(AggregateAPI, self).__init__(**kwargs) def create_aggregate(self, context, aggregate_name, availability_zone): """Creates the model for the aggregate.""" zones = [s.availability_zone for s in self.db.service_get_all_by_topic(context, FLAGS.compute_topic)] if availability_zone in zones: values = {"name": aggregate_name, "availability_zone": availability_zone} aggregate = self.db.aggregate_create(context, values) return dict(aggregate.iteritems()) else: raise exception.InvalidAggregateAction(action='create_aggregate', aggregate_id="'N/A'", reason='invalid zone') def get_aggregate(self, context, aggregate_id): """Get an aggregate by id.""" aggregate = self.db.aggregate_get(context, aggregate_id) return self._get_aggregate_info(context, aggregate) def get_aggregate_list(self, context): """Get all the aggregates.""" aggregates = self.db.aggregate_get_all(context) return [self._get_aggregate_info(context, a) for a in aggregates] def update_aggregate(self, context, aggregate_id, values): """Update the properties of an aggregate.""" aggregate = self.db.aggregate_update(context, aggregate_id, values) return self._get_aggregate_info(context, aggregate) def update_aggregate_metadata(self, context, aggregate_id, metadata): """Updates the aggregate metadata. If a key is set to None, it gets removed from the aggregate metadata. 
"""
        # .keys() returns a snapshot list on Python 2 (this file uses
        # iteritems/except-comma syntax), so popping inside the loop is safe.
        for key in metadata.keys():
            if not metadata[key]:
                try:
                    self.db.aggregate_metadata_delete(context,
                                                      aggregate_id, key)
                    metadata.pop(key)
                except exception.AggregateMetadataNotFound, e:
                    # Key already gone: best-effort delete, just log it.
                    LOG.warn(e.message)
        # Remaining (non-None) entries are added/overwritten in one call.
        self.db.aggregate_metadata_add(context, aggregate_id, metadata)
        return self.get_aggregate(context, aggregate_id)

    def delete_aggregate(self, context, aggregate_id):
        """Deletes the aggregate.

        Refuses to delete while any host is still a member.
        """
        hosts = self.db.aggregate_host_get_all(context, aggregate_id)
        if len(hosts) > 0:
            raise exception.InvalidAggregateAction(action='delete',
                                                   aggregate_id=aggregate_id,
                                                   reason='not empty')
        self.db.aggregate_delete(context, aggregate_id)

    def add_host_to_aggregate(self, context, aggregate_id, host):
        """Adds the host to an aggregate.

        The host's compute service must exist and share the aggregate's
        availability zone.
        """
        # validates the host; ComputeHostNotFound is raised if invalid
        service = self.db.service_get_all_compute_by_host(context, host)[0]
        aggregate = self.db.aggregate_get(context, aggregate_id)
        if service.availability_zone != aggregate.availability_zone:
            raise exception.InvalidAggregateAction(
                    action='add host',
                    aggregate_id=aggregate_id,
                    reason='availability zone mismatch')
        self.db.aggregate_host_add(context, aggregate_id, host)
        #NOTE(jogo): Send message to host to support resource pools
        self.compute_rpcapi.add_aggregate_host(context,
                aggregate_id=aggregate_id, host_param=host, host=host)
        return self.get_aggregate(context, aggregate_id)

    def remove_host_from_aggregate(self, context, aggregate_id, host):
        """Removes host from the aggregate."""
        # validates the host; ComputeHostNotFound is raised if invalid
        service = self.db.service_get_all_compute_by_host(context, host)[0]
        self.db.aggregate_host_delete(context, aggregate_id, host)
        self.compute_rpcapi.remove_aggregate_host(context,
                aggregate_id=aggregate_id, host_param=host, host=host)
        return self.get_aggregate(context, aggregate_id)

    def _get_aggregate_info(self, context, aggregate):
        """Builds a dictionary with aggregate props, metadata and hosts."""
        metadata = self.db.aggregate_metadata_get(context,
                                                  aggregate.id)
        hosts = self.db.aggregate_host_get_all(context, aggregate.id)
        result = dict(aggregate.iteritems())
        result["metadata"] = metadata
        result["hosts"] = hosts
        return result


class KeypairAPI(base.Base):
    """Sub-set of the Compute Manager API for managing key pairs."""
    def __init__(self, **kwargs):
        super(KeypairAPI, self).__init__(**kwargs)

    def _validate_keypair_name(self, context, user_id, key_name):
        """Reject unsafe, over-long, or duplicate keypair names.

        :raises: InvalidKeypair for bad characters or bad length
        :raises: KeyPairExists if the user already has a pair by this name
        """
        safechars = "_- " + string.digits + string.ascii_letters
        clean_value = "".join(x for x in key_name if x in safechars)
        if clean_value != key_name:
            msg = _("Keypair name contains unsafe characters")
            raise exception.InvalidKeypair(explanation=msg)

        if not 0 < len(key_name) < 256:
            msg = _('Keypair name must be between 1 and 255 characters long')
            raise exception.InvalidKeypair(explanation=msg)

        # NOTE: check for existing keypairs of same name
        # KeyPairExists raised inside the try is NOT a NotFound, so it
        # propagates; only a missing keypair falls through to pass.
        try:
            self.db.key_pair_get(context, user_id, key_name)
            raise exception.KeyPairExists(key_name=key_name)
        except exception.NotFound:
            pass

    def import_key_pair(self, context, user_id, key_name, public_key):
        """Import a key pair using an existing public key.

        Quota-checked; the fingerprint is derived from the supplied key.
        """
        self._validate_keypair_name(context, user_id, key_name)

        count = QUOTAS.count(context, 'key_pairs', user_id)
        try:
            QUOTAS.limit_check(context, key_pairs=count + 1)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()

        try:
            fingerprint = crypto.generate_fingerprint(public_key)
        except exception.InvalidKeypair:
            msg = _("Keypair data is invalid")
            raise exception.InvalidKeypair(explanation=msg)

        keypair = {'user_id': user_id,
                   'name': key_name,
                   'fingerprint': fingerprint,
                   'public_key': public_key}
        self.db.key_pair_create(context, keypair)
        return keypair

    def create_key_pair(self, context, user_id, key_name):
        """Create a new key pair.

        Quota-checked; returns the keypair including the private key.
        """
        self._validate_keypair_name(context, user_id, key_name)

        count = QUOTAS.count(context, 'key_pairs', user_id)
        try:
            QUOTAS.limit_check(context, key_pairs=count + 1)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()

        private_key, 
public_key, fingerprint = crypto.generate_key_pair() keypair = {'user_id': user_id, 'name': key_name, 'fingerprint': fingerprint, 'public_key': public_key, 'private_key': private_key} self.db.key_pair_create(context, keypair) return keypair def delete_key_pair(self, context, user_id, key_name): """Delete a keypair by name.""" self.db.key_pair_destroy(context, user_id, key_name) def get_key_pairs(self, context, user_id): """List key pairs.""" key_pairs = self.db.key_pair_get_all_by_user(context, user_id) rval = [] for key_pair in key_pairs: rval.append({ 'name': key_pair['name'], 'public_key': key_pair['public_key'], 'fingerprint': key_pair['fingerprint'], }) return rval def get_key_pair(self, context, user_id, key_name): """Get a keypair by name.""" key_pair = self.db.key_pair_get(context, user_id, key_name) return {'name': key_pair['name'], 'public_key': key_pair['public_key'], 'fingerprint': key_pair['fingerprint']} class SecurityGroupAPI(base.Base): """ Sub-set of the Compute API related to managing security groups and security group rules """ def __init__(self, **kwargs): super(SecurityGroupAPI, self).__init__(**kwargs) self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI() self.sgh = importutils.import_object(FLAGS.security_group_handler) def validate_property(self, value, property, allowed): """ Validate given security group property. :param value: the value to validate, as a string or unicode :param property: the property, either 'name' or 'description' :param allowed: the range of characters allowed """ try: val = value.strip() except AttributeError: msg = _("Security group %s is not a string or unicode") % property self.raise_invalid_property(msg) if not val: msg = _("Security group %s cannot be empty.") % property self.raise_invalid_property(msg) if allowed and not re.match(allowed, val): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. 
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. msg = (_("Value (%(value)s) for parameter Group%(property)s is " "invalid. Content limited to '%(allowed)'.") % dict(value=value, allowed=allowed, property=property.capitalize())) self.raise_invalid_property(msg) if len(val) > 255: msg = _("Security group %s should not be greater " "than 255 characters.") % property self.raise_invalid_property(msg) def ensure_default(self, context): """Ensure that a context has a security group. Creates a security group for the security context if it does not already exist. :param context: the security context """ existed, group = self.db.security_group_ensure_default(context) if not existed: self.sgh.trigger_security_group_create_refresh(context, group) def create(self, context, name, description): try: reservations = QUOTAS.reserve(context, security_groups=1) except exception.OverQuota: msg = _("Quota exceeded, too many security groups.") self.raise_over_quota(msg) LOG.audit(_("Create Security Group %s"), name, context=context) try: self.ensure_default(context) if self.db.security_group_exists(context, context.project_id, name): msg = _('Security group %s already exists') % name self.raise_group_already_exists(msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': name, 'description': description} group_ref = self.db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) # Commit the reservation QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) return group_ref def get(self, context, name=None, id=None, map_exception=False): self.ensure_default(context) try: if name: return self.db.security_group_get_by_name(context, context.project_id, name) elif id: return self.db.security_group_get(context, id) except exception.NotFound as exp: if 
map_exception: msg = unicode(exp) self.raise_not_found(msg) else: raise def list(self, context, names=None, ids=None, project=None, search_opts=None): self.ensure_default(context) groups = [] if names or ids: if names: for name in names: groups.append(self.db.security_group_get_by_name(context, project, name)) if ids: for id in ids: groups.append(self.db.security_group_get(context, id)) elif context.is_admin: # TODO(eglynn): support a wider set of search options than just # all_tenants, at least include the standard filters defined for # the EC2 DescribeSecurityGroups API for the non-admin case also if (search_opts and 'all_tenants' in search_opts): groups = self.db.security_group_get_all(context) else: groups = self.db.security_group_get_by_project(context, project) elif project: groups = self.db.security_group_get_by_project(context, project) return groups def destroy(self, context, security_group): if self.db.security_group_in_use(context, security_group.id): msg = _("Security group is still in use") self.raise_invalid_group(msg) # Get reservations try: reservations = QUOTAS.reserve(context, security_groups=-1) except Exception: reservations = None LOG.exception(_("Failed to update usages deallocating " "security group")) LOG.audit(_("Delete security group %s"), security_group.name, context=context) self.db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) # Commit the reservations if reservations: QUOTAS.commit(context, reservations) def is_associated_with_server(self, security_group, instance_uuid): """Check if the security group is already associated with the instance. If Yes, return True. 
""" if not security_group: return False instances = security_group.get('instances') if not instances: return False for inst in instances: if (instance_uuid == inst['uuid']): return True return False @wrap_check_security_groups_policy def add_to_instance(self, context, instance, security_group_name): """Add security group to the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) self.db.instance_add_security_group(context.elevated(), instance_uuid, security_group['id']) # NOTE(comstud): No instance_uuid argument to this compute manager # call self.security_group_rpcapi.refresh_security_group_rules(context, security_group['id'], host=instance['host']) self.trigger_handler('instance_add_security_group', context, instance, security_group_name) @wrap_check_security_groups_policy def remove_from_instance(self, context, instance, security_group_name): """Remove the security group associated with the instance""" security_group = self.db.security_group_get_by_name(context, context.project_id, security_group_name) instance_uuid = instance['uuid'] #check if the security group is associated with the server if not self.is_associated_with_server(security_group, instance_uuid): raise exception.SecurityGroupNotExistsForInstance( security_group_id=security_group['id'], instance_id=instance_uuid) #check if the instance is in running state if instance['power_state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_uuid) 
self.db.instance_remove_security_group(context.elevated(), instance_uuid, security_group['id']) # NOTE(comstud): No instance_uuid argument to this compute manager # call self.security_group_rpcapi.refresh_security_group_rules(context, security_group['id'], host=instance['host']) self.trigger_handler('instance_remove_security_group', context, instance, security_group_name) def trigger_handler(self, event, *args): handle = getattr(self.sgh, 'trigger_%s_refresh' % event) handle(*args) def trigger_rules_refresh(self, context, id): """Called when a rule is added to or removed from a security_group.""" security_group = self.db.security_group_get(context, id) for instance in security_group['instances']: if instance['host'] is not None: self.security_group_rpcapi.refresh_instance_security_rules( context, instance['host'], instance) def trigger_members_refresh(self, context, group_ids): """Called when a security group gains a new or loses a member. Sends an update request to each compute node for each instance for which this is relevant. """ # First, we get the security group rules that reference these groups as # the grantee.. security_group_rules = set() for group_id in group_ids: security_group_rules.update( self.db.security_group_rule_get_by_security_group_grantee( context, group_id)) # ..then we distill the rules into the groups to which they belong.. security_groups = set() for rule in security_group_rules: security_group = self.db.security_group_get( context, rule['parent_group_id']) security_groups.add(security_group) # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: for instance in security_group['instances']: instances.add(instance) # ..then we send a request to refresh the rules for each instance. 
for instance in instances: if instance['host']: self.security_group_rpcapi.refresh_instance_security_rules( context, instance['host'], instance) def parse_cidr(self, cidr): if cidr: try: cidr = urllib.unquote(cidr).decode() except Exception as e: self.raise_invalid_cidr(cidr, e) if not utils.is_valid_cidr(cidr): self.raise_invalid_cidr(cidr) return cidr else: return '0.0.0.0/0' @staticmethod def new_group_ingress_rule(grantee_group_id, protocol, from_port, to_port): return SecurityGroupAPI._new_ingress_rule(protocol, from_port, to_port, group_id=grantee_group_id) @staticmethod def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port): return SecurityGroupAPI._new_ingress_rule(protocol, from_port, to_port, cidr=grantee_cidr) @staticmethod def _new_ingress_rule(ip_protocol, from_port, to_port, group_id=None, cidr=None): values = {} if group_id: values['group_id'] = group_id # Open everything if an explicit port range or type/code are not # specified, but only if a source group was specified. 
ip_proto_upper = ip_protocol.upper() if ip_protocol else '' if (ip_proto_upper == 'ICMP' and from_port is None and to_port is None): from_port = -1 to_port = -1 elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and to_port is None): from_port = 1 to_port = 65535 elif cidr: values['cidr'] = cidr if ip_protocol and from_port is not None and to_port is not None: ip_protocol = str(ip_protocol) try: # Verify integer conversions from_port = int(from_port) to_port = int(to_port) except ValueError: if ip_protocol.upper() == 'ICMP': raise exception.InvalidInput(reason="Type and" " Code must be integers for ICMP protocol type") else: raise exception.InvalidInput(reason="To and From ports " "must be integers") if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: raise exception.InvalidIpProtocol(protocol=ip_protocol) # Verify that from_port must always be less than # or equal to to_port if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port > to_port)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Former value cannot" " be greater than the later") # Verify valid TCP, UDP port ranges if (ip_protocol.upper() in ['TCP', 'UDP'] and (from_port < 1 or to_port > 65535)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="Valid TCP ports should" " be between 1-65535") # Verify ICMP type and code if (ip_protocol.upper() == "ICMP" and (from_port < -1 or from_port > 255 or to_port < -1 or to_port > 255)): raise exception.InvalidPortRange(from_port=from_port, to_port=to_port, msg="For ICMP, the" " type:code must be valid") values['protocol'] = ip_protocol values['from_port'] = from_port values['to_port'] = to_port else: # If cidr based filtering, protocol and ports are mandatory if cidr: return None return values def rule_exists(self, security_group, values): """Indicates whether the specified rule values are already defined in the given security group. 
""" for rule in security_group.rules: is_duplicate = True keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol') for key in keys: if rule.get(key) != values.get(key): is_duplicate = False break if is_duplicate: return rule.get('id') or True return False def get_rule(self, context, id): self.ensure_default(context) try: return self.db.security_group_rule_get(context, id) except exception.NotFound: msg = _("Rule (%s) not found") % id self.raise_not_found(msg) def add_rules(self, context, id, name, vals): count = QUOTAS.count(context, 'security_group_rules', id) try: projected = count + len(vals) QUOTAS.limit_check(context, security_group_rules=projected) except exception.OverQuota: msg = _("Quota exceeded, too many security group rules.") self.raise_over_quota(msg) msg = _("Authorize security group ingress %s") LOG.audit(msg, name, context=context) rules = [self.db.security_group_rule_create(context, v) for v in vals] self.trigger_rules_refresh(context, id=id) self.trigger_handler('security_group_rule_create', context, [r['id'] for r in rules]) return rules def remove_rules(self, context, security_group, rule_ids): msg = _("Revoke security group ingress %s") LOG.audit(msg, security_group['name'], context=context) for rule_id in rule_ids: self.db.security_group_rule_destroy(context, rule_id) # NOTE(vish): we removed some rules, so refresh self.trigger_rules_refresh(context, id=security_group['id']) self.trigger_handler('security_group_rule_destroy', context, rule_ids) @staticmethod def raise_invalid_property(msg): raise NotImplementedError() @staticmethod def raise_group_already_exists(msg): raise NotImplementedError() @staticmethod def raise_invalid_group(msg): raise NotImplementedError() @staticmethod def raise_invalid_cidr(cidr, decoding_exception=None): raise NotImplementedError() @staticmethod def raise_over_quota(msg): raise NotImplementedError() @staticmethod def raise_not_found(msg): raise NotImplementedError()
# (dataset artifact, not source code) ./CrossVul/dataset_final_sorted/CWE-264/py/bad_5539_0
# (dataset artifact, not source code) crossvul-python_data_good_3773_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ /images endpoint for Glance v1 API """ import errno import logging import sys import traceback from webob.exc import (HTTPError, HTTPNotFound, HTTPConflict, HTTPBadRequest, HTTPForbidden, HTTPRequestEntityTooLarge, HTTPServiceUnavailable, ) from glance.api import policy import glance.api.v1 from glance.api.v1 import controller from glance.api.v1 import filters from glance.common import cfg from glance.common import exception from glance.common import wsgi from glance.common import utils import glance.store import glance.store.filesystem import glance.store.http import glance.store.rbd import glance.store.s3 import glance.store.swift from glance.store import (get_from_backend, get_size_from_backend, schedule_delete_from_backend, get_store_from_location, get_store_from_scheme) from glance import registry from glance import notifier logger = logging.getLogger(__name__) SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS # 1 PiB, which is a *huge* image by anyone's measure. This is just to protect # against client programming errors (or DoS attacks) in the image metadata. # We have a known limit of 1 << 63 in the database -- images.size is declared # as a BigInteger. 
IMAGE_SIZE_CAP = 1 << 50 class Controller(controller.BaseController): """ WSGI controller for images resource in Glance v1 API The images resource API is a RESTful web service for image data. The API is as follows:: GET /images -- Returns a set of brief metadata about images GET /images/detail -- Returns a set of detailed metadata about images HEAD /images/<ID> -- Return metadata about an image with id <ID> GET /images/<ID> -- Return image data for image with id <ID> POST /images -- Store image data and return metadata about the newly-stored image PUT /images/<ID> -- Update image metadata and/or upload image data for a previously-reserved image DELETE /images/<ID> -- Delete the image with id <ID> """ default_store_opt = cfg.StrOpt('default_store', default='file') def __init__(self, conf): self.conf = conf self.conf.register_opt(self.default_store_opt) glance.store.create_stores(conf) self.verify_store_or_exit(self.conf.default_store) self.notifier = notifier.Notifier(conf) registry.configure_registry_client(conf) self.policy = policy.Enforcer(conf) def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise HTTPForbidden() def index(self, req): """ Returns the following information for all public, available images: * id -- The opaque image identifier * name -- The name of the image * disk_format -- The disk image format * container_format -- The "container" format of the image * checksum -- MD5 checksum of the image data * size -- Size of image data in bytes :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'disk_format': <DISK_FORMAT>, 'container_format': <DISK_FORMAT>, 'checksum': <CHECKSUM> 'size': <SIZE>}, ... 
]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_list(req.context, **params) except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def detail(self, req): """ Returns detailed information for all public, available images :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'size': <SIZE>, 'disk_format': <DISK_FORMAT>, 'container_format': <CONTAINER_FORMAT>, 'checksum': <CHECKSUM>, 'min_disk': <MIN_DISK>, 'min_ram': <MIN_RAM>, 'store': <STORE>, 'status': <STATUS>, 'created_at': <TIMESTAMP>, 'updated_at': <TIMESTAMP>, 'deleted_at': <TIMESTAMP>|<NONE>, 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ... ]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_detail(req.context, **params) # Strip out the Location attribute. Temporary fix for # LP Bug #755916. This information is still coming back # from the registry, since the API server still needs access # to it, however we do not return this potential security # information to the API end user... for image in images: del image['location'] except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def _get_query_params(self, req): """ Extracts necessary query params from request. 
:param req: the WSGI Request object :retval dict of parameters that can be used by registry client """ params = {'filters': self._get_filters(req)} for PARAM in SUPPORTED_PARAMS: if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params def _get_filters(self, req): """ Return a dictionary of query param filters from the request :param req: the Request object coming from the wsgi layer :retval a dict of key/value filters """ query_filters = {} for param in req.params: if param in SUPPORTED_FILTERS or param.startswith('property-'): query_filters[param] = req.params.get(param) if not filters.validate(param, query_filters[param]): raise HTTPBadRequest('Bad value passed to filter %s ' 'got %s' % (param, query_filters[param])) return query_filters def meta(self, req, id): """ Returns metadata about an image in the HTTP headers of the response object :param req: The WSGI/Webob Request object :param id: The opaque image identifier :retval similar to 'show' method but without image_data :raises HTTPNotFound if image metadata is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_image_meta_or_404(req, id) del image_meta['location'] return { 'image_meta': image_meta } @staticmethod def _validate_source(source, req): """ External sources (as specified via the location or copy-from headers) are supported only over non-local store types, i.e. S3, Swift, HTTP. Note the absence of file:// for security reasons, see LP bug #942118. If the above constraint is violated, we reject with 400 "Bad Request". 
""" if source: for scheme in ['s3', 'swift', 'http']: if source.lower().startswith(scheme): return source msg = _("External sourcing not supported for store %s") % source logger.error(msg) raise HTTPBadRequest(msg, request=req, content_type="text/plain") @staticmethod def _copy_from(req): return req.headers.get('x-glance-api-copy-from') @staticmethod def _external_source(image_meta, req): source = image_meta.get('location', Controller._copy_from(req)) return Controller._validate_source(source, req) @staticmethod def _get_from_store(where): try: image_data, image_size = get_from_backend(where) except exception.NotFound, e: raise HTTPNotFound(explanation="%s" % e) image_size = int(image_size) if image_size else None return image_data, image_size def show(self, req, id): """ Returns an iterator that can be used to retrieve an image's data along with the image metadata. :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HTTPNotFound if image is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_active_image_meta_or_404(req, id) if image_meta.get('size') == 0: image_iterator = iter([]) else: image_iterator, size = self._get_from_store(image_meta['location']) image_meta['size'] = size or image_meta['size'] del image_meta['location'] return { 'image_iterator': image_iterator, 'image_meta': image_meta, } def _reserve(self, req, image_meta): """ Adds the image metadata to the registry and assigns an image identifier if one is not supplied in the request headers. Sets the image's status to `queued`. 
:param req: The WSGI/Webob Request object :param id: The opaque image identifier :param image_meta: The image metadata :raises HTTPConflict if image already exists :raises HTTPBadRequest if image metadata is not valid """ location = self._external_source(image_meta, req) image_meta['status'] = ('active' if image_meta.get('size') == 0 else 'queued') if location: store = get_store_from_location(location) # check the store exists before we hit the registry, but we # don't actually care what it is at this point self.get_store_or_400(req, store) # retrieve the image size from remote store (if not provided) image_meta['size'] = self._get_size(image_meta, location) else: # Ensure that the size attribute is set to zero for directly # uploadable images (if not provided). The size will be set # to a non-zero value during upload image_meta['size'] = image_meta.get('size', 0) try: image_meta = registry.add_image_metadata(req.context, image_meta) return image_meta except exception.Duplicate: msg = (_("An image with identifier %s already exists") % image_meta['id']) logger.error(msg) raise HTTPConflict(msg, request=req, content_type="text/plain") except exception.Invalid, e: msg = (_("Failed to reserve image. Got error: %(e)s") % locals()) for line in msg.split('\n'): logger.error(line) raise HTTPBadRequest(msg, request=req, content_type="text/plain") except exception.Forbidden: msg = _("Forbidden to reserve image.") logger.error(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") def _upload(self, req, image_meta): """ Uploads the payload of the request to a backend store in Glance. If the `x-image-meta-store` header is set, Glance will attempt to use that store, if not, Glance will use the store set by the flag `default_store`. 
:param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :raises HTTPConflict if image already exists :retval The location where the image was stored """ copy_from = self._copy_from(req) if copy_from: image_data, image_size = self._get_from_store(copy_from) image_meta['size'] = image_size or image_meta['size'] else: try: req.get_content_type('application/octet-stream') except exception.InvalidContentType: self._safe_kill(req, image_meta['id']) msg = _("Content-Type must be application/octet-stream") logger.error(msg) raise HTTPBadRequest(explanation=msg) image_data = req.body_file if req.content_length: image_size = int(req.content_length) elif 'x-image-meta-size' in req.headers: image_size = int(req.headers['x-image-meta-size']) else: logger.debug(_("Got request with no content-length and no " "x-image-meta-size header")) image_size = 0 store_name = req.headers.get('x-image-meta-store', self.conf.default_store) store = self.get_store_or_400(req, store_name) image_id = image_meta['id'] logger.debug(_("Setting image %s to status 'saving'"), image_id) registry.update_image_metadata(req.context, image_id, {'status': 'saving'}) try: logger.debug(_("Uploading image data for image %(image_id)s " "to %(store_name)s store"), locals()) if image_size > IMAGE_SIZE_CAP: max_image_size = IMAGE_SIZE_CAP msg = _("Denying attempt to upload image larger than " "%(max_image_size)d. Supplied image size was " "%(image_size)d") % locals() logger.warn(msg) raise HTTPBadRequest(msg, request=req) location, size, checksum = store.add(image_meta['id'], image_data, image_size) # Verify any supplied checksum value matches checksum # returned from store when adding image supplied_checksum = image_meta.get('checksum') if supplied_checksum and supplied_checksum != checksum: msg = _("Supplied checksum (%(supplied_checksum)s) and " "checksum generated from uploaded image " "(%(checksum)s) did not match. 
Setting image " "status to 'killed'.") % locals() logger.error(msg) self._safe_kill(req, image_id) raise HTTPBadRequest(msg, content_type="text/plain", request=req) # Update the database with the checksum returned # from the backend store logger.debug(_("Updating image %(image_id)s data. " "Checksum set to %(checksum)s, size set " "to %(size)d"), locals()) update_data = {'checksum': checksum, 'size': size} image_meta = registry.update_image_metadata(req.context, image_id, update_data) self.notifier.info('image.upload', image_meta) return location except exception.Duplicate, e: msg = _("Attempt to upload duplicate image: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPConflict(msg, request=req) except exception.Forbidden, e: msg = _("Forbidden upload attempt: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") except exception.StorageFull, e: msg = _("Image storage media is full: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPRequestEntityTooLarge(msg, request=req, content_type='text/plain') except exception.StorageWriteDenied, e: msg = _("Insufficient permissions on image storage media: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPServiceUnavailable(msg, request=req, content_type='text/plain') except HTTPError, e: self._safe_kill(req, image_id) self.notifier.error('image.upload', e.explanation) raise except Exception, e: tb_info = traceback.format_exc() logger.error(tb_info) self._safe_kill(req, image_id) msg = _("Error uploading image: (%(class_name)s): " "%(exc)s") % ({'class_name': e.__class__.__name__, 'exc': str(e)}) self.notifier.error('image.upload', msg) raise HTTPBadRequest(msg, request=req) def _activate(self, req, image_id, location): """ Sets the image status 
to `active` and the image's location attribute. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param location: Location of where Glance stored this image """ image_meta = {} image_meta['location'] = location image_meta['status'] = 'active' try: return registry.update_image_metadata(req.context, image_id, image_meta) except exception.Invalid, e: msg = (_("Failed to activate image. Got error: %(e)s") % locals()) for line in msg.split('\n'): logger.error(line) self.notifier.error('image.update', msg) raise HTTPBadRequest(msg, request=req, content_type="text/plain") def _kill(self, req, image_id): """ Marks the image status to `killed`. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ registry.update_image_metadata(req.context, image_id, {'status': 'killed'}) def _safe_kill(self, req, image_id): """ Mark image killed without raising exceptions if it fails. Since _kill is meant to be called from exceptions handlers, it should not raise itself, rather it should just log its error. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ try: self._kill(req, image_id) except Exception, e: logger.error(_("Unable to kill image %(id)s: " "%(exc)s") % ({'id': image_id, 'exc': repr(e)})) def _upload_and_activate(self, req, image_meta): """ Safely uploads the image data in the request payload and activates the image in the registry after a successful upload. 
:param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :retval Mapping of updated image data """ image_id = image_meta['id'] # This is necessary because of a bug in Webob 1.0.2 - 1.0.7 # See: https://bitbucket.org/ianb/webob/ # issue/12/fix-for-issue-6-broke-chunked-transfer req.is_body_readable = True location = self._upload(req, image_meta) return self._activate(req, image_id, location) def _get_size(self, image_meta, location): # retrieve the image size from remote store (if not provided) return image_meta.get('size', 0) or get_size_from_backend(location) def _handle_source(self, req, image_id, image_meta, image_data): if image_data or self._copy_from(req): image_meta = self._upload_and_activate(req, image_meta) else: location = image_meta.get('location') if location: image_meta = self._activate(req, image_id, location) return image_meta def create(self, req, image_meta, image_data): """ Adds a new image to Glance. Four scenarios exist when creating an image: 1. If the image data is available directly for upload, create can be passed the image data as the request body and the metadata as the request headers. The image will initially be 'queued', during upload it will be in the 'saving' status, and then 'killed' or 'active' depending on whether the upload completed successfully. 2. If the image data exists somewhere else, you can upload indirectly from the external source using the x-glance-api-copy-from header. Once the image is uploaded, the external store is not subsequently consulted, i.e. the image content is served out from the configured glance image store. State transitions are as for option #1. 3. If the image data exists somewhere else, you can reference the source using the x-image-meta-location header. The image content will be served out from the external store, i.e. is never uploaded to the configured glance image store. 4. 
If the image data is not available yet, but you'd like reserve a spot for it, you can omit the data and a record will be created in the 'queued' state. This exists primarily to maintain backwards compatibility with OpenStack/Rackspace API semantics. The request body *must* be encoded as application/octet-stream, otherwise an HTTPBadRequest is returned. Upon a successful save of the image data and metadata, a response containing metadata about the image is returned, including its opaque identifier. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :param image_data: Actual image data that is to be stored :raises HTTPBadRequest if x-image-meta-location is missing and the request body is not application/octet-stream image data. """ self._enforce(req, 'add_image') if image_meta.get('is_public'): self._enforce(req, 'publicize_image') if req.context.read_only: msg = _("Read-only access") logger.debug(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") image_meta = self._reserve(req, image_meta) id = image_meta['id'] image_meta = self._handle_source(req, id, image_meta, image_data) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} def update(self, req, id, image_meta, image_data): """ Updates an existing image with the registry. 
:param request: The WSGI/Webob Request object :param id: The opaque image identifier :retval Returns the updated image information as a mapping """ self._enforce(req, 'modify_image') if image_meta.get('is_public'): self._enforce(req, 'publicize_image') if req.context.read_only: msg = _("Read-only access") logger.debug(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") orig_image_meta = self.get_image_meta_or_404(req, id) orig_status = orig_image_meta['status'] # The default behaviour for a PUT /images/<IMAGE_ID> is to # override any properties that were previously set. This, however, # leads to a number of issues for the common use case where a caller # registers an image with some properties and then almost immediately # uploads an image file along with some more properties. Here, we # check for a special header value to be false in order to force # properties NOT to be purged. However we also disable purging of # properties if an image file is being uploaded... purge_props = req.headers.get('x-glance-registry-purge-props', True) purge_props = (utils.bool_from_string(purge_props) and image_data is None) if image_data is not None and orig_status != 'queued': raise HTTPConflict(_("Cannot upload to an unqueued image")) # Only allow the Location|Copy-From fields to be modified if the # image is in queued status, which indicates that the user called # POST /images but originally supply neither a Location|Copy-From # field NOR image data location = self._external_source(image_meta, req) reactivating = orig_status != 'queued' and location activating = orig_status == 'queued' and (location or image_data) if reactivating: msg = _("Attempted to update Location field for an image " "not in queued status.") raise HTTPBadRequest(msg, request=req, content_type="text/plain") try: if location: image_meta['size'] = self._get_size(image_meta, location) image_meta = registry.update_image_metadata(req.context, id, image_meta, purge_props) if activating: image_meta 
= self._handle_source(req, id, image_meta, image_data)
        # Translate domain exceptions into the matching HTTP error; each
        # branch logs line-by-line and emits an 'image.update' notification.
        except exception.Invalid, e:
            msg = (_("Failed to update image metadata. Got error: %(e)s")
                   % locals())
            for line in msg.split('\n'):
                logger.error(line)
            self.notifier.error('image.update', msg)
            raise HTTPBadRequest(msg, request=req, content_type="text/plain")
        except exception.NotFound, e:
            msg = ("Failed to find image to update: %(e)s" % locals())
            for line in msg.split('\n'):
                logger.info(line)
            self.notifier.info('image.update', msg)
            raise HTTPNotFound(msg, request=req, content_type="text/plain")
        except exception.Forbidden, e:
            msg = ("Forbidden to update image: %(e)s" % locals())
            for line in msg.split('\n'):
                logger.info(line)
            self.notifier.info('image.update', msg)
            raise HTTPForbidden(msg, request=req, content_type="text/plain")
        else:
            self.notifier.info('image.update', image_meta)

        # Prevent client from learning the location, as it
        # could contain security credentials
        image_meta.pop('location', None)

        return {'image_meta': image_meta}

    def delete(self, req, id):
        """
        Deletes the image and all its chunks from the Glance

        :param req: The WSGI/Webob Request object
        :param id: The opaque image identifier

        :raises HttpBadRequest if image registry is invalid
        :raises HttpNotFound if image or any chunk is not available
        :raises HttpUnauthorized if image or any chunk is not
                deleteable by the requesting user
        """
        self._enforce(req, 'delete_image')

        if req.context.read_only:
            msg = _("Read-only access")
            logger.debug(msg)
            raise HTTPForbidden(msg, request=req,
                                content_type="text/plain")

        image = self.get_image_meta_or_404(req, id)

        # Owner check: admins may delete anything; unowned (owner == None)
        # images are deletable by anyone.
        # NOTE(review): 'image['owner'] == None' would be clearer as
        # 'is None' — left untouched here.
        if not (req.context.is_admin or
                image['owner'] == None or
                image['owner'] == req.context.owner):
            msg = _("Unable to delete image you do not own")
            logger.debug(msg)
            raise HTTPForbidden(msg, request=req,
                                content_type="text/plain")
        if image['protected']:
            msg = _("Image is protected")
            logger.debug(msg)
            raise HTTPForbidden(msg, request=req,
                                content_type="text/plain")

        # The image's location field may be None in the case
        # of a saving or queued image, therefore don't ask a backend
        # to delete the image if the backend doesn't yet store it.
        # See https://bugs.launchpad.net/glance/+bug/747799
        try:
            if image['location']:
                schedule_delete_from_backend(image['location'], self.conf,
                                             req.context, id)
            registry.delete_image_metadata(req.context, id)
        except exception.NotFound, e:
            msg = ("Failed to find image to delete: %(e)s" % locals())
            for line in msg.split('\n'):
                logger.info(line)
            self.notifier.info('image.delete', msg)
            raise HTTPNotFound(msg, request=req, content_type="text/plain")
        except exception.Forbidden, e:
            msg = ("Forbidden to delete image: %(e)s" % locals())
            for line in msg.split('\n'):
                logger.info(line)
            self.notifier.info('image.delete', msg)
            raise HTTPForbidden(msg, request=req, content_type="text/plain")
        else:
            self.notifier.info('image.delete', id)

    def get_store_or_400(self, request, store_name):
        """
        Grabs the storage backend for the supplied store name
        or raises an HTTPBadRequest (400) response

        :param request: The WSGI/Webob Request object
        :param store_name: The backend store name

        :raises HTTPNotFound if store does not exist
        """
        try:
            return get_store_from_scheme(store_name)
        except exception.UnknownScheme:
            msg = (_("Requested store %s not available on this Glance server")
                   % store_name)
            logger.error(msg)
            raise HTTPBadRequest(msg, request=request,
                                 content_type='text/plain')

    def verify_store_or_exit(self, store_name):
        """
        Verifies availability of the storage backend for the
        given store name or exits

        :param store_name: The backend store name
        """
        try:
            get_store_from_scheme(store_name)
        except exception.UnknownScheme:
            msg = (_("Default store %s not available on this Glance server\n")
                   % store_name)
            logger.error(msg)
            # message on stderr will only be visible if started directly via
            # bin/glance-api, as opposed to being daemonized by glance-control
            sys.stderr.write(msg)
            sys.exit(255)


class ImageDeserializer(wsgi.JSONRequestDeserializer):
    """Handles deserialization of specific controller
    method requests."""

    def _deserialize(self, request):
        """Extract image metadata and the raw image data from *request*.

        Returns a dict with 'image_meta' (parsed from x-image-meta-*
        headers) and 'image_data' (the request body file, or None when
        there is no body).

        :raises HTTPBadRequest: when the supplied size header is not an
            integer or exceeds IMAGE_SIZE_CAP
        """
        result = {}
        try:
            result['image_meta'] = utils.get_image_meta_from_headers(request)
        except exception.Invalid:
            image_size_str = request.headers['x-image-meta-size']
            msg = _("Incoming image size of %s was not convertible to "
                    "an integer.") % image_size_str
            raise HTTPBadRequest(msg, request=request)

        image_meta = result['image_meta']
        if 'size' in image_meta:
            incoming_image_size = image_meta['size']
            if incoming_image_size > IMAGE_SIZE_CAP:
                # max_image_size is referenced via the '% locals()' below.
                max_image_size = IMAGE_SIZE_CAP
                msg = _("Denying attempt to upload image larger than "
                        "%(max_image_size)d. Supplied image size was "
                        "%(incoming_image_size)d") % locals()
                logger.warn(msg)
                raise HTTPBadRequest(msg, request=request)

        data = request.body_file if self.has_body(request) else None
        result['image_data'] = data
        return result

    def create(self, request):
        # POST /images uses the same header/body deserialization as PUT.
        return self._deserialize(request)

    def update(self, request):
        return self._deserialize(request)


class ImageSerializer(wsgi.JSONResponseSerializer):
    """Handles serialization of specific controller method responses."""

    def __init__(self, conf):
        self.conf = conf
        self.notifier = notifier.Notifier(conf)

    def _inject_location_header(self, response, image_meta):
        # Relative image URL, built by _get_image_location().
        location = self._get_image_location(image_meta)
        response.headers['Location'] = location

    def _inject_checksum_header(self, response, image_meta):
        # Image checksum doubles as the HTTP ETag.
        response.headers['ETag'] = image_meta['checksum']

    def _inject_image_meta_headers(self, response, image_meta):
        """
        Given a response and mapping of image metadata, injects
        the Response with a set of HTTP headers for the image
        metadata.

        Each main image metadata field is injected as a HTTP
        header with key 'x-image-meta-<FIELD>' except for the
        properties field, which is further broken out into a set of
        'x-image-meta-property-<KEY>' headers

        :param response: The Webob Response object
        :param image_meta: Mapping of image metadata
        """
        headers = utils.image_meta_to_http_headers(image_meta)

        for k, v in headers.items():
            response.headers[k] = v

    def _get_image_location(self, image_meta):
        """Build a relative url to reach the image defined by image_meta."""
        return "/v1/images/%s" % image_meta['id']

    def meta(self, response, result):
        """Serialize a HEAD response: metadata goes in headers only."""
        image_meta = result['image_meta']
        self._inject_image_meta_headers(response, image_meta)
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)
        return response

    def image_send_notification(self, bytes_written, expected_size,
                                image_meta, request):
        """Send an image.send message to the notifier."""
        try:
            context = request.context
            payload = {
                'bytes_sent': bytes_written,
                'image_id': image_meta['id'],
                'owner_id': image_meta['owner'],
                'receiver_tenant_id': context.tenant,
                'receiver_user_id': context.user,
                'destination_ip': request.remote_addr,
            }
            # Short transfer => error-level notification.
            if bytes_written != expected_size:
                self.notifier.error('image.send', payload)
            else:
                self.notifier.info('image.send', payload)
        except Exception, err:
            # Best effort: notification failures must not break the download.
            msg = _("An error occurred during image.send"
                    " notification: %(err)s") % locals()
            logger.error(msg)

    def show(self, response, result):
        """Serialize a GET /images/<id> response: stream the image bytes."""
        image_meta = result['image_meta']
        image_id = image_meta['id']

        # We use a secondary iterator here to wrap the
        # iterator coming back from the store driver in
        # order to check for disconnections from the backend
        # storage connections and log an error if the size of
        # the transferred image is not the same as the expected
        # size of the image file. See LP Bug #882585.
def checked_iter(image_id, expected_size, image_iter): bytes_written = 0 def notify_image_sent_hook(env): self.image_send_notification(bytes_written, expected_size, image_meta, response.request) # Add hook to process after response is fully sent if 'eventlet.posthooks' in response.request.environ: response.request.environ['eventlet.posthooks'].append( (notify_image_sent_hook, (), {})) try: for chunk in image_iter: yield chunk bytes_written += len(chunk) except Exception, err: msg = _("An error occurred reading from backend storage " "for image %(image_id): %(err)s") % locals() logger.error(msg) raise if expected_size != bytes_written: msg = _("Backend storage for image %(image_id)s " "disconnected after writing only %(bytes_written)d " "bytes") % locals() logger.error(msg) raise IOError(errno.EPIPE, _("Corrupt image download for " "image %(image_id)s") % locals()) image_iter = result['image_iterator'] # image_meta['size'] is a str expected_size = int(image_meta['size']) response.app_iter = checked_iter(image_id, expected_size, image_iter) # Using app_iter blanks content-length, so we set it here... 
response.headers['Content-Length'] = image_meta['size']
        response.headers['Content-Type'] = 'application/octet-stream'

        self._inject_image_meta_headers(response, image_meta)
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)

        return response

    def update(self, response, result):
        """Serialize a PUT response: full metadata as a JSON body."""
        image_meta = result['image_meta']
        response.body = self.to_json(dict(image=image_meta))
        response.headers['Content-Type'] = 'application/json'
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)
        return response

    def create(self, response, result):
        """Serialize a POST response: 201 Created with a JSON body."""
        image_meta = result['image_meta']
        response.status = 201
        response.headers['Content-Type'] = 'application/json'
        response.body = self.to_json(dict(image=image_meta))
        self._inject_location_header(response, image_meta)
        self._inject_checksum_header(response, image_meta)
        return response


def create_resource(conf):
    """Images resource factory method"""
    deserializer = ImageDeserializer()
    serializer = ImageSerializer(conf)
    return wsgi.Resource(Controller(conf), deserializer, serializer)
# NOTE: the two lines below are dataset-concatenation residue (file-path
# markers between two unrelated source files), preserved as comments:
# ./CrossVul/dataset_final_sorted/CWE-264/py/good_3773_0
# crossvul-python_data_good_3632_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Cloud Controller: Implementation of EC2 REST API calls, which are
dispatched to other nodes via AMQP RPC. State is via distributed
datastore.
"""

import base64
import re
import time
import urllib

from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api import validator
from nova import block_device
from nova import compute
from nova.compute import instance_types
from nova.compute import vm_states
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova.image import s3
from nova import log as logging
from nova import network
from nova import quota
from nova import utils
from nova import volume


FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)


def validate_ec2_id(val):
    """Raise InvalidInstanceIDMalformed unless *val* is a well-formed
    string that ec2utils can translate to an internal id."""
    if not validator.validate_str()(val):
        raise exception.InvalidInstanceIDMalformed(val)
    try:
        ec2utils.ec2_id_to_id(val)
    except exception.InvalidEc2Id:
        raise exception.InvalidInstanceIDMalformed(val)


def _gen_key(context, user_id, key_name):
    """Generate a key

    This is a module level method because it is slow and we need to defer
    it into a process pool."""
    # NOTE(vish): generating key pair is slow so check for legal
    #             creation before creating key_pair
    try:
        db.key_pair_get(context, user_id, key_name)
        raise exception.KeyPairExists(key_name=key_name)
    except exception.NotFound:
        pass
    private_key, public_key, fingerprint = crypto.generate_key_pair()
    key = {}
    key['user_id'] = user_id
    key['name'] = key_name
    key['public_key'] = public_key
    key['fingerprint'] = fingerprint
    db.key_pair_create(context, key)
    return {'private_key': private_key, 'fingerprint': fingerprint}


# EC2 API can return the following values as documented in the EC2 API
# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
#     ApiReference-ItemType-InstanceStateType.html
# pending 0 | running 16 | shutting-down 32 | terminated 48 | stopping 64 |
# stopped 80
_STATE_DESCRIPTION_MAP = {
    None: inst_state.PENDING,
    vm_states.ACTIVE: inst_state.RUNNING,
    vm_states.BUILDING: inst_state.PENDING,
    vm_states.REBUILDING: inst_state.PENDING,
    vm_states.DELETED: inst_state.TERMINATED,
    vm_states.SOFT_DELETE: inst_state.TERMINATED,
    vm_states.STOPPED: inst_state.STOPPED,
    vm_states.SHUTOFF: inst_state.SHUTOFF,
    vm_states.MIGRATING: inst_state.MIGRATE,
    vm_states.RESIZING: inst_state.RESIZE,
    vm_states.PAUSED: inst_state.PAUSE,
    vm_states.SUSPENDED: inst_state.SUSPEND,
    vm_states.RESCUED: inst_state.RESCUE,
}


def _state_description(vm_state, shutdown_terminate):
    """Map the vm state to the server status string"""
    # SHUTOFF without shutdown_terminate is presented as STOPPED.
    if (vm_state == vm_states.SHUTOFF and
            not shutdown_terminate):
        name = inst_state.STOPPED
    else:
        name = _STATE_DESCRIPTION_MAP.get(vm_state, vm_state)

    return {'code': inst_state.name_to_code(name),
            'name': name}


def _parse_block_device_mapping(bdm):
    """Parse BlockDeviceMappingItemType into flat hash
    BlockDevicedMapping.<N>.DeviceName
    BlockDevicedMapping.<N>.Ebs.SnapshotId
    BlockDevicedMapping.<N>.Ebs.VolumeSize
    BlockDevicedMapping.<N>.Ebs.DeleteOnTermination
    BlockDevicedMapping.<N>.Ebs.NoDevice
    BlockDevicedMapping.<N>.VirtualName
    => remove .Ebs and allow volume id in SnapshotId
    """
    ebs = bdm.pop('ebs', None)
    if ebs:
        ec2_id = ebs.pop('snapshot_id', None)
        if ec2_id:
            # The SnapshotId field is overloaded: it may carry either a
            # snapshot id ('snap-...') or a volume id ('vol-...').
            id = ec2utils.ec2_id_to_id(ec2_id)
            if
ec2_id.startswith('snap-'):
                bdm['snapshot_id'] = id
            elif ec2_id.startswith('vol-'):
                bdm['volume_id'] = id
            ebs.setdefault('delete_on_termination', True)
        bdm.update(ebs)
    return bdm


def _properties_get_mappings(properties):
    # Normalize the image 'mappings' property (prepends '/dev/' as needed).
    return block_device.mappings_prepend_dev(properties.get('mappings', []))


def _format_block_device_mapping(bdm):
    """Construct BlockDeviceMappingItemType
    {'device_name': '...', 'snapshot_id': , ...}
    => BlockDeviceMappingItemType
    """
    keys = (('deviceName', 'device_name'),
            ('virtualName', 'virtual_name'))
    item = {}
    for name, k in keys:
        if k in bdm:
            item[name] = bdm[k]
    if bdm.get('no_device'):
        item['noDevice'] = True
    if ('snapshot_id' in bdm) or ('volume_id' in bdm):
        ebs_keys = (('snapshotId', 'snapshot_id'),
                    ('snapshotId', 'volume_id'),  # snapshotId is abused
                    ('volumeSize', 'volume_size'),
                    ('deleteOnTermination', 'delete_on_termination'))
        ebs = {}
        for name, k in ebs_keys:
            if k in bdm:
                if k == 'snapshot_id':
                    ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
                elif k == 'volume_id':
                    ebs[name] = ec2utils.id_to_ec2_vol_id(bdm[k])
                else:
                    ebs[name] = bdm[k]
        assert 'snapshotId' in ebs
        item['ebs'] = ebs
    return item


def _format_mappings(properties, result):
    """Format multiple BlockDeviceMappingItemType"""
    mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']}
                for m in _properties_get_mappings(properties)
                if block_device.is_swap_or_ephemeral(m['virtual'])]

    block_device_mapping = [_format_block_device_mapping(bdm) for bdm in
                            properties.get('block_device_mapping', [])]

    # NOTE(yamahata): overwrite mappings with block_device_mapping
    for bdm in block_device_mapping:
        for i in range(len(mappings)):
            if bdm['deviceName'] == mappings[i]['deviceName']:
                del mappings[i]
                break
        mappings.append(bdm)

    # NOTE(yamahata): trim ebs.no_device == true. Is this necessary?
    mappings = [bdm for bdm in mappings if not (bdm.get('noDevice', False))]

    if mappings:
        result['blockDeviceMapping'] = mappings


class CloudController(object):
    """CloudController provides the critical dispatch between
    inbound API calls through the endpoint and messages
    sent to the other nodes.
    """

    def __init__(self):
        self.image_service = s3.S3ImageService()
        self.network_api = network.API()
        self.volume_api = volume.API()
        self.compute_api = compute.API(network_api=self.network_api,
                                       volume_api=self.volume_api)
        self.sgh = utils.import_object(FLAGS.security_group_handler)

    def __str__(self):
        return 'CloudController'

    def _get_image_state(self, image):
        # NOTE(vish): fallback status if image_state isn't set
        state = image.get('status')
        if state == 'active':
            state = 'available'
        return image['properties'].get('image_state', state)

    def describe_availability_zones(self, context, **kwargs):
        # Admins asking for zone_name containing 'verbose' get the
        # per-host/per-service breakdown.
        if ('zone_name' in kwargs and
                'verbose' in kwargs['zone_name'] and
                context.is_admin):
            return self._describe_availability_zones_verbose(context,
                                                             **kwargs)
        else:
            return self._describe_availability_zones(context, **kwargs)

    def _describe_availability_zones(self, context, **kwargs):
        """List distinct zones of enabled vs disabled services."""
        ctxt = context.elevated()
        enabled_services = db.service_get_all(ctxt, False)
        disabled_services = db.service_get_all(ctxt, True)
        available_zones = []
        for zone in [service.availability_zone for service
                     in enabled_services]:
            if not zone in available_zones:
                available_zones.append(zone)
        not_available_zones = []
        for zone in [service.availability_zone for service in disabled_services
                     if not service['availability_zone'] in available_zones]:
            if not zone in not_available_zones:
                not_available_zones.append(zone)
        result = []
        for zone in available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "available"})
        for zone in not_available_zones:
            result.append({'zoneName': zone,
                           'zoneState': "not available"})
        return {'availabilityZoneInfo': result}

    def _describe_availability_zones_verbose(self, context, **kwargs):
        # ASCII-art tree of hosts and their services; ':-)' = alive.
        rv =
{'availabilityZoneInfo': [{'zoneName': 'nova',
                           'zoneState': 'available'}]}

        services = db.service_get_all(context, False)
        hosts = []
        for host in [service['host'] for service in services]:
            if not host in hosts:
                hosts.append(host)
        for host in hosts:
            rv['availabilityZoneInfo'].append({'zoneName': '|- %s' % host,
                                               'zoneState': ''})
            hsvcs = [service for service in services
                     if service['host'] == host]
            for svc in hsvcs:
                alive = utils.service_is_up(svc)
                art = (alive and ":-)") or "XXX"
                active = 'enabled'
                if svc['disabled']:
                    active = 'disabled'
                rv['availabilityZoneInfo'].append({
                    'zoneName': '| |- %s' % svc['binary'],
                    'zoneState': '%s %s %s' % (active, art,
                                               svc['updated_at'])})
        return rv

    def describe_regions(self, context, region_name=None, **kwargs):
        """Build the region list from FLAGS.region_list ('name=host'
        entries), falling back to a single 'nova' region."""
        if FLAGS.region_list:
            regions = []
            for region in FLAGS.region_list:
                name, _sep, host = region.partition('=')
                endpoint = '%s://%s:%s%s' % (FLAGS.ec2_scheme, host,
                                             FLAGS.ec2_port, FLAGS.ec2_path)
                regions.append({'regionName': name,
                                'regionEndpoint': endpoint})
        else:
            regions = [{'regionName': 'nova',
                        'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
                                                            FLAGS.ec2_host,
                                                            FLAGS.ec2_port,
                                                            FLAGS.ec2_path)}]
        return {'regionInfo': regions}

    def describe_snapshots(self, context, snapshot_id=None, owner=None,
                           restorable_by=None, **kwargs):
        """List snapshots, optionally restricted to the given EC2 ids.

        NOTE(review): 'owner' and 'restorable_by' are accepted for EC2
        API compatibility but not used for filtering here.
        """
        if snapshot_id:
            snapshots = []
            for ec2_id in snapshot_id:
                internal_id = ec2utils.ec2_id_to_id(ec2_id)
                snapshot = self.volume_api.get_snapshot(
                    context,
                    snapshot_id=internal_id)
                snapshots.append(snapshot)
        else:
            snapshots = self.volume_api.get_all_snapshots(context)
        snapshots = [self._format_snapshot(context, s) for s in snapshots]
        return {'snapshotSet': snapshots}

    def _format_snapshot(self, context, snapshot):
        # Translate an internal snapshot record into the EC2 item type.
        s = {}
        s['snapshotId'] = ec2utils.id_to_ec2_snap_id(snapshot['id'])
        s['volumeId'] = ec2utils.id_to_ec2_vol_id(snapshot['volume_id'])
        s['status'] = snapshot['status']
        s['startTime'] = snapshot['created_at']
        s['progress'] = snapshot['progress']
        s['ownerId'] = snapshot['project_id']
        s['volumeSize'] = snapshot['volume_size']
        s['description'] = snapshot['display_description']
        return s

    def create_snapshot(self, context, volume_id, **kwargs):
        """Snapshot the given EC2 volume id; returns the formatted item."""
        validate_ec2_id(volume_id)
        LOG.audit(_("Create snapshot of volume %s"), volume_id,
                  context=context)
        volume_id = ec2utils.ec2_id_to_id(volume_id)
        volume = self.volume_api.get(context, volume_id)
        snapshot = self.volume_api.create_snapshot(
            context,
            volume,
            None,
            kwargs.get('description'))
        return self._format_snapshot(context, snapshot)

    def delete_snapshot(self, context, snapshot_id, **kwargs):
        snapshot_id = ec2utils.ec2_id_to_id(snapshot_id)
        snapshot = self.volume_api.get_snapshot(context, snapshot_id)
        self.volume_api.delete_snapshot(context, snapshot)
        return True

    def describe_key_pairs(self, context, key_name=None, **kwargs):
        """List the caller's key pairs, optionally filtered by name."""
        key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
        if not key_name is None:
            key_pairs = [x for x in key_pairs if x['name'] in key_name]

        result = []
        for key_pair in key_pairs:
            # filter out the vpn keys
            suffix = FLAGS.vpn_key_suffix
            if context.is_admin or not key_pair['name'].endswith(suffix):
                result.append({
                    'keyName': key_pair['name'],
                    'keyFingerprint': key_pair['fingerprint'],
                })

        return {'keySet': result}

    def create_key_pair(self, context, key_name, **kwargs):
        """Create and return a new key pair after validating its name."""
        if not re.match('^[a-zA-Z0-9_\- ]+$', str(key_name)):
            err = _("Value (%s) for KeyName is invalid."
                    " Content limited to Alphanumeric character, "
                    "spaces, dashes, and underscore.") % key_name
            raise exception.EC2APIError(err)
        if len(str(key_name)) > 255:
            err = _("Value (%s) for Keyname is invalid."
                    " Length exceeds maximum of 255.") % key_name
            raise exception.EC2APIError(err)

        LOG.audit(_("Create key pair %s"), key_name, context=context)
        data = _gen_key(context, context.user_id, key_name)

        return {'keyName': key_name,
                'keyFingerprint': data['fingerprint'],
                'keyMaterial': data['private_key']}
        # TODO(vish): when context is no longer an object, pass it here

    def import_key_pair(self, context, key_name, public_key_material,
                        **kwargs):
        """Register a caller-supplied (base64) public key as a key pair."""
        LOG.audit(_("Import key %s"), key_name, context=context)
        try:
            db.key_pair_get(context, context.user_id, key_name)
            raise exception.KeyPairExists(key_name=key_name)
        except exception.NotFound:
            pass
        public_key = base64.b64decode(public_key_material)
        fingerprint = crypto.generate_fingerprint(public_key)
        key = {}
        key['user_id'] = context.user_id
        key['name'] = key_name
        key['public_key'] = public_key
        key['fingerprint'] = fingerprint
        db.key_pair_create(context, key)
        return {'keyName': key_name,
                'keyFingerprint': fingerprint}

    def delete_key_pair(self, context, key_name, **kwargs):
        LOG.audit(_("Delete key pair %s"), key_name, context=context)
        try:
            db.key_pair_destroy(context, context.user_id, key_name)
        except exception.NotFound:
            # aws returns true even if the key doesn't exist
            pass
        return True

    def describe_security_groups(self, context, group_name=None,
                                 group_id=None, **kwargs):
        """List security groups by name/id, or all visible to the caller."""
        self.compute_api.ensure_default_security_group(context)
        if group_name or group_id:
            groups = []
            if group_name:
                for name in group_name:
                    group = db.security_group_get_by_name(context,
                                                          context.project_id,
                                                          name)
                    groups.append(group)
            if group_id:
                for gid in group_id:
                    group = db.security_group_get(context, gid)
                    groups.append(group)
        elif context.is_admin:
            groups = db.security_group_get_all(context)
        else:
            groups = db.security_group_get_by_project(context,
                                                      context.project_id)
        groups = [self._format_security_group(context, g) for g in groups]

        return {'securityGroupInfo':
                list(sorted(groups,
                            key=lambda k: (k['ownerId'], k['groupName'])))}

    def _format_security_group(self, context, group):
        """Translate a security group record into the EC2 response shape."""
        g = {}
        g['groupDescription'] = group.description
        g['groupName'] = group.name
        g['ownerId'] = group.project_id
        g['ipPermissions'] = []
        for rule in group.rules:
            r = {}
            r['groups'] = []
            r['ipRanges'] = []
            if rule.group_id:
                # Group-sourced rule.
                source_group = db.security_group_get(context, rule.group_id)
                r['groups'] += [{'groupName': source_group.name,
                                 'userId': source_group.project_id}]
                if rule.protocol:
                    r['ipProtocol'] = rule.protocol
                    r['fromPort'] = rule.from_port
                    r['toPort'] = rule.to_port
                    g['ipPermissions'] += [dict(r)]
                else:
                    # No protocol on the rule: expand into one permission
                    # per protocol covering its full port/type range.
                    for protocol, min_port, max_port in (('icmp', -1, -1),
                                                         ('tcp', 1, 65535),
                                                         ('udp', 1, 65535)):
                        r['ipProtocol'] = protocol
                        r['fromPort'] = min_port
                        r['toPort'] = max_port
                        g['ipPermissions'] += [dict(r)]
            else:
                # CIDR-sourced rule.
                r['ipProtocol'] = rule.protocol
                r['fromPort'] = rule.from_port
                r['toPort'] = rule.to_port
                r['ipRanges'] += [{'cidrIp': rule.cidr}]
                g['ipPermissions'] += [r]
        return g

    def _rule_args_to_dict(self, context, kwargs):
        """Expand raw EC2 rule arguments into a list of rule dicts."""
        rules = []
        if not 'groups' in kwargs and not 'ip_ranges' in kwargs:
            rule = self._rule_dict_last_step(context, **kwargs)
            if rule:
                rules.append(rule)
            return rules
        if 'ip_ranges' in kwargs:
            rules = self._cidr_args_split(kwargs)
        else:
            rules = [kwargs]
        finalset = []
        for rule in rules:
            if 'groups' in rule:
                groups_values = self._groups_args_split(rule)
                for groups_value in groups_values:
                    final = self._rule_dict_last_step(context, **groups_value)
                    finalset.append(final)
            else:
                final = self._rule_dict_last_step(context, **rule)
                finalset.append(final)
        return finalset

    def _cidr_args_split(self, kwargs):
        # One kwargs copy per CIDR entry, with 'ip_ranges' flattened
        # into 'cidr_ip'.  (iteritems: this module is Python 2.)
        cidr_args_split = []
        cidrs = kwargs['ip_ranges']
        for key, cidr in cidrs.iteritems():
            mykwargs = kwargs.copy()
            del mykwargs['ip_ranges']
            mykwargs['cidr_ip'] = cidr['cidr_ip']
            cidr_args_split.append(mykwargs)
        return cidr_args_split

    def _groups_args_split(self, kwargs):
        # One kwargs copy per source-group entry, with 'groups'
        # flattened into source_security_group_* keys.
        groups_args_split = []
        groups = kwargs['groups']
        for key, group in groups.iteritems():
            mykwargs = kwargs.copy()
            del mykwargs['groups']
            if 'group_name' in group:
                mykwargs['source_security_group_name'] = group['group_name']
            if 'user_id' in group:
                mykwargs['source_security_group_owner_id'] = group['user_id']
            if 'group_id' in group:
                mykwargs['source_security_group_id'] = group['group_id']
            groups_args_split.append(mykwargs)
        return groups_args_split

    def _rule_dict_last_step(self, context, to_port=None, from_port=None,
                             ip_protocol=None, cidr_ip=None, user_id=None,
                             source_security_group_name=None,
                             source_security_group_owner_id=None):
        """Validate one rule's arguments and build its values dict.

        Returns the dict of rule values, or None when a CIDR rule lacks
        the mandatory protocol/port information.
        """
        values = {}

        if source_security_group_name:
            source_project_id = self._get_source_project_id(
                context, source_security_group_owner_id)

            source_security_group = db.security_group_get_by_name(
                context.elevated(),
                source_project_id,
                source_security_group_name)
            notfound = exception.SecurityGroupNotFound
            if not source_security_group:
                raise notfound(security_group_id=source_security_group_name)
            values['group_id'] = source_security_group['id']
        elif cidr_ip:
            # If this fails, it throws an exception. This is what we want.
            cidr_ip = urllib.unquote(cidr_ip).decode()
            if not utils.is_valid_cidr(cidr_ip):
                # Raise exception for non-valid address
                raise exception.EC2APIError(_("Invalid CIDR"))
            values['cidr'] = cidr_ip
        else:
            values['cidr'] = '0.0.0.0/0'

        if source_security_group_name:
            # Open everything if an explicit port range or type/code are not
            # specified, but only if a source group was specified.
            ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
            if (ip_proto_upper == 'ICMP' and
                    from_port is None and to_port is None):
                from_port = -1
                to_port = -1
            elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
                  and to_port is None):
                from_port = 1
                to_port = 65535

        if ip_protocol and from_port is not None and to_port is not None:

            ip_protocol = str(ip_protocol)
            try:
                # Verify integer conversions
                from_port = int(from_port)
                to_port = int(to_port)
            except ValueError:
                if ip_protocol.upper() == 'ICMP':
                    raise exception.InvalidInput(reason="Type and"
                        " Code must be integers for ICMP protocol type")
                else:
                    raise exception.InvalidInput(reason="To and From ports "
                        "must be integers")

            if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
                raise exception.InvalidIpProtocol(protocol=ip_protocol)

            # Verify that from_port must always be less than
            # or equal to to_port
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                    (from_port > to_port)):
                raise exception.InvalidPortRange(from_port=from_port,
                    to_port=to_port, msg="Former value cannot"
                                         " be greater than the later")

            # Verify valid TCP, UDP port ranges
            if (ip_protocol.upper() in ['TCP', 'UDP'] and
                    (from_port < 1 or to_port > 65535)):
                raise exception.InvalidPortRange(from_port=from_port,
                    to_port=to_port, msg="Valid TCP ports should"
                                         " be between 1-65535")

            # Verify ICMP type and code
            if (ip_protocol.upper() == "ICMP" and
                    (from_port < -1 or from_port > 255 or
                     to_port < -1 or to_port > 255)):
                raise exception.InvalidPortRange(from_port=from_port,
                    to_port=to_port, msg="For ICMP, the"
                                         " type:code must be valid")

            values['protocol'] = ip_protocol
            values['from_port'] = from_port
            values['to_port'] = to_port
        else:
            # If cidr based filtering, protocol and ports are mandatory
            if 'cidr' in values:
                return None

        return values

    def _security_group_rule_exists(self, security_group, values):
        """Indicates whether the specified rule values are already
           defined in the given security group.
        """
        # Returns the matching rule's id (truthy) or False.
        for rule in security_group.rules:
            is_duplicate = True
            keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    is_duplicate = False
                    break
            if is_duplicate:
                return rule['id']
        return False

    def revoke_security_group_ingress(self, context, group_name=None,
                                      group_id=None, **kwargs):
        """Remove matching ingress rules from a security group.

        :raises EC2APIError: when neither name nor id is given, or no
            matching rule exists.
        """
        if not group_name and not group_id:
            err = _("Not enough parameters, need group_name or group_id")
            raise exception.EC2APIError(err)
        self.compute_api.ensure_default_security_group(context)
        notfound = exception.SecurityGroupNotFound
        if group_name:
            security_group = db.security_group_get_by_name(context,
                                                           context.project_id,
                                                           group_name)
            if not security_group:
                raise notfound(security_group_id=group_name)
        if group_id:
            security_group = db.security_group_get(context, group_id)
            if not security_group:
                raise notfound(security_group_id=group_id)

        msg = _("Revoke security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)
        # Boto may pass a pre-built 'ip_permissions' list; otherwise the
        # flat kwargs themselves describe a single rule.
        prevalues = []
        try:
            prevalues = kwargs['ip_permissions']
        except KeyError:
            prevalues.append(kwargs)
        rule_id = None
        rule_ids = []
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            if not rulesvalues:
                err = _("%s Not enough parameters to build a valid rule")
                raise exception.EC2APIError(err % rulesvalues)

            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group.id
                rule_id = self._security_group_rule_exists(security_group,
                                                           values_for_rule)
                if rule_id:
                    db.security_group_rule_destroy(context, rule_id)
                    rule_ids.append(rule_id)
        if rule_id:
            # NOTE(vish): we removed a rule, so refresh
            self.compute_api.trigger_security_group_rules_refresh(
                context,
                security_group_id=security_group['id'])
            self.sgh.trigger_security_group_rule_destroy_refresh(
                context, rule_ids)
            return True
        raise exception.EC2APIError(_("No rule for the specified parameters."))

    # TODO(soren): This has only been tested with Boto as the client.
    #              Unfortunately, it seems Boto is using an old API
    #              for these operations, so support for newer API versions
    #              is sketchy.
    def authorize_security_group_ingress(self, context, group_name=None,
                                         group_id=None, **kwargs):
        """Add ingress rules to a security group.

        :raises EC2APIError: missing parameters, duplicate rule, quota
            exceeded, or no valid rule could be built.
        """
        if not group_name and not group_id:
            err = _("Not enough parameters, need group_name or group_id")
            raise exception.EC2APIError(err)
        self.compute_api.ensure_default_security_group(context)
        notfound = exception.SecurityGroupNotFound
        if group_name:
            security_group = db.security_group_get_by_name(context,
                                                           context.project_id,
                                                           group_name)
            if not security_group:
                raise notfound(security_group_id=group_name)
        if group_id:
            security_group = db.security_group_get(context, group_id)
            if not security_group:
                raise notfound(security_group_id=group_id)

        msg = _("Authorize security group ingress %s")
        LOG.audit(msg, security_group['name'], context=context)
        prevalues = []
        try:
            prevalues = kwargs['ip_permissions']
        except KeyError:
            prevalues.append(kwargs)
        # Validate everything first; only create rules once all are known
        # to be new and well-formed.
        postvalues = []
        for values in prevalues:
            rulesvalues = self._rule_args_to_dict(context, values)
            if not rulesvalues:
                err = _("%s Not enough parameters to build a valid rule")
                raise exception.EC2APIError(err % rulesvalues)
            for values_for_rule in rulesvalues:
                values_for_rule['parent_group_id'] = security_group.id
                if self._security_group_rule_exists(security_group,
                                                    values_for_rule):
                    err = _('%s - This rule already exists in group')
                    raise exception.EC2APIError(err % values_for_rule)
                postvalues.append(values_for_rule)

        allowed = quota.allowed_security_group_rules(context,
                                                     security_group['id'],
                                                     1)
        if allowed < 1:
            msg = _("Quota exceeded, too many security group rules.")
            raise exception.EC2APIError(msg)

        rule_ids = []
        for values_for_rule in postvalues:
            security_group_rule = db.security_group_rule_create(
                context,
                values_for_rule)
            rule_ids.append(security_group_rule['id'])

        if postvalues:
            self.compute_api.trigger_security_group_rules_refresh(
                context,
                security_group_id=security_group['id'])
self.sgh.trigger_security_group_rule_create_refresh( context, rule_ids) return True raise exception.EC2APIError(_("No rule for the specified parameters.")) def _get_source_project_id(self, context, source_security_group_owner_id): if source_security_group_owner_id: # Parse user:project for source group. source_parts = source_security_group_owner_id.split(':') # If no project name specified, assume it's same as user name. # Since we're looking up by project name, the user name is not # used here. It's only read for EC2 API compatibility. if len(source_parts) == 2: source_project_id = source_parts[1] else: source_project_id = source_parts[0] else: source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): # Some validation to ensure that values match API spec. # - Alphanumeric characters, spaces, dashes, and underscores. # TODO(Daviey): LP: #813685 extend beyond group_name checking, and # probably create a param validator that can be used elsewhere. err = _("Value (%s) for parameter GroupName is invalid." " Content limited to Alphanumeric characters, " "spaces, dashes, and underscores.") % group_name # err not that of master ec2 implementation, as they fail to raise. raise exception.InvalidParameterValue(err=err) if len(str(group_name)) > 255: err = _("Value (%s) for parameter GroupName is invalid." 
" Length exceeds maximum of 255.") % group_name raise exception.InvalidParameterValue(err=err) LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): msg = _('group %s already exists') raise exception.EC2APIError(msg % group_name) if quota.allowed_security_groups(context, 1) < 1: msg = _("Quota exceeded, too many security groups.") raise exception.EC2APIError(msg) group = {'user_id': context.user_id, 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) self.sgh.trigger_security_group_create_refresh(context, group) return {'securityGroupSet': [self._format_security_group(context, group_ref)]} def delete_security_group(self, context, group_name=None, group_id=None, **kwargs): if not group_name and not group_id: err = _("Not enough parameters, need group_name or group_id") raise exception.EC2APIError(err) notfound = exception.SecurityGroupNotFound if group_name: security_group = db.security_group_get_by_name(context, context.project_id, group_name) if not security_group: raise notfound(security_group_id=group_name) elif group_id: security_group = db.security_group_get(context, group_id) if not security_group: raise notfound(security_group_id=group_id) if db.security_group_in_use(context, security_group.id): raise exception.InvalidGroup(reason="In Use") LOG.audit(_("Delete security group %s"), group_name, context=context) db.security_group_destroy(context, security_group.id) self.sgh.trigger_security_group_destroy_refresh(context, security_group.id) return True def get_console_output(self, context, instance_id, **kwargs): LOG.audit(_("Get console output for instance %s"), instance_id, context=context) # instance_id may be passed in as a list of instances if isinstance(instance_id, list): ec2_id = instance_id[0] else: ec2_id = instance_id 
validate_ec2_id(ec2_id) instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, instance_id) output = self.compute_api.get_console_output(context, instance) now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} def describe_volumes(self, context, volume_id=None, **kwargs): if volume_id: volumes = [] for ec2_id in volume_id: validate_ec2_id(ec2_id) internal_id = ec2utils.ec2_id_to_id(ec2_id) volume = self.volume_api.get(context, internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) volumes = [self._format_volume(context, v) for v in volumes] return {'volumeSet': volumes} def _format_volume(self, context, volume): instance_ec2_id = None instance_data = None if volume.get('instance', None): instance_id = volume['instance']['id'] instance_ec2_id = ec2utils.id_to_ec2_id(instance_id) instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} v['volumeId'] = ec2utils.id_to_ec2_vol_id(volume['id']) v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] v['createTime'] = volume['created_at'] if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], volume['project_id'], volume['host'], instance_data, volume['mountpoint']) if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': False, 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', 'volumeId': v['volumeId']}] else: v['attachmentSet'] = [{}] if volume.get('snapshot_id') is not None: v['snapshotId'] = ec2utils.id_to_ec2_snap_id(volume['snapshot_id']) else: v['snapshotId'] = None return v def create_volume(self, context, **kwargs): size = kwargs.get('size') if kwargs.get('snapshot_id') is not None: snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) snapshot = self.volume_api.get_snapshot(context, snapshot_id) 
LOG.audit(_("Create volume from snapshot %s"), snapshot_id, context=context) else: snapshot = None LOG.audit(_("Create volume of %s GB"), size, context=context) availability_zone = kwargs.get('availability_zone', None) volume = self.volume_api.create(context, size, None, None, snapshot, availability_zone=availability_zone) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) try: volume = self.volume_api.get(context, volume_id) self.volume_api.delete(context, volume) except exception.InvalidVolume: raise exception.EC2APIError(_('Delete Failed')) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): validate_ec2_id(instance_id) validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) msg = _("Attach volume %(volume_id)s to instance %(instance_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) try: self.compute_api.attach_volume(context, instance, volume_id, device) except exception.InvalidVolume: raise exception.EC2APIError(_('Attach Failed.')) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def detach_volume(self, context, volume_id, **kwargs): validate_ec2_id(volume_id) volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) try: instance = self.compute_api.detach_volume(context, volume_id=volume_id) 
except exception.InvalidVolume: raise exception.EC2APIError(_('Detach Volume Failed.')) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} def _format_kernel_id(self, context, instance_ref, result, key): kernel_uuid = instance_ref['kernel_id'] if kernel_uuid is None or kernel_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, kernel_uuid, 'aki') def _format_ramdisk_id(self, context, instance_ref, result, key): ramdisk_uuid = instance_ref['ramdisk_id'] if ramdisk_uuid is None or ramdisk_uuid == '': return result[key] = ec2utils.glance_id_to_ec2_id(context, ramdisk_uuid, 'ari') def describe_instance_attribute(self, context, instance_id, attribute, **kwargs): def _unsupported_attribute(instance, result): raise exception.EC2APIError(_('attribute not supported: %s') % attribute) def _format_attr_block_device_mapping(instance, result): tmp = {} self._format_instance_root_device_name(instance, tmp) self._format_instance_bdm(context, instance_id, tmp['rootDeviceName'], result) def _format_attr_disable_api_termination(instance, result): result['disableApiTermination'] = instance['disable_terminate'] def _format_attr_group_set(instance, result): CloudController._format_group_set(instance, result) def _format_attr_instance_initiated_shutdown_behavior(instance, result): if instance['shutdown_terminate']: result['instanceInitiatedShutdownBehavior'] = 'terminate' else: result['instanceInitiatedShutdownBehavior'] = 'stop' def _format_attr_instance_type(instance, result): self._format_instance_type(instance, result) def _format_attr_kernel(instance, result): self._format_kernel_id(context, instance, result, 'kernel') def _format_attr_ramdisk(instance, result): self._format_ramdisk_id(context, instance, result, 'ramdisk') def _format_attr_root_device_name(instance, 
result): self._format_instance_root_device_name(instance, result) def _format_attr_source_dest_check(instance, result): _unsupported_attribute(instance, result) def _format_attr_user_data(instance, result): result['userData'] = base64.b64decode(instance['user_data']) attribute_formatter = { 'blockDeviceMapping': _format_attr_block_device_mapping, 'disableApiTermination': _format_attr_disable_api_termination, 'groupSet': _format_attr_group_set, 'instanceInitiatedShutdownBehavior': _format_attr_instance_initiated_shutdown_behavior, 'instanceType': _format_attr_instance_type, 'kernel': _format_attr_kernel, 'ramdisk': _format_attr_ramdisk, 'rootDeviceName': _format_attr_root_device_name, 'sourceDestCheck': _format_attr_source_dest_check, 'userData': _format_attr_user_data, } fn = attribute_formatter.get(attribute) if fn is None: raise exception.EC2APIError( _('attribute not supported: %s') % attribute) ec2_instance_id = instance_id validate_ec2_id(instance_id) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) result = {'instance_id': ec2_instance_id} fn(instance, result) return result def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) return self._format_describe_instances(context, instance_id=instance_id, use_v6=True) def _format_describe_instances(self, context, **kwargs): return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] def _format_terminate_instances(self, context, instance_id, previous_states): instances_set = [] for (ec2_id, previous_state) in 
zip(instance_id, previous_states): i = {} i['instanceId'] = ec2_id i['previousState'] = _state_description(previous_state['vm_state'], previous_state['shutdown_terminate']) try: internal_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, internal_id) i['shutdownState'] = _state_description(instance['vm_state'], instance['shutdown_terminate']) except exception.NotFound: i['shutdownState'] = _state_description(vm_states.DELETED, True) instances_set.append(i) return {'instancesSet': instances_set} def _format_instance_bdm(self, context, instance_id, root_device_name, result): """Format InstanceBlockDeviceMappingResponseItemType""" root_device_type = 'instance-store' mapping = [] for bdm in db.block_device_mapping_get_all_by_instance(context, instance_id): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue if (bdm['device_name'] == root_device_name and (bdm['snapshot_id'] or bdm['volume_id'])): assert not bdm['virtual_name'] root_device_type = 'ebs' vol = self.volume_api.get(context, volume_id) LOG.debug(_("vol = %s\n"), vol) # TODO(yamahata): volume attach time ebs = {'volumeId': volume_id, 'deleteOnTermination': bdm['delete_on_termination'], 'attachTime': vol['attach_time'] or '-', 'status': vol['status'], } res = {'deviceName': bdm['device_name'], 'ebs': ebs, } mapping.append(res) if mapping: result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type @staticmethod def _format_instance_root_device_name(instance, result): result['rootDeviceName'] = (instance.get('root_device_name') or block_device.DEFAULT_ROOT_DEV_NAME) @staticmethod def _format_instance_type(instance, result): if instance['instance_type']: result['instanceType'] = instance['instance_type'].get('name') else: result['instanceType'] = None @staticmethod def _format_group_set(instance, result): security_group_names = [] if instance.get('security_groups'): for security_group in instance['security_groups']: 
security_group_names.append(security_group['name']) result['groupSet'] = utils.convert_to_list_dict( security_group_names, 'groupId') def _format_instances(self, context, instance_id=None, use_v6=False, **search_opts): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls # rather than simply formatting a bunch of instances that # were handed to it reservations = {} # NOTE(vish): instance_id is an optional list of ids to filter by if instance_id: instances = [] for ec2_id in instance_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) try: instance = self.compute_api.get(context, internal_id) except exception.NotFound: continue instances.append(instance) else: try: # always filter out deleted instances search_opts['deleted'] = False instances = self.compute_api.get_all(context, search_opts=search_opts, sort_dir='asc') except exception.NotFound: instances = [] for instance in instances: if not context.is_admin: if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id image_uuid = instance['image_ref'] i['imageId'] = ec2utils.glance_id_to_ec2_id(context, image_uuid) self._format_kernel_id(context, instance, i, 'kernelId') self._format_ramdisk_id(context, instance, i, 'ramdiskId') i['instanceState'] = _state_description( instance['vm_state'], instance['shutdown_terminate']) fixed_ip = None floating_ip = None ip_info = ec2utils.get_ip_info_for_instance(context, instance) if ip_info['fixed_ips']: fixed_ip = ip_info['fixed_ips'][0] if ip_info['floating_ips']: floating_ip = ip_info['floating_ips'][0] if ip_info['fixed_ip6s']: i['dnsNameV6'] = ip_info['fixed_ip6s'][0] if FLAGS.ec2_private_dns_show_ip: i['privateDnsName'] = fixed_ip else: i['privateDnsName'] = instance['hostname'] i['privateIpAddress'] = fixed_ip i['publicDnsName'] = floating_ip i['ipAddress'] = floating_ip or fixed_ip i['dnsName'] = 
i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] if context.is_admin: i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], instance['host']) i['productCodesSet'] = utils.convert_to_list_dict([], 'product_codes') self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] services = db.service_get_all_by_host(context.elevated(), host) zone = ec2utils.get_availability_zone_by_host(services, host) i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) return list(reservations.values()) def describe_addresses(self, context, **kwargs): return self.format_addresses(context) def format_addresses(self, context): addresses = [] floaters = self.network_api.get_floating_ips_by_project(context) for floating_ip_ref in floaters: if floating_ip_ref['project_id'] is None: continue address = floating_ip_ref['address'] ec2_id = None if floating_ip_ref['fixed_ip_id']: fixed_id = floating_ip_ref['fixed_ip_id'] fixed = self.network_api.get_fixed_ip(context, fixed_id) if fixed['instance_id'] is not None: ec2_id = ec2utils.id_to_ec2_id(fixed['instance_id']) address_rv = {'public_ip': address, 'instance_id': ec2_id} if context.is_admin: details = "%s (%s)" % (address_rv['instance_id'], floating_ip_ref['project_id']) address_rv['instance_id'] = details addresses.append(address_rv) return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) public_ip = 
self.network_api.allocate_floating_ip(context) return {'publicIp': public_ip} def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, address=public_ip) return {'return': "true"} def associate_address(self, context, instance_id, public_ip, **kwargs): LOG.audit(_("Associate address %(public_ip)s to" " instance %(instance_id)s") % locals(), context=context) instance_id = ec2utils.ec2_id_to_id(instance_id) instance = self.compute_api.get(context, instance_id) self.compute_api.associate_floating_ip(context, instance, address=public_ip) return {'return': "true"} def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, address=public_ip) return {'return': "true"} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) if kwargs.get('kernel_id'): kernel = self._get_image(context, kwargs['kernel_id']) kwargs['kernel_id'] = ec2utils.id_to_glance_id(context, kernel['id']) if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ec2utils.id_to_glance_id(context, ramdisk['id']) for bdm in kwargs.get('block_device_mapping', []): _parse_block_device_mapping(bdm) image = self._get_image(context, kwargs['image_id']) image_uuid = ec2utils.id_to_glance_id(context, image['id']) if image: image_state = self._get_image_state(image) else: raise exception.ImageNotFound(image_id=kwargs['image_id']) if image_state != 'available': raise exception.EC2APIError(_('Image must be available')) (instances, resv_id) = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), image_href=image_uuid, min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), 
            ramdisk_id=kwargs.get('ramdisk_id'),
            key_name=kwargs.get('key_name'),
            user_data=kwargs.get('user_data'),
            security_group=kwargs.get('security_group'),
            availability_zone=kwargs.get('placement', {}).get(
                'availability_zone'),
            block_device_mapping=kwargs.get('block_device_mapping', {}))
        return self._format_run_instances(context, resv_id)

    def terminate_instances(self, context, instance_id, **kwargs):
        """Terminate each instance in instance_id, which is a list of ec2 ids.
        instance_id is a kwarg so its name cannot be modified."""
        LOG.debug(_("Going to start terminating instances"))
        # Snapshot each instance's state before deletion so the response
        # can report the previous state alongside the shutdown state.
        previous_states = []
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            previous_states.append(instance)
            self.compute_api.delete(context, instance)
        return self._format_terminate_instances(context,
                                                instance_id,
                                                previous_states)

    def reboot_instances(self, context, instance_id, **kwargs):
        """instance_id is a list of instance ids"""
        LOG.audit(_("Reboot instance %r"), instance_id, context=context)
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            # Always a hard reboot; the EC2 API exposes no soft option here.
            self.compute_api.reboot(context, instance, 'HARD')
        return True

    def stop_instances(self, context, instance_id, **kwargs):
        """Stop each instances in instance_id.
        Here instance_id is a list of instance ids"""
        LOG.debug(_("Going to stop instances"))
        for ec2_id in instance_id:
            validate_ec2_id(ec2_id)
            _instance_id = ec2utils.ec2_id_to_id(ec2_id)
            instance = self.compute_api.get(context, _instance_id)
            self.compute_api.stop(context, instance)
        return True

    def start_instances(self, context, instance_id, **kwargs):
        """Start each instances in instance_id.
Here instance_id is a list of instance ids""" LOG.debug(_("Going to start instances")) for ec2_id in instance_id: validate_ec2_id(ec2_id) _instance_id = ec2utils.ec2_id_to_id(ec2_id) instance = self.compute_api.get(context, _instance_id) self.compute_api.start(context, instance) return True def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) image_type = ec2_id.split('-')[0] if ec2utils.image_type(image.get('container_format')) != image_type: raise exception.ImageNotFound(image_id=ec2_id) return image def _format_image(self, image): """Convert from format defined by GlanceImageService to S3 format.""" i = {} image_type = ec2utils.image_type(image.get('container_format')) ec2_id = ec2utils.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: i['kernelId'] = ec2utils.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: i['ramdiskId'] = ec2utils.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image.get('owner') img_loc = image['properties'].get('image_location') if img_loc: i['imageLocation'] = img_loc else: i['imageLocation'] = "%s (%s)" % (img_loc, name) i['name'] = name if not name and img_loc: # This should only occur for images registered with ec2 api # prior to that api populating the glance name i['name'] = img_loc i['imageState'] = self._get_image_state(image) i['description'] = image.get('description') display_mapping = {'aki': 'kernel', 'ari': 'ramdisk', 'ami': 'machine'} i['imageType'] = display_mapping.get(image_type) i['isPublic'] = not not image.get('is_public') i['architecture'] = image['properties'].get('architecture') properties 
= image['properties'] root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and ('snapshot_id' in bdm or 'volume_id' in bdm) and not bdm.get('no_device')): root_device_type = 'ebs' i['rootDeviceName'] = (root_device_name or block_device.DEFAULT_ROOT_DEV_NAME) i['rootDeviceType'] = root_device_type _format_mappings(properties, i) return i def describe_images(self, context, image_id=None, **kwargs): # NOTE: image_id is a list! if image_id: images = [] for ec2_id in image_id: try: image = self._get_image(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) images = [self._format_image(i) for i in images] return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): LOG.audit(_("De-registering image %s"), image_id, context=context) image = self._get_image(context, image_id) internal_id = image['id'] self.image_service.delete(context, internal_id) return {'imageId': image_id} def _register_image(self, context, metadata): image = self.image_service.create(context, metadata) image_type = ec2utils.image_type(image.get('container_format')) image_id = ec2utils.image_ec2_id(image['id'], image_type) return image_id def register_image(self, context, image_location=None, **kwargs): if image_location is None and kwargs.get('name'): image_location = kwargs['name'] if image_location is None: raise exception.EC2APIError(_('imageLocation is required')) metadata = {'properties': {'image_location': image_location}} if kwargs.get('name'): metadata['name'] = kwargs['name'] else: metadata['name'] = image_location if 'root_device_name' in kwargs: metadata['properties']['root_device_name'] = kwargs.get( 'root_device_name') mappings = [_parse_block_device_mapping(bdm) for bdm in 
kwargs.get('block_device_mapping', [])] if mappings: metadata['properties']['block_device_mapping'] = mappings image_id = self._register_image(context, metadata) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): def _block_device_mapping_attribute(image, result): _format_mappings(image['properties'], result) def _launch_permission_attribute(image, result): result['launchPermission'] = [] if image['is_public']: result['launchPermission'].append({'group': 'all'}) def _root_device_name_attribute(image, result): _prop_root_dev_name = block_device.properties_root_device_name result['rootDeviceName'] = _prop_root_dev_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = block_device.DEFAULT_ROOT_DEV_NAME supported_attributes = { 'blockDeviceMapping': _block_device_mapping_attribute, 'launchPermission': _launch_permission_attribute, 'rootDeviceName': _root_device_name_attribute, } fn = supported_attributes.get(attribute) if fn is None: raise exception.EC2APIError(_('attribute not supported: %s') % attribute) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id} fn(image, result) return result def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): # TODO(devcamcar): Support users and groups other than 'all'. 
if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) if not 'user_group' in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) try: image = self._get_image(context, image_id) except exception.NotFound: raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) image['is_public'] = (operation_type == 'add') try: return self.image_service.update(context, internal_id, image) except exception.ImageNotAuthorized: msg = _('Not allowed to modify attributes for image %s') raise exception.EC2APIError(msg % image_id) def update_image(self, context, image_id, **kwargs): internal_id = ec2utils.ec2_id_to_id(image_id) result = self.image_service.update(context, internal_id, dict(kwargs)) return result # TODO(yamahata): race condition # At the moment there is no way to prevent others from # manipulating instances/volumes/snapshots. # As other code doesn't take it into consideration, here we don't # care of it for now. Ostrich algorithm def create_image(self, context, instance_id, **kwargs): # NOTE(yamahata): name/description are ignored by register_image(), # do so here no_reboot = kwargs.get('no_reboot', False) validate_ec2_id(instance_id) ec2_instance_id = instance_id instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) instance = self.compute_api.get(context, instance_id) # stop the instance if necessary restart_instance = False if not no_reboot: vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. 
if vm_state not in (vm_states.ACTIVE, vm_states.SHUTOFF, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) if vm_state in (vm_states.ACTIVE, vm_states.SHUTOFF): restart_instance = True self.compute_api.stop(context, instance) # wait instance for really stopped start_time = time.time() while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? timeout = 1 * 60 * 60 * 60 if time.time() > start_time + timeout: raise exception.EC2APIError( _('Couldn\'t stop instance with in %d sec') % timeout) src_image = self._get_image(context, instance['image_ref']) properties = src_image['properties'] if instance['root_device_name']: properties['root_device_name'] = instance['root_device_name'] mapping = [] bdms = db.block_device_mapping_get_all_by_instance(context, instance_id) for bdm in bdms: if bdm.no_device: continue m = {} for attr in ('device_name', 'snapshot_id', 'volume_id', 'volume_size', 'delete_on_termination', 'no_device', 'virtual_name'): val = getattr(bdm, attr) if val is not None: m[attr] = val volume_id = m.get('volume_id') if m.get('snapshot_id') and volume_id: # create snapshot based on volume_id volume = self.volume_api.get(context, volume_id) # NOTE(yamahata): Should we wait for snapshot creation? # Linux LVM snapshot creation completes in # short time, it doesn't matter for now. 
                snapshot = self.volume_api.create_snapshot_force(
                    context, volume, volume['display_name'],
                    volume['display_description'])
                # The image references the snapshot, not the live volume.
                m['snapshot_id'] = snapshot['id']
                del m['volume_id']

            if m:
                mapping.append(m)

        # Carry over swap/ephemeral mappings declared by the source AMI,
        # unless the instance already defines that device above.
        for m in _properties_get_mappings(properties):
            virtual_name = m['virtual']
            if virtual_name in ('ami', 'root'):
                continue

            assert block_device.is_swap_or_ephemeral(virtual_name)
            device_name = m['device']
            if device_name in [b['device_name'] for b in mapping
                               if not b.get('no_device', False)]:
                continue

            # NOTE(yamahata): swap and ephemeral devices are specified in
            #                 AMI, but disabled for this instance by user.
            #                 So disable those device by no_device.
            mapping.append({'device_name': device_name, 'no_device': True})

        if mapping:
            properties['block_device_mapping'] = mapping

        # Strip per-image bookkeeping fields before re-registration.
        for attr in ('status', 'location', 'id'):
            src_image.pop(attr, None)

        image_id = self._register_image(context, src_image)

        if restart_instance:
            self.compute_api.start(context, instance_id=instance_id)

        return {'imageId': image_id}
./CrossVul/dataset_final_sorted/CWE-264/py/good_3632_0
crossvul-python_data_bad_3773_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ /images endpoint for Glance v1 API """ import errno import logging import sys import traceback from webob.exc import (HTTPError, HTTPNotFound, HTTPConflict, HTTPBadRequest, HTTPForbidden, HTTPRequestEntityTooLarge, HTTPServiceUnavailable, ) from glance.api import policy import glance.api.v1 from glance.api.v1 import controller from glance.api.v1 import filters from glance.common import cfg from glance.common import exception from glance.common import wsgi from glance.common import utils import glance.store import glance.store.filesystem import glance.store.http import glance.store.rbd import glance.store.s3 import glance.store.swift from glance.store import (get_from_backend, get_size_from_backend, schedule_delete_from_backend, get_store_from_location, get_store_from_scheme) from glance import registry from glance import notifier logger = logging.getLogger(__name__) SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS # 1 PiB, which is a *huge* image by anyone's measure. This is just to protect # against client programming errors (or DoS attacks) in the image metadata. # We have a known limit of 1 << 63 in the database -- images.size is declared # as a BigInteger. 
IMAGE_SIZE_CAP = 1 << 50 class Controller(controller.BaseController): """ WSGI controller for images resource in Glance v1 API The images resource API is a RESTful web service for image data. The API is as follows:: GET /images -- Returns a set of brief metadata about images GET /images/detail -- Returns a set of detailed metadata about images HEAD /images/<ID> -- Return metadata about an image with id <ID> GET /images/<ID> -- Return image data for image with id <ID> POST /images -- Store image data and return metadata about the newly-stored image PUT /images/<ID> -- Update image metadata and/or upload image data for a previously-reserved image DELETE /images/<ID> -- Delete the image with id <ID> """ default_store_opt = cfg.StrOpt('default_store', default='file') def __init__(self, conf): self.conf = conf self.conf.register_opt(self.default_store_opt) glance.store.create_stores(conf) self.verify_store_or_exit(self.conf.default_store) self.notifier = notifier.Notifier(conf) registry.configure_registry_client(conf) self.policy = policy.Enforcer(conf) def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise HTTPForbidden() def index(self, req): """ Returns the following information for all public, available images: * id -- The opaque image identifier * name -- The name of the image * disk_format -- The disk image format * container_format -- The "container" format of the image * checksum -- MD5 checksum of the image data * size -- Size of image data in bytes :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'disk_format': <DISK_FORMAT>, 'container_format': <DISK_FORMAT>, 'checksum': <CHECKSUM> 'size': <SIZE>}, ... 
]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_list(req.context, **params) except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def detail(self, req): """ Returns detailed information for all public, available images :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'size': <SIZE>, 'disk_format': <DISK_FORMAT>, 'container_format': <CONTAINER_FORMAT>, 'checksum': <CHECKSUM>, 'min_disk': <MIN_DISK>, 'min_ram': <MIN_RAM>, 'store': <STORE>, 'status': <STATUS>, 'created_at': <TIMESTAMP>, 'updated_at': <TIMESTAMP>, 'deleted_at': <TIMESTAMP>|<NONE>, 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ... ]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_detail(req.context, **params) # Strip out the Location attribute. Temporary fix for # LP Bug #755916. This information is still coming back # from the registry, since the API server still needs access # to it, however we do not return this potential security # information to the API end user... for image in images: del image['location'] except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def _get_query_params(self, req): """ Extracts necessary query params from request. 
:param req: the WSGI Request object :retval dict of parameters that can be used by registry client """ params = {'filters': self._get_filters(req)} for PARAM in SUPPORTED_PARAMS: if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params def _get_filters(self, req): """ Return a dictionary of query param filters from the request :param req: the Request object coming from the wsgi layer :retval a dict of key/value filters """ query_filters = {} for param in req.params: if param in SUPPORTED_FILTERS or param.startswith('property-'): query_filters[param] = req.params.get(param) if not filters.validate(param, query_filters[param]): raise HTTPBadRequest('Bad value passed to filter %s ' 'got %s' % (param, query_filters[param])) return query_filters def meta(self, req, id): """ Returns metadata about an image in the HTTP headers of the response object :param req: The WSGI/Webob Request object :param id: The opaque image identifier :retval similar to 'show' method but without image_data :raises HTTPNotFound if image metadata is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_image_meta_or_404(req, id) del image_meta['location'] return { 'image_meta': image_meta } @staticmethod def _validate_source(source, req): """ External sources (as specified via the location or copy-from headers) are supported only over non-local store types, i.e. S3, Swift, HTTP. Note the absence of file:// for security reasons, see LP bug #942118. If the above constraint is violated, we reject with 400 "Bad Request". 
""" if source: for scheme in ['s3', 'swift', 'http']: if source.lower().startswith(scheme): return source msg = _("External sourcing not supported for store %s") % source logger.error(msg) raise HTTPBadRequest(msg, request=req, content_type="text/plain") @staticmethod def _copy_from(req): return req.headers.get('x-glance-api-copy-from') @staticmethod def _external_source(image_meta, req): source = image_meta.get('location', Controller._copy_from(req)) return Controller._validate_source(source, req) @staticmethod def _get_from_store(where): try: image_data, image_size = get_from_backend(where) except exception.NotFound, e: raise HTTPNotFound(explanation="%s" % e) image_size = int(image_size) if image_size else None return image_data, image_size def show(self, req, id): """ Returns an iterator that can be used to retrieve an image's data along with the image metadata. :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HTTPNotFound if image is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_active_image_meta_or_404(req, id) if image_meta.get('size') == 0: image_iterator = iter([]) else: image_iterator, size = self._get_from_store(image_meta['location']) image_meta['size'] = size or image_meta['size'] del image_meta['location'] return { 'image_iterator': image_iterator, 'image_meta': image_meta, } def _reserve(self, req, image_meta): """ Adds the image metadata to the registry and assigns an image identifier if one is not supplied in the request headers. Sets the image's status to `queued`. 
:param req: The WSGI/Webob Request object :param id: The opaque image identifier :param image_meta: The image metadata :raises HTTPConflict if image already exists :raises HTTPBadRequest if image metadata is not valid """ location = self._external_source(image_meta, req) image_meta['status'] = ('active' if image_meta.get('size') == 0 else 'queued') if location: store = get_store_from_location(location) # check the store exists before we hit the registry, but we # don't actually care what it is at this point self.get_store_or_400(req, store) # retrieve the image size from remote store (if not provided) image_meta['size'] = self._get_size(image_meta, location) else: # Ensure that the size attribute is set to zero for directly # uploadable images (if not provided). The size will be set # to a non-zero value during upload image_meta['size'] = image_meta.get('size', 0) try: image_meta = registry.add_image_metadata(req.context, image_meta) return image_meta except exception.Duplicate: msg = (_("An image with identifier %s already exists") % image_meta['id']) logger.error(msg) raise HTTPConflict(msg, request=req, content_type="text/plain") except exception.Invalid, e: msg = (_("Failed to reserve image. Got error: %(e)s") % locals()) for line in msg.split('\n'): logger.error(line) raise HTTPBadRequest(msg, request=req, content_type="text/plain") except exception.Forbidden: msg = _("Forbidden to reserve image.") logger.error(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") def _upload(self, req, image_meta): """ Uploads the payload of the request to a backend store in Glance. If the `x-image-meta-store` header is set, Glance will attempt to use that store, if not, Glance will use the store set by the flag `default_store`. 
:param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :raises HTTPConflict if image already exists :retval The location where the image was stored """ copy_from = self._copy_from(req) if copy_from: image_data, image_size = self._get_from_store(copy_from) image_meta['size'] = image_size or image_meta['size'] else: try: req.get_content_type('application/octet-stream') except exception.InvalidContentType: self._safe_kill(req, image_meta['id']) msg = _("Content-Type must be application/octet-stream") logger.error(msg) raise HTTPBadRequest(explanation=msg) image_data = req.body_file if req.content_length: image_size = int(req.content_length) elif 'x-image-meta-size' in req.headers: image_size = int(req.headers['x-image-meta-size']) else: logger.debug(_("Got request with no content-length and no " "x-image-meta-size header")) image_size = 0 store_name = req.headers.get('x-image-meta-store', self.conf.default_store) store = self.get_store_or_400(req, store_name) image_id = image_meta['id'] logger.debug(_("Setting image %s to status 'saving'"), image_id) registry.update_image_metadata(req.context, image_id, {'status': 'saving'}) try: logger.debug(_("Uploading image data for image %(image_id)s " "to %(store_name)s store"), locals()) if image_size > IMAGE_SIZE_CAP: max_image_size = IMAGE_SIZE_CAP msg = _("Denying attempt to upload image larger than " "%(max_image_size)d. Supplied image size was " "%(image_size)d") % locals() logger.warn(msg) raise HTTPBadRequest(msg, request=req) location, size, checksum = store.add(image_meta['id'], image_data, image_size) # Verify any supplied checksum value matches checksum # returned from store when adding image supplied_checksum = image_meta.get('checksum') if supplied_checksum and supplied_checksum != checksum: msg = _("Supplied checksum (%(supplied_checksum)s) and " "checksum generated from uploaded image " "(%(checksum)s) did not match. 
Setting image " "status to 'killed'.") % locals() logger.error(msg) self._safe_kill(req, image_id) raise HTTPBadRequest(msg, content_type="text/plain", request=req) # Update the database with the checksum returned # from the backend store logger.debug(_("Updating image %(image_id)s data. " "Checksum set to %(checksum)s, size set " "to %(size)d"), locals()) update_data = {'checksum': checksum, 'size': size} image_meta = registry.update_image_metadata(req.context, image_id, update_data) self.notifier.info('image.upload', image_meta) return location except exception.Duplicate, e: msg = _("Attempt to upload duplicate image: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPConflict(msg, request=req) except exception.Forbidden, e: msg = _("Forbidden upload attempt: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") except exception.StorageFull, e: msg = _("Image storage media is full: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPRequestEntityTooLarge(msg, request=req, content_type='text/plain') except exception.StorageWriteDenied, e: msg = _("Insufficient permissions on image storage media: %s") % e logger.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPServiceUnavailable(msg, request=req, content_type='text/plain') except HTTPError, e: self._safe_kill(req, image_id) self.notifier.error('image.upload', e.explanation) raise except Exception, e: tb_info = traceback.format_exc() logger.error(tb_info) self._safe_kill(req, image_id) msg = _("Error uploading image: (%(class_name)s): " "%(exc)s") % ({'class_name': e.__class__.__name__, 'exc': str(e)}) self.notifier.error('image.upload', msg) raise HTTPBadRequest(msg, request=req) def _activate(self, req, image_id, location): """ Sets the image status 
to `active` and the image's location attribute. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param location: Location of where Glance stored this image """ image_meta = {} image_meta['location'] = location image_meta['status'] = 'active' try: return registry.update_image_metadata(req.context, image_id, image_meta) except exception.Invalid, e: msg = (_("Failed to activate image. Got error: %(e)s") % locals()) for line in msg.split('\n'): logger.error(line) self.notifier.error('image.update', msg) raise HTTPBadRequest(msg, request=req, content_type="text/plain") def _kill(self, req, image_id): """ Marks the image status to `killed`. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ registry.update_image_metadata(req.context, image_id, {'status': 'killed'}) def _safe_kill(self, req, image_id): """ Mark image killed without raising exceptions if it fails. Since _kill is meant to be called from exceptions handlers, it should not raise itself, rather it should just log its error. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ try: self._kill(req, image_id) except Exception, e: logger.error(_("Unable to kill image %(id)s: " "%(exc)s") % ({'id': image_id, 'exc': repr(e)})) def _upload_and_activate(self, req, image_meta): """ Safely uploads the image data in the request payload and activates the image in the registry after a successful upload. 
:param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :retval Mapping of updated image data """ image_id = image_meta['id'] # This is necessary because of a bug in Webob 1.0.2 - 1.0.7 # See: https://bitbucket.org/ianb/webob/ # issue/12/fix-for-issue-6-broke-chunked-transfer req.is_body_readable = True location = self._upload(req, image_meta) return self._activate(req, image_id, location) def _get_size(self, image_meta, location): # retrieve the image size from remote store (if not provided) return image_meta.get('size', 0) or get_size_from_backend(location) def _handle_source(self, req, image_id, image_meta, image_data): if image_data or self._copy_from(req): image_meta = self._upload_and_activate(req, image_meta) else: location = image_meta.get('location') if location: image_meta = self._activate(req, image_id, location) return image_meta def create(self, req, image_meta, image_data): """ Adds a new image to Glance. Four scenarios exist when creating an image: 1. If the image data is available directly for upload, create can be passed the image data as the request body and the metadata as the request headers. The image will initially be 'queued', during upload it will be in the 'saving' status, and then 'killed' or 'active' depending on whether the upload completed successfully. 2. If the image data exists somewhere else, you can upload indirectly from the external source using the x-glance-api-copy-from header. Once the image is uploaded, the external store is not subsequently consulted, i.e. the image content is served out from the configured glance image store. State transitions are as for option #1. 3. If the image data exists somewhere else, you can reference the source using the x-image-meta-location header. The image content will be served out from the external store, i.e. is never uploaded to the configured glance image store. 4. 
If the image data is not available yet, but you'd like reserve a spot for it, you can omit the data and a record will be created in the 'queued' state. This exists primarily to maintain backwards compatibility with OpenStack/Rackspace API semantics. The request body *must* be encoded as application/octet-stream, otherwise an HTTPBadRequest is returned. Upon a successful save of the image data and metadata, a response containing metadata about the image is returned, including its opaque identifier. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :param image_data: Actual image data that is to be stored :raises HTTPBadRequest if x-image-meta-location is missing and the request body is not application/octet-stream image data. """ self._enforce(req, 'add_image') if image_meta.get('is_public'): self._enforce(req, 'publicize_image') if req.context.read_only: msg = _("Read-only access") logger.debug(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") image_meta = self._reserve(req, image_meta) id = image_meta['id'] image_meta = self._handle_source(req, id, image_meta, image_data) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} def update(self, req, id, image_meta, image_data): """ Updates an existing image with the registry. 
:param request: The WSGI/Webob Request object :param id: The opaque image identifier :retval Returns the updated image information as a mapping """ self._enforce(req, 'modify_image') if image_meta.get('is_public'): self._enforce(req, 'publicize_image') if req.context.read_only: msg = _("Read-only access") logger.debug(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") orig_image_meta = self.get_image_meta_or_404(req, id) orig_status = orig_image_meta['status'] # The default behaviour for a PUT /images/<IMAGE_ID> is to # override any properties that were previously set. This, however, # leads to a number of issues for the common use case where a caller # registers an image with some properties and then almost immediately # uploads an image file along with some more properties. Here, we # check for a special header value to be false in order to force # properties NOT to be purged. However we also disable purging of # properties if an image file is being uploaded... purge_props = req.headers.get('x-glance-registry-purge-props', True) purge_props = (utils.bool_from_string(purge_props) and image_data is None) if image_data is not None and orig_status != 'queued': raise HTTPConflict(_("Cannot upload to an unqueued image")) # Only allow the Location|Copy-From fields to be modified if the # image is in queued status, which indicates that the user called # POST /images but originally supply neither a Location|Copy-From # field NOR image data location = self._external_source(image_meta, req) reactivating = orig_status != 'queued' and location activating = orig_status == 'queued' and (location or image_data) if reactivating: msg = _("Attempted to update Location field for an image " "not in queued status.") raise HTTPBadRequest(msg, request=req, content_type="text/plain") try: if location: image_meta['size'] = self._get_size(image_meta, location) image_meta = registry.update_image_metadata(req.context, id, image_meta, purge_props) if activating: image_meta 
= self._handle_source(req, id, image_meta, image_data) except exception.Invalid, e: msg = (_("Failed to update image metadata. Got error: %(e)s") % locals()) for line in msg.split('\n'): logger.error(line) self.notifier.error('image.update', msg) raise HTTPBadRequest(msg, request=req, content_type="text/plain") except exception.NotFound, e: msg = ("Failed to find image to update: %(e)s" % locals()) for line in msg.split('\n'): logger.info(line) self.notifier.info('image.update', msg) raise HTTPNotFound(msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to update image: %(e)s" % locals()) for line in msg.split('\n'): logger.info(line) self.notifier.info('image.update', msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") else: self.notifier.info('image.update', image_meta) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} def delete(self, req, id): """ Deletes the image and all its chunks from the Glance :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HttpBadRequest if image registry is invalid :raises HttpNotFound if image or any chunk is not available :raises HttpUnauthorized if image or any chunk is not deleteable by the requesting user """ self._enforce(req, 'delete_image') if req.context.read_only: msg = _("Read-only access") logger.debug(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") image = self.get_image_meta_or_404(req, id) if image['protected']: msg = _("Image is protected") logger.debug(msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") # The image's location field may be None in the case # of a saving or queued image, therefore don't ask a backend # to delete the image if the backend doesn't yet store it. 
# See https://bugs.launchpad.net/glance/+bug/747799 try: if image['location']: schedule_delete_from_backend(image['location'], self.conf, req.context, id) registry.delete_image_metadata(req.context, id) except exception.NotFound, e: msg = ("Failed to find image to delete: %(e)s" % locals()) for line in msg.split('\n'): logger.info(line) self.notifier.info('image.delete', msg) raise HTTPNotFound(msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to delete image: %(e)s" % locals()) for line in msg.split('\n'): logger.info(line) self.notifier.info('image.delete', msg) raise HTTPForbidden(msg, request=req, content_type="text/plain") else: self.notifier.info('image.delete', id) def get_store_or_400(self, request, store_name): """ Grabs the storage backend for the supplied store name or raises an HTTPBadRequest (400) response :param request: The WSGI/Webob Request object :param store_name: The backend store name :raises HTTPNotFound if store does not exist """ try: return get_store_from_scheme(store_name) except exception.UnknownScheme: msg = (_("Requested store %s not available on this Glance server") % store_name) logger.error(msg) raise HTTPBadRequest(msg, request=request, content_type='text/plain') def verify_store_or_exit(self, store_name): """ Verifies availability of the storage backend for the given store name or exits :param store_name: The backend store name """ try: get_store_from_scheme(store_name) except exception.UnknownScheme: msg = (_("Default store %s not available on this Glance server\n") % store_name) logger.error(msg) # message on stderr will only be visible if started directly via # bin/glance-api, as opposed to being daemonized by glance-control sys.stderr.write(msg) sys.exit(255) class ImageDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" def _deserialize(self, request): result = {} try: result['image_meta'] = 
utils.get_image_meta_from_headers(request) except exception.Invalid: image_size_str = request.headers['x-image-meta-size'] msg = _("Incoming image size of %s was not convertible to " "an integer.") % image_size_str raise HTTPBadRequest(msg, request=request) image_meta = result['image_meta'] if 'size' in image_meta: incoming_image_size = image_meta['size'] if incoming_image_size > IMAGE_SIZE_CAP: max_image_size = IMAGE_SIZE_CAP msg = _("Denying attempt to upload image larger than " "%(max_image_size)d. Supplied image size was " "%(incoming_image_size)d") % locals() logger.warn(msg) raise HTTPBadRequest(msg, request=request) data = request.body_file if self.has_body(request) else None result['image_data'] = data return result def create(self, request): return self._deserialize(request) def update(self, request): return self._deserialize(request) class ImageSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" def __init__(self, conf): self.conf = conf self.notifier = notifier.Notifier(conf) def _inject_location_header(self, response, image_meta): location = self._get_image_location(image_meta) response.headers['Location'] = location def _inject_checksum_header(self, response, image_meta): response.headers['ETag'] = image_meta['checksum'] def _inject_image_meta_headers(self, response, image_meta): """ Given a response and mapping of image metadata, injects the Response with a set of HTTP headers for the image metadata. 
Each main image metadata field is injected as a HTTP header with key 'x-image-meta-<FIELD>' except for the properties field, which is further broken out into a set of 'x-image-meta-property-<KEY>' headers :param response: The Webob Response object :param image_meta: Mapping of image metadata """ headers = utils.image_meta_to_http_headers(image_meta) for k, v in headers.items(): response.headers[k] = v def _get_image_location(self, image_meta): """Build a relative url to reach the image defined by image_meta.""" return "/v1/images/%s" % image_meta['id'] def meta(self, response, result): image_meta = result['image_meta'] self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def image_send_notification(self, bytes_written, expected_size, image_meta, request): """Send an image.send message to the notifier.""" try: context = request.context payload = { 'bytes_sent': bytes_written, 'image_id': image_meta['id'], 'owner_id': image_meta['owner'], 'receiver_tenant_id': context.tenant, 'receiver_user_id': context.user, 'destination_ip': request.remote_addr, } if bytes_written != expected_size: self.notifier.error('image.send', payload) else: self.notifier.info('image.send', payload) except Exception, err: msg = _("An error occurred during image.send" " notification: %(err)s") % locals() logger.error(msg) def show(self, response, result): image_meta = result['image_meta'] image_id = image_meta['id'] # We use a secondary iterator here to wrap the # iterator coming back from the store driver in # order to check for disconnections from the backend # storage connections and log an error if the size of # the transferred image is not the same as the expected # size of the image file. See LP Bug #882585. 
def checked_iter(image_id, expected_size, image_iter): bytes_written = 0 def notify_image_sent_hook(env): self.image_send_notification(bytes_written, expected_size, image_meta, response.request) # Add hook to process after response is fully sent if 'eventlet.posthooks' in response.request.environ: response.request.environ['eventlet.posthooks'].append( (notify_image_sent_hook, (), {})) try: for chunk in image_iter: yield chunk bytes_written += len(chunk) except Exception, err: msg = _("An error occurred reading from backend storage " "for image %(image_id): %(err)s") % locals() logger.error(msg) raise if expected_size != bytes_written: msg = _("Backend storage for image %(image_id)s " "disconnected after writing only %(bytes_written)d " "bytes") % locals() logger.error(msg) raise IOError(errno.EPIPE, _("Corrupt image download for " "image %(image_id)s") % locals()) image_iter = result['image_iterator'] # image_meta['size'] is a str expected_size = int(image_meta['size']) response.app_iter = checked_iter(image_id, expected_size, image_iter) # Using app_iter blanks content-length, so we set it here... 
response.headers['Content-Length'] = image_meta['size'] response.headers['Content-Type'] = 'application/octet-stream' self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def update(self, response, result): image_meta = result['image_meta'] response.body = self.to_json(dict(image=image_meta)) response.headers['Content-Type'] = 'application/json' self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create(self, response, result): image_meta = result['image_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(image=image_meta)) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create_resource(conf): """Images resource factory method""" deserializer = ImageDeserializer() serializer = ImageSerializer(conf) return wsgi.Resource(Controller(conf), deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3773_0
crossvul-python_data_good_2042_1
from collections import OrderedDict import copy import operator from functools import partial, reduce, update_wrapper import warnings from django import forms from django.conf import settings from django.contrib import messages from django.contrib.admin import widgets, helpers from django.contrib.admin import validation from django.contrib.admin.checks import (BaseModelAdminChecks, ModelAdminChecks, InlineModelAdminChecks) from django.contrib.admin.exceptions import DisallowedModelAdminToField from django.contrib.admin.utils import (quote, unquote, flatten_fieldsets, get_deleted_objects, model_format_dict, NestedObjects, lookup_needs_distinct) from django.contrib.admin.templatetags.admin_static import static from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.auth import get_permission_codename from django.core import checks from django.core.exceptions import (PermissionDenied, ValidationError, FieldError, ImproperlyConfigured) from django.core.paginator import Paginator from django.core.urlresolvers import reverse from django.db import models, transaction, router from django.db.models.constants import LOOKUP_SEP from django.db.models.related import RelatedObject from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist from django.db.models.sql.constants import QUERY_TERMS from django.forms.formsets import all_valid, DELETION_FIELD_NAME from django.forms.models import (modelform_factory, modelformset_factory, inlineformset_factory, BaseInlineFormSet, modelform_defines_fields) from django.http import Http404, HttpResponseRedirect from django.http.response import HttpResponseBase from django.shortcuts import get_object_or_404 from django.template.response import SimpleTemplateResponse, TemplateResponse from django.utils import six from django.utils.decorators import method_decorator from django.utils.deprecation import (RenameMethodsBase, RemovedInDjango18Warning, RemovedInDjango19Warning) from 
django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.html import escape, escapejs from django.utils.http import urlencode from django.utils.text import capfirst, get_text_list from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from django.utils.safestring import mark_safe from django.views.decorators.csrf import csrf_protect IS_POPUP_VAR = '_popup' TO_FIELD_VAR = '_to_field' HORIZONTAL, VERTICAL = 1, 2 def get_content_type_for_model(obj): # Since this module gets imported in the application's root package, # it cannot import models from other applications at the module level. from django.contrib.contenttypes.models import ContentType return ContentType.objects.get_for_model(obj, for_concrete_model=False) def get_ul_class(radio_style): return 'radiolist' if radio_style == VERTICAL else 'radiolist inline' class IncorrectLookupParameters(Exception): pass # Defaults for formfield_overrides. ModelAdmin subclasses can change this # by adding to ModelAdmin.formfield_overrides. 
FORMFIELD_FOR_DBFIELD_DEFAULTS = { models.DateTimeField: { 'form_class': forms.SplitDateTimeField, 'widget': widgets.AdminSplitDateTime }, models.DateField: {'widget': widgets.AdminDateWidget}, models.TimeField: {'widget': widgets.AdminTimeWidget}, models.TextField: {'widget': widgets.AdminTextareaWidget}, models.URLField: {'widget': widgets.AdminURLFieldWidget}, models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget}, models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget}, models.CharField: {'widget': widgets.AdminTextInputWidget}, models.ImageField: {'widget': widgets.AdminFileWidget}, models.FileField: {'widget': widgets.AdminFileWidget}, models.EmailField: {'widget': widgets.AdminEmailInputWidget}, } csrf_protect_m = method_decorator(csrf_protect) class RenameBaseModelAdminMethods(forms.MediaDefiningClass, RenameMethodsBase): renamed_methods = ( ('queryset', 'get_queryset', RemovedInDjango18Warning), ) class BaseModelAdmin(six.with_metaclass(RenameBaseModelAdminMethods)): """Functionality common to both ModelAdmin and InlineAdmin.""" raw_id_fields = () fields = None exclude = None fieldsets = None form = forms.ModelForm filter_vertical = () filter_horizontal = () radio_fields = {} prepopulated_fields = {} formfield_overrides = {} readonly_fields = () ordering = None view_on_site = True # Validation of ModelAdmin definitions # Old, deprecated style: validator_class = None default_validator_class = validation.BaseValidator # New style: checks_class = BaseModelAdminChecks @classmethod def validate(cls, model): warnings.warn( 'ModelAdmin.validate() is deprecated. Use "check()" instead.', RemovedInDjango19Warning) if cls.validator_class: validator = cls.validator_class() else: validator = cls.default_validator_class() validator.validate(cls, model) @classmethod def check(cls, model, **kwargs): if cls.validator_class: warnings.warn( 'ModelAdmin.validator_class is deprecated. 
' 'ModeAdmin validators must be converted to use ' 'the system check framework.', RemovedInDjango19Warning) validator = cls.validator_class() try: validator.validate(cls, model) except ImproperlyConfigured as e: return [checks.Error(e.args[0], hint=None, obj=cls)] else: return [] else: return cls.checks_class().check(cls, model, **kwargs) def __init__(self): overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy() overrides.update(self.formfield_overrides) self.formfield_overrides = overrides def formfield_for_dbfield(self, db_field, **kwargs): """ Hook for specifying the form Field instance for a given database Field instance. If kwargs are given, they're passed to the form Field's constructor. """ request = kwargs.pop("request", None) # If the field specifies choices, we don't need to look for special # admin widgets - we just need to use a select widget of some kind. if db_field.choices: return self.formfield_for_choice_field(db_field, request, **kwargs) # ForeignKey or ManyToManyFields if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)): # Combine the field kwargs with any options for formfield_overrides. # Make sure the passed in **kwargs override anything in # formfield_overrides because **kwargs is more specific, and should # always win. if db_field.__class__ in self.formfield_overrides: kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs) # Get the correct formfield. if isinstance(db_field, models.ForeignKey): formfield = self.formfield_for_foreignkey(db_field, request, **kwargs) elif isinstance(db_field, models.ManyToManyField): formfield = self.formfield_for_manytomany(db_field, request, **kwargs) # For non-raw_id fields, wrap the widget with a wrapper that adds # extra HTML -- the "add other" interface -- to the end of the # rendered output. formfield can be None if it came from a # OneToOneField with parent_link=True or a M2M intermediary. 
if formfield and db_field.name not in self.raw_id_fields: related_modeladmin = self.admin_site._registry.get(db_field.rel.to) can_add_related = bool(related_modeladmin and related_modeladmin.has_add_permission(request)) formfield.widget = widgets.RelatedFieldWidgetWrapper( formfield.widget, db_field.rel, self.admin_site, can_add_related=can_add_related) return formfield # If we've got overrides for the formfield defined, use 'em. **kwargs # passed to formfield_for_dbfield override the defaults. for klass in db_field.__class__.mro(): if klass in self.formfield_overrides: kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs) return db_field.formfield(**kwargs) # For any other type of field, just call its formfield() method. return db_field.formfield(**kwargs) def formfield_for_choice_field(self, db_field, request=None, **kwargs): """ Get a form Field for a database Field that has declared choices. """ # If the field is named as a radio_field, use a RadioSelect if db_field.name in self.radio_fields: # Avoid stomping on custom widget/choices arguments. if 'widget' not in kwargs: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) if 'choices' not in kwargs: kwargs['choices'] = db_field.get_choices( include_blank=db_field.blank, blank_choice=[('', _('None'))] ) return db_field.formfield(**kwargs) def get_field_queryset(self, db, db_field, request): """ If the ModelAdmin specifies ordering, the queryset should respect that ordering. Otherwise don't specify the queryset, let the field decide (returns None in that case). 
""" related_admin = self.admin_site._registry.get(db_field.rel.to, None) if related_admin is not None: ordering = related_admin.get_ordering(request) if ordering is not None and ordering != (): return db_field.rel.to._default_manager.using(db).order_by(*ordering) return None def formfield_for_foreignkey(self, db_field, request=None, **kwargs): """ Get a form Field for a ForeignKey. """ db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel, self.admin_site, using=db) elif db_field.name in self.radio_fields: kwargs['widget'] = widgets.AdminRadioSelect(attrs={ 'class': get_ul_class(self.radio_fields[db_field.name]), }) kwargs['empty_label'] = _('None') if db_field.blank else None if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs['queryset'] = queryset return db_field.formfield(**kwargs) def formfield_for_manytomany(self, db_field, request=None, **kwargs): """ Get a form Field for a ManyToManyField. """ # If it uses an intermediary model that isn't auto created, don't show # a field in admin. 
if not db_field.rel.through._meta.auto_created: return None db = kwargs.get('using') if db_field.name in self.raw_id_fields: kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel, self.admin_site, using=db) kwargs['help_text'] = '' elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)): kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical)) if 'queryset' not in kwargs: queryset = self.get_field_queryset(db, db_field, request) if queryset is not None: kwargs['queryset'] = queryset return db_field.formfield(**kwargs) def get_view_on_site_url(self, obj=None): if obj is None or not self.view_on_site: return None if callable(self.view_on_site): return self.view_on_site(obj) elif self.view_on_site and hasattr(obj, 'get_absolute_url'): # use the ContentType lookup if view_on_site is True return reverse('admin:view_on_site', kwargs={ 'content_type_id': get_content_type_for_model(obj).pk, 'object_id': obj.pk }) @property def declared_fieldsets(self): warnings.warn( "ModelAdmin.declared_fieldsets is deprecated and " "will be removed in Django 1.9.", RemovedInDjango19Warning, stacklevel=2 ) if self.fieldsets: return self.fieldsets elif self.fields: return [(None, {'fields': self.fields})] return None def get_fields(self, request, obj=None): """ Hook for specifying fields. """ return self.fields def get_fieldsets(self, request, obj=None): """ Hook for specifying fieldsets. """ # We access the property and check if it triggers a warning. # If it does, then it's ours and we can safely ignore it, but if # it doesn't then it has been overridden so we must warn about the # deprecation. 
with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") declared_fieldsets = self.declared_fieldsets if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning): warnings.warn( "ModelAdmin.declared_fieldsets is deprecated and " "will be removed in Django 1.9.", RemovedInDjango19Warning ) if declared_fieldsets: return declared_fieldsets if self.fieldsets: return self.fieldsets return [(None, {'fields': self.get_fields(request, obj)})] def get_ordering(self, request): """ Hook for specifying field ordering. """ return self.ordering or () # otherwise we might try to *None, which is bad ;) def get_readonly_fields(self, request, obj=None): """ Hook for specifying custom readonly fields. """ return self.readonly_fields def get_prepopulated_fields(self, request, obj=None): """ Hook for specifying custom prepopulated fields. """ return self.prepopulated_fields def get_queryset(self, request): """ Returns a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_queryset() # TODO: this should be handled by some parameter to the ChangeList. ordering = self.get_ordering(request) if ordering: qs = qs.order_by(*ordering) return qs def lookup_allowed(self, lookup, value): from django.contrib.admin.filters import SimpleListFilter model = self.model # Check FKey lookups that are allowed, so that popups produced by # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to, # are allowed to work. for l in model._meta.related_fkey_lookups: # As ``limit_choices_to`` can be a callable, invoke it here. if callable(l): l = l() for k, v in widgets.url_params_from_lookup_dict(l).items(): if k == lookup and v == value: return True parts = lookup.split(LOOKUP_SEP) # Last term in lookup is a query term (__exact, __startswith etc) # This term can be ignored. 
if len(parts) > 1 and parts[-1] in QUERY_TERMS: parts.pop() # Special case -- foo__id__exact and foo__id queries are implied # if foo has been specifically included in the lookup list; so # drop __id if it is the last part. However, first we need to find # the pk attribute name. rel_name = None for part in parts[:-1]: try: field, _, _, _ = model._meta.get_field_by_name(part) except FieldDoesNotExist: # Lookups on non-existent fields are ok, since they're ignored # later. return True if hasattr(field, 'rel'): if field.rel is None: # This property or relation doesn't exist, but it's allowed # since it's ignored in ChangeList.get_filters(). return True model = field.rel.to rel_name = field.rel.get_related_field().name elif isinstance(field, RelatedObject): model = field.model rel_name = model._meta.pk.name else: rel_name = None if rel_name and len(parts) > 1 and parts[-1] == rel_name: parts.pop() if len(parts) == 1: return True clean_lookup = LOOKUP_SEP.join(parts) valid_lookups = [self.date_hierarchy] for filter_item in self.list_filter: if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter): valid_lookups.append(filter_item.parameter_name) elif isinstance(filter_item, (list, tuple)): valid_lookups.append(filter_item[0]) else: valid_lookups.append(filter_item) return clean_lookup in valid_lookups def to_field_allowed(self, request, to_field): opts = self.model._meta try: field = opts.get_field(to_field) except FieldDoesNotExist: return False # Make sure at least one of the models registered for this site # references this field. registered_models = self.admin_site._registry for related_object in opts.get_all_related_objects(): if (related_object.model in registered_models and field in related_object.field.foreign_related_fields): return True return False def has_add_permission(self, request): """ Returns True if the given request has permission to add an object. Can be overridden by the user in subclasses. 
""" opts = self.opts codename = get_permission_codename('add', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_change_permission(self, request, obj=None): """ Returns True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to change the `obj` model instance. If `obj` is None, this should return True if the given request has permission to change *any* object of the given type. """ opts = self.opts codename = get_permission_codename('change', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) def has_delete_permission(self, request, obj=None): """ Returns True if the given request has permission to change the given Django model instance, the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type. """ opts = self.opts codename = get_permission_codename('delete', opts) return request.user.has_perm("%s.%s" % (opts.app_label, codename)) @python_2_unicode_compatible class ModelAdmin(BaseModelAdmin): "Encapsulates all admin options and functionality for a given model." 
list_display = ('__str__',) list_display_links = () list_filter = () list_select_related = False list_per_page = 100 list_max_show_all = 200 list_editable = () search_fields = () date_hierarchy = None save_as = False save_on_top = False paginator = Paginator preserve_filters = True inlines = [] # Custom templates (designed to be over-ridden in subclasses) add_form_template = None change_form_template = None change_list_template = None delete_confirmation_template = None delete_selected_confirmation_template = None object_history_template = None # Actions actions = [] action_form = helpers.ActionForm actions_on_top = True actions_on_bottom = False actions_selection_counter = True # validation # Old, deprecated style: default_validator_class = validation.ModelAdminValidator # New style: checks_class = ModelAdminChecks def __init__(self, model, admin_site): self.model = model self.opts = model._meta self.admin_site = admin_site super(ModelAdmin, self).__init__() def __str__(self): return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__) def get_inline_instances(self, request, obj=None): inline_instances = [] for inline_class in self.inlines: inline = inline_class(self.model, self.admin_site) if request: if not (inline.has_add_permission(request) or inline.has_change_permission(request, obj) or inline.has_delete_permission(request, obj)): continue if not inline.has_add_permission(request): inline.max_num = 0 inline_instances.append(inline) return inline_instances def get_urls(self): from django.conf.urls import patterns, url def wrap(view): def wrapper(*args, **kwargs): return self.admin_site.admin_view(view)(*args, **kwargs) return update_wrapper(wrapper, view) info = self.model._meta.app_label, self.model._meta.model_name urlpatterns = patterns('', url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info), url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info), url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % 
info), url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info), url(r'^(.+)/$', wrap(self.change_view), name='%s_%s_change' % info), ) return urlpatterns def urls(self): return self.get_urls() urls = property(urls) @property def media(self): extra = '' if settings.DEBUG else '.min' js = [ 'core.js', 'admin/RelatedObjectLookups.js', 'jquery%s.js' % extra, 'jquery.init.js' ] if self.actions is not None: js.append('actions%s.js' % extra) if self.prepopulated_fields: js.extend(['urlify.js', 'prepopulate%s.js' % extra]) return forms.Media(js=[static('admin/js/%s' % url) for url in js]) def get_model_perms(self, request): """ Returns a dict of all perms for this model. This dict has the keys ``add``, ``change``, and ``delete`` mapping to the True/False for each of those actions. """ return { 'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), } def get_fields(self, request, obj=None): if self.fields: return self.fields form = self.get_form(request, obj, fields=None) return list(form.base_fields) + list(self.get_readonly_fields(request, obj)) def get_form(self, request, obj=None, **kwargs): """ Returns a Form class for use in the admin add view. This is used by add_view and change_view. """ if 'fields' in kwargs: fields = kwargs.pop('fields') else: fields = flatten_fieldsets(self.get_fieldsets(request, obj)) if self.exclude is None: exclude = [] else: exclude = list(self.exclude) exclude.extend(self.get_readonly_fields(request, obj)) if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude: # Take the custom ModelForm's Meta.exclude into account only if the # ModelAdmin doesn't define its own. 
exclude.extend(self.form._meta.exclude) # if exclude is an empty list we pass None to be consistent with the # default on modelform_factory exclude = exclude or None defaults = { "form": self.form, "fields": fields, "exclude": exclude, "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) if defaults['fields'] is None and not modelform_defines_fields(defaults['form']): defaults['fields'] = forms.ALL_FIELDS try: return modelform_factory(self.model, **defaults) except FieldError as e: raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.' % (e, self.__class__.__name__)) def get_changelist(self, request, **kwargs): """ Returns the ChangeList class for use on the changelist page. """ from django.contrib.admin.views.main import ChangeList return ChangeList def get_object(self, request, object_id): """ Returns an instance matching the primary key provided. ``None`` is returned if no match is found (or the object_id failed validation against the primary key field). """ queryset = self.get_queryset(request) model = queryset.model try: object_id = model._meta.pk.to_python(object_id) return queryset.get(pk=object_id) except (model.DoesNotExist, ValidationError, ValueError): return None def get_changelist_form(self, request, **kwargs): """ Returns a Form class for use in the Formset on the changelist page. """ defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) if (defaults.get('fields') is None and not modelform_defines_fields(defaults.get('form'))): defaults['fields'] = forms.ALL_FIELDS return modelform_factory(self.model, **defaults) def get_changelist_formset(self, request, **kwargs): """ Returns a FormSet class for use on the changelist page if list_editable is used. 
""" defaults = { "formfield_callback": partial(self.formfield_for_dbfield, request=request), } defaults.update(kwargs) return modelformset_factory(self.model, self.get_changelist_form(request), extra=0, fields=self.list_editable, **defaults) def _get_formsets(self, request, obj): """ Helper function that exists to allow the deprecation warning to be executed while this function continues to return a generator. """ for inline in self.get_inline_instances(request, obj): yield inline.get_formset(request, obj) def get_formsets(self, request, obj=None): warnings.warn( "ModelAdmin.get_formsets() is deprecated and will be removed in " "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.", RemovedInDjango19Warning, stacklevel=2 ) return self._get_formsets(request, obj) def get_formsets_with_inlines(self, request, obj=None): """ Yields formsets and the corresponding inlines. """ # We call get_formsets() [deprecated] and check if it triggers a # warning. If it does, then it's ours and we can safely ignore it, but # if it doesn't then it has been overridden so we must warn about the # deprecation. with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") formsets = self.get_formsets(request, obj) if len(w) != 1 or not issubclass(w[0].category, RemovedInDjango19Warning): warnings.warn( "ModelAdmin.get_formsets() is deprecated and will be removed in " "Django 1.9. Use ModelAdmin.get_formsets_with_inlines() instead.", RemovedInDjango19Warning ) if formsets: zipped = zip(formsets, self.get_inline_instances(request, None)) for formset, inline in zipped: yield formset, inline else: for inline in self.get_inline_instances(request, obj): yield inline.get_formset(request, obj), inline def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True): return self.paginator(queryset, per_page, orphans, allow_empty_first_page) def log_addition(self, request, object): """ Log that an object has been successfully added. 
The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, ADDITION LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=force_text(object), action_flag=ADDITION ) def log_change(self, request, object, message): """ Log that an object has been successfully changed. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, CHANGE LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=force_text(object), action_flag=CHANGE, change_message=message ) def log_deletion(self, request, object, object_repr): """ Log that an object will be deleted. Note that this method must be called before the deletion. The default implementation creates an admin LogEntry object. """ from django.contrib.admin.models import LogEntry, DELETION LogEntry.objects.log_action( user_id=request.user.pk, content_type_id=get_content_type_for_model(object).pk, object_id=object.pk, object_repr=object_repr, action_flag=DELETION ) def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk)) action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />') action_checkbox.allow_tags = True def get_actions(self, request): """ Return a dictionary mapping the names of all actions for this ModelAdmin to a tuple of (callable, name, description) for each action. """ # If self.actions is explicitly set to None that means that we don't # want *any* actions enabled on this page. 
from django.contrib.admin.views.main import _is_changelist_popup if self.actions is None or _is_changelist_popup(request): return OrderedDict() actions = [] # Gather actions from the admin site first for (name, func) in self.admin_site.actions: description = getattr(func, 'short_description', name.replace('_', ' ')) actions.append((func, name, description)) # Then gather them from the model admin and all parent classes, # starting with self and working back up. for klass in self.__class__.mro()[::-1]: class_actions = getattr(klass, 'actions', []) # Avoid trying to iterate over None if not class_actions: continue actions.extend(self.get_action(action) for action in class_actions) # get_action might have returned None, so filter any of those out. actions = filter(None, actions) # Convert the actions into an OrderedDict keyed by name. actions = OrderedDict( (name, (func, name, desc)) for func, name, desc in actions ) return actions def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH): """ Return a list of choices for use in a form object. Each choice is a tuple (name, description). """ choices = [] + default_choices for func, name, description in six.itervalues(self.get_actions(request)): choice = (name, description % model_format_dict(self.opts)) choices.append(choice) return choices def get_action(self, action): """ Return a given action from a parameter, which can either be a callable, or the name of a method on the ModelAdmin. Return is a tuple of (callable, name, description). """ # If the action is a callable, just use it. if callable(action): func = action action = action.__name__ # Next, look for a method. Grab it off self.__class__ to get an unbound # method instead of a bound one; this ensures that the calling # conventions are the same for functions and methods. 
elif hasattr(self.__class__, action): func = getattr(self.__class__, action) # Finally, look for a named method on the admin site else: try: func = self.admin_site.get_action(action) except KeyError: return None if hasattr(func, 'short_description'): description = func.short_description else: description = capfirst(action.replace('_', ' ')) return func, action, description def get_list_display(self, request): """ Return a sequence containing the fields to be displayed on the changelist. """ return self.list_display def get_list_display_links(self, request, list_display): """ Return a sequence containing the fields to be displayed as links on the changelist. The list_display parameter is the list of fields returned by get_list_display(). """ if self.list_display_links or self.list_display_links is None or not list_display: return self.list_display_links else: # Use only the first item in list_display as link return list(list_display)[:1] def get_list_filter(self, request): """ Returns a sequence containing the fields to be displayed as filters in the right sidebar of the changelist page. """ return self.list_filter def get_search_fields(self, request): """ Returns a sequence containing the fields to be searched whenever somebody submits a search query. """ return self.search_fields def get_search_results(self, request, queryset, search_term): """ Returns a tuple containing a queryset to implement the search, and a boolean indicating if the results may contain duplicates. """ # Apply keyword searches. 
def get_preserved_filters(self, request):
    """
    Return a querystring encoding the changelist filters to carry across
    add/change/delete views, or '' when nothing should be preserved.
    """
    match = request.resolver_match
    if not (self.preserve_filters and match):
        return ''
    opts = self.model._meta
    current_url = '%s:%s' % (match.app_name, match.url_name)
    changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
    # On the changelist itself the filters are the live GET params;
    # elsewhere they ride along in the '_changelist_filters' parameter.
    if current_url == changelist_url:
        preserved_filters = request.GET.urlencode()
    else:
        preserved_filters = request.GET.get('_changelist_filters')
    if preserved_filters:
        return urlencode({'_changelist_filters': preserved_filters})
    return ''
""" change_message = [] if form.changed_data: change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and'))) if formsets: for formset in formsets: for added_object in formset.new_objects: change_message.append(_('Added %(name)s "%(object)s".') % {'name': force_text(added_object._meta.verbose_name), 'object': force_text(added_object)}) for changed_object, changed_fields in formset.changed_objects: change_message.append(_('Changed %(list)s for %(name)s "%(object)s".') % {'list': get_text_list(changed_fields, _('and')), 'name': force_text(changed_object._meta.verbose_name), 'object': force_text(changed_object)}) for deleted_object in formset.deleted_objects: change_message.append(_('Deleted %(name)s "%(object)s".') % {'name': force_text(deleted_object._meta.verbose_name), 'object': force_text(deleted_object)}) change_message = ' '.join(change_message) return change_message or _('No fields changed.') def message_user(self, request, message, level=messages.INFO, extra_tags='', fail_silently=False): """ Send a message to the user. The default implementation posts a message using the django.contrib.messages backend. Exposes almost the same API as messages.add_message(), but accepts the positional arguments in a different order to maintain backwards compatibility. For convenience, it accepts the `level` argument as a string rather than the usual level number. """ if not isinstance(level, int): # attempt to get the level if passed a string try: level = getattr(messages.constants, level.upper()) except AttributeError: levels = messages.constants.DEFAULT_TAGS.values() levels_repr = ', '.join('`%s`' % l for l in levels) raise ValueError('Bad message level string: `%s`. ' 'Possible values are: %s' % (level, levels_repr)) messages.add_message(request, level, message, extra_tags=extra_tags, fail_silently=fail_silently) def save_form(self, request, form, change): """ Given a ModelForm return an unsaved instance. 
def save_related(self, request, form, formsets, change):
    """
    Persist the objects related to the parent instance.

    Saves the parent form's many-to-many data first, then saves each
    inline formset via save_formset().  ``change`` is True when the
    parent object is being edited rather than added.  save_form() and
    save_model() must already have been called.
    """
    form.save_m2m()
    for fs in formsets:
        self.save_formset(request, form, fs, change=change)
def response_add(self, request, obj, post_url_continue=None):
    """
    Determines the HttpResponse for the add_view stage.
    """
    opts = obj._meta
    pk_value = obj._get_pk_val()
    preserved_filters = self.get_preserved_filters(request)
    msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
    # Here, we distinguish between different save types by checking for
    # the presence of keys in request.POST.
    if IS_POPUP_VAR in request.POST:
        # Raw-id / related-widget popup: render a snippet that hands the new
        # object's value back to the opener window.
        to_field = request.POST.get(TO_FIELD_VAR)
        if to_field:
            attr = str(to_field)
        else:
            attr = obj._meta.pk.attname
        value = obj.serializable_value(attr)
        return SimpleTemplateResponse('admin/popup_response.html', {
            'pk_value': escape(pk_value),  # for possible backwards-compatibility
            'value': escape(value),
            'obj': escapejs(obj)
        })

    elif "_continue" in request.POST:
        msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
        self.message_user(request, msg, messages.SUCCESS)
        if post_url_continue is None:
            post_url_continue = reverse('admin:%s_%s_change' %
                                        (opts.app_label, opts.model_name),
                                        args=(quote(pk_value),),
                                        current_app=self.admin_site.name)
        post_url_continue = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url_continue)
        return HttpResponseRedirect(post_url_continue)

    elif "_addanother" in request.POST:
        msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
        self.message_user(request, msg, messages.SUCCESS)
        redirect_url = request.path
        redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
        return HttpResponseRedirect(redirect_url)

    else:
        msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
        self.message_user(request, msg, messages.SUCCESS)
        return self.response_post_save_add(request, obj)
def response_post_save_add(self, request, obj):
    """
    Figure out where to redirect after the 'Save' button has been
    pressed when adding a new object.
    """
    opts = self.model._meta
    if self.has_change_permission(request, None):
        post_url = reverse('admin:%s_%s_changelist' %
                           (opts.app_label, opts.model_name),
                           current_app=self.admin_site.name)
        preserved_filters = self.get_preserved_filters(request)
        post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
    else:
        # Users without change permission can't see the changelist;
        # send them to the admin index instead.
        post_url = reverse('admin:index',
                           current_app=self.admin_site.name)
    return HttpResponseRedirect(post_url)

def response_post_save_change(self, request, obj):
    """
    Figure out where to redirect after the 'Save' button has been
    pressed when editing an existing object.
    """
    opts = self.model._meta
    if self.has_change_permission(request, None):
        post_url = reverse('admin:%s_%s_changelist' %
                           (opts.app_label, opts.model_name),
                           current_app=self.admin_site.name)
        preserved_filters = self.get_preserved_filters(request)
        post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
    else:
        post_url = reverse('admin:index',
                           current_app=self.admin_site.name)
    return HttpResponseRedirect(post_url)

def response_action(self, request, queryset):
    """
    Handle an admin action. This is called if a request is POSTed to the
    changelist; it returns an HttpResponse if the action was handled, and
    None otherwise.
    """
    # There can be multiple action forms on the page (at the top
    # and bottom of the change list, for example). Get the action
    # whose button was pushed.
    try:
        action_index = int(request.POST.get('index', 0))
    except ValueError:
        action_index = 0

    # Construct the action form.
    data = request.POST.copy()
    data.pop(helpers.ACTION_CHECKBOX_NAME, None)
    data.pop("index", None)

    # Use the action whose button was pushed
    try:
        data.update({'action': data.getlist('action')[action_index]})
    except IndexError:
        # If we didn't get an action from the chosen form that's invalid
        # POST data, so by deleting action it'll fail the validation check
        # below. So no need to do anything here
        pass

    action_form = self.action_form(data, auto_id=None)
    action_form.fields['action'].choices = self.get_action_choices(request)

    # If the form's valid we can handle the action.
    if action_form.is_valid():
        action = action_form.cleaned_data['action']
        select_across = action_form.cleaned_data['select_across']
        func = self.get_actions(request)[action][0]

        # Get the list of selected PKs. If nothing's selected, we can't
        # perform an action on it, so bail. Except we want to perform
        # the action explicitly on all objects.
        selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
        if not selected and not select_across:
            # Reminder that something needs to be selected or nothing will happen
            msg = _("Items must be selected in order to perform "
                    "actions on them. No items have been changed.")
            self.message_user(request, msg, messages.WARNING)
            return None

        if not select_across:
            # Perform the action only on the selected objects
            queryset = queryset.filter(pk__in=selected)

        response = func(self, request, queryset)

        # Actions may return an HttpResponse-like object, which will be
        # used as the response from the POST. If not, we'll be a good
        # little HTTP citizen and redirect back to the changelist page.
        if isinstance(response, HttpResponseBase):
            return response
        else:
            return HttpResponseRedirect(request.get_full_path())
    else:
        msg = _("No action selected.")
        self.message_user(request, msg, messages.WARNING)
        return None

def response_delete(self, request, obj_display):
    """
    Determines the HttpResponse for the delete_view stage.
    """
    opts = self.model._meta

    self.message_user(request,
        _('The %(name)s "%(obj)s" was deleted successfully.') % {
            'name': force_text(opts.verbose_name),
            'obj': force_text(obj_display)
        }, messages.SUCCESS)

    if self.has_change_permission(request, None):
        post_url = reverse('admin:%s_%s_changelist' %
                           (opts.app_label, opts.model_name),
                           current_app=self.admin_site.name)
        preserved_filters = self.get_preserved_filters(request)
        post_url = add_preserved_filters(
            {'preserved_filters': preserved_filters, 'opts': opts}, post_url
        )
    else:
        post_url = reverse('admin:index',
                           current_app=self.admin_site.name)
    return HttpResponseRedirect(post_url)

def render_delete_form(self, request, context):
    """Render the delete-confirmation page, honouring any template override."""
    opts = self.model._meta
    app_label = opts.app_label

    return TemplateResponse(request,
        self.delete_confirmation_template or [
            "admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
            "admin/{}/delete_confirmation.html".format(app_label),
            "admin/delete_confirmation.html"
        ], context, current_app=self.admin_site.name)

def get_inline_formsets(self, request, formsets, inline_instances, obj=None):
    """
    Wrap each (inline, formset) pair in an InlineAdminFormSet helper
    carrying its fieldsets, prepopulated and readonly field metadata.
    """
    inline_admin_formsets = []
    for inline, formset in zip(inline_instances, formsets):
        fieldsets = list(inline.get_fieldsets(request, obj))
        readonly = list(inline.get_readonly_fields(request, obj))
        prepopulated = dict(inline.get_prepopulated_fields(request, obj))
        inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
            fieldsets, prepopulated, readonly, model_admin=self)
        inline_admin_formsets.append(inline_admin_formset)
    return inline_admin_formsets
@csrf_protect_m
@transaction.atomic
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
    """
    Shared implementation of the add and change views.

    ``object_id`` of None means "add"; anything else is looked up and
    edited.  Raises PermissionDenied / Http404 / DisallowedModelAdminToField
    as appropriate before touching the form.
    """
    to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
    # Only fields explicitly allowed by to_field_allowed() may be targeted,
    # otherwise a crafted popup URL could leak arbitrary field values.
    if to_field and not self.to_field_allowed(request, to_field):
        raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)

    model = self.model
    opts = model._meta
    add = object_id is None

    if add:
        if not self.has_add_permission(request):
            raise PermissionDenied
        obj = None
    else:
        obj = self.get_object(request, unquote(object_id))

        if not self.has_change_permission(request, obj):
            raise PermissionDenied

        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
                'name': force_text(opts.verbose_name), 'key': escape(object_id)})

        if request.method == 'POST' and "_saveasnew" in request.POST:
            # "Save as new" re-enters through the add view.
            return self.add_view(request, form_url=reverse('admin:%s_%s_add' % (
                opts.app_label, opts.model_name),
                current_app=self.admin_site.name))

    ModelForm = self.get_form(request, obj)
    if request.method == 'POST':
        form = ModelForm(request.POST, request.FILES, instance=obj)
        if form.is_valid():
            form_validated = True
            new_object = self.save_form(request, form, change=not add)
        else:
            form_validated = False
            new_object = form.instance
        formsets, inline_instances = self._create_formsets(request, new_object)
        if all_valid(formsets) and form_validated:
            self.save_model(request, new_object, form, not add)
            self.save_related(request, form, formsets, not add)
            if add:
                self.log_addition(request, new_object)
                return self.response_add(request, new_object)
            else:
                change_message = self.construct_change_message(request, form, formsets)
                self.log_change(request, new_object, change_message)
                return self.response_change(request, new_object)
        # Invalid form/formsets: fall through and re-render with errors.
    else:
        if add:
            initial = self.get_changeform_initial_data(request)
            form = ModelForm(initial=initial)
            formsets, inline_instances = self._create_formsets(request, self.model())
        else:
            form = ModelForm(instance=obj)
            formsets, inline_instances = self._create_formsets(request, obj)

    adminForm = helpers.AdminForm(
        form,
        list(self.get_fieldsets(request, obj)),
        self.get_prepopulated_fields(request, obj),
        self.get_readonly_fields(request, obj),
        model_admin=self)
    media = self.media + adminForm.media

    inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
    for inline_formset in inline_formsets:
        media = media + inline_formset.media

    context = dict(self.admin_site.each_context(),
        title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
        adminform=adminForm,
        object_id=object_id,
        original=obj,
        is_popup=(IS_POPUP_VAR in request.POST or
                  IS_POPUP_VAR in request.GET),
        to_field=to_field,
        media=media,
        inline_admin_formsets=inline_formsets,
        errors=helpers.AdminErrorList(form, formsets),
        preserved_filters=self.get_preserved_filters(request),
    )

    context.update(extra_context or {})

    return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)

def add_view(self, request, form_url='', extra_context=None):
    """Thin wrapper: the add view is changeform_view with no object_id."""
    return self.changeform_view(request, None, form_url, extra_context)

def change_view(self, request, object_id, form_url='', extra_context=None):
    """Thin wrapper: the change view is changeform_view with an object_id."""
    return self.changeform_view(request, object_id, form_url, extra_context)

@csrf_protect_m
def changelist_view(self, request, extra_context=None):
    """
    The 'change list' admin view for this model.
    """
    from django.contrib.admin.views.main import ERROR_FLAG
    opts = self.model._meta
    app_label = opts.app_label
    if not self.has_change_permission(request, None):
        raise PermissionDenied

    list_display = self.get_list_display(request)
    list_display_links = self.get_list_display_links(request, list_display)
    list_filter = self.get_list_filter(request)
    search_fields = self.get_search_fields(request)

    # Check actions to see if any are available on this changelist
    actions = self.get_actions(request)
    if actions:
        # Add the action checkboxes if there are any actions available.
        list_display = ['action_checkbox'] + list(list_display)

    ChangeList = self.get_changelist(request)
    try:
        cl = ChangeList(request, self.model, list_display,
            list_display_links, list_filter, self.date_hierarchy,
            search_fields, self.list_select_related, self.list_per_page,
            self.list_max_show_all, self.list_editable, self)
    except IncorrectLookupParameters:
        # Wacky lookup parameters were given, so redirect to the main
        # changelist page, without parameters, and pass an 'invalid=1'
        # parameter via the query string. If wacky parameters were given
        # and the 'invalid=1' parameter was already in the query string,
        # something is screwed up with the database, so display an error
        # page.
        if ERROR_FLAG in request.GET.keys():
            return SimpleTemplateResponse('admin/invalid_setup.html', {
                'title': _('Database error'),
            })
        return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')

    # If the request was POSTed, this might be a bulk action or a bulk
    # edit. Try to look up an action or confirmation first, but if this
    # isn't an action the POST will fall through to the bulk edit check,
    # below.
    action_failed = False
    selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)

    # Actions with no confirmation
    if (actions and request.method == 'POST' and
            'index' in request.POST and '_save' not in request.POST):
        if selected:
            response = self.response_action(request, queryset=cl.get_queryset(request))
            if response:
                return response
            else:
                action_failed = True
        else:
            msg = _("Items must be selected in order to perform "
                    "actions on them. No items have been changed.")
            self.message_user(request, msg, messages.WARNING)
            action_failed = True

    # Actions with confirmation
    if (actions and request.method == 'POST' and
            helpers.ACTION_CHECKBOX_NAME in request.POST and
            'index' not in request.POST and '_save' not in request.POST):
        if selected:
            response = self.response_action(request, queryset=cl.get_queryset(request))
            if response:
                return response
            else:
                action_failed = True

    # If we're allowing changelist editing, we need to construct a formset
    # for the changelist given all the fields to be edited. Then we'll
    # use the formset to validate/process POSTed data.
    formset = cl.formset = None

    # Handle POSTed bulk-edit data.
    if (request.method == "POST" and cl.list_editable and
            '_save' in request.POST and not action_failed):
        FormSet = self.get_changelist_formset(request)
        formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
        if formset.is_valid():
            changecount = 0
            for form in formset.forms:
                if form.has_changed():
                    obj = self.save_form(request, form, change=True)
                    self.save_model(request, obj, form, change=True)
                    self.save_related(request, form, formsets=[], change=True)
                    change_msg = self.construct_change_message(request, form, None)
                    self.log_change(request, obj, change_msg)
                    changecount += 1

            if changecount:
                if changecount == 1:
                    name = force_text(opts.verbose_name)
                else:
                    name = force_text(opts.verbose_name_plural)
                msg = ungettext("%(count)s %(name)s was changed successfully.",
                                "%(count)s %(name)s were changed successfully.",
                                changecount) % {'count': changecount,
                                                'name': name,
                                                'obj': force_text(obj)}
                self.message_user(request, msg, messages.SUCCESS)

            return HttpResponseRedirect(request.get_full_path())

    # Handle GET -- construct a formset for display.
    elif cl.list_editable:
        FormSet = self.get_changelist_formset(request)
        formset = cl.formset = FormSet(queryset=cl.result_list)

    # Build the list of media to be used by the formset.
    if formset:
        media = self.media + formset.media
    else:
        media = self.media

    # Build the action form and populate it with available actions.
    if actions:
        action_form = self.action_form(auto_id=None)
        action_form.fields['action'].choices = self.get_action_choices(request)
    else:
        action_form = None

    selection_note_all = ungettext('%(total_count)s selected',
        'All %(total_count)s selected', cl.result_count)

    context = dict(
        self.admin_site.each_context(),
        module_name=force_text(opts.verbose_name_plural),
        selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
        selection_note_all=selection_note_all % {'total_count': cl.result_count},
        title=cl.title,
        is_popup=cl.is_popup,
        to_field=cl.to_field,
        cl=cl,
        media=media,
        has_add_permission=self.has_add_permission(request),
        opts=cl.opts,
        action_form=action_form,
        actions_on_top=self.actions_on_top,
        actions_on_bottom=self.actions_on_bottom,
        actions_selection_counter=self.actions_selection_counter,
        preserved_filters=self.get_preserved_filters(request),
    )
    context.update(extra_context or {})

    return TemplateResponse(request, self.change_list_template or [
        'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
        'admin/%s/change_list.html' % app_label,
        'admin/change_list.html'
    ], context, current_app=self.admin_site.name)

@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context=None):
    "The 'delete' admin view for this model."
    opts = self.model._meta
    app_label = opts.app_label

    obj = self.get_object(request, unquote(object_id))

    if not self.has_delete_permission(request, obj):
        raise PermissionDenied

    if obj is None:
        raise Http404(
            _('%(name)s object with primary key %(key)r does not exist.') %
            {'name': force_text(opts.verbose_name), 'key': escape(object_id)}
        )

    using = router.db_for_write(self.model)

    # Populate deleted_objects, a data structure of all related objects that
    # will also be deleted.
    (deleted_objects, perms_needed, protected) = get_deleted_objects(
        [obj], opts, request.user, self.admin_site, using)

    if request.POST:  # The user has already confirmed the deletion.
        if perms_needed:
            raise PermissionDenied
        obj_display = force_text(obj)
        self.log_deletion(request, obj, obj_display)
        self.delete_model(request, obj)

        return self.response_delete(request, obj_display)

    object_name = force_text(opts.verbose_name)

    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": object_name}
    else:
        title = _("Are you sure?")

    context = dict(
        self.admin_site.each_context(),
        title=title,
        object_name=object_name,
        object=obj,
        deleted_objects=deleted_objects,
        perms_lacking=perms_needed,
        protected=protected,
        opts=opts,
        app_label=app_label,
        preserved_filters=self.get_preserved_filters(request),
    )
    context.update(extra_context or {})

    return self.render_delete_form(request, context)

def history_view(self, request, object_id, extra_context=None):
    "The 'history' admin view for this model."
    from django.contrib.admin.models import LogEntry
    # First check if the user can see this history.
    model = self.model
    obj = get_object_or_404(self.get_queryset(request), pk=unquote(object_id))

    if not self.has_change_permission(request, obj):
        raise PermissionDenied

    # Then get the history for this object.
    opts = model._meta
    app_label = opts.app_label
    action_list = LogEntry.objects.filter(
        object_id=unquote(object_id),
        content_type=get_content_type_for_model(model)
    ).select_related().order_by('action_time')

    context = dict(self.admin_site.each_context(),
        title=_('Change history: %s') % force_text(obj),
        action_list=action_list,
        module_name=capfirst(force_text(opts.verbose_name_plural)),
        object=obj,
        opts=opts,
        preserved_filters=self.get_preserved_filters(request),
    )
    context.update(extra_context or {})

    return TemplateResponse(request, self.object_history_template or [
        "admin/%s/%s/object_history.html" % (app_label, opts.model_name),
        "admin/%s/object_history.html" % app_label,
        "admin/object_history.html"
    ], context, current_app=self.admin_site.name)
class InlineModelAdmin(BaseModelAdmin):
    """
    Options for inline editing of ``model`` instances.

    Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
    from ``model`` to its parent. This is required if ``model`` has more than
    one ``ForeignKey`` to its parent.
    """
    model = None                    # the inlined model; must be set by subclasses
    fk_name = None                  # FK attribute to the parent, if ambiguous
    formset = BaseInlineFormSet
    extra = 3                       # number of blank extra forms shown
    min_num = None
    max_num = None
    template = None
    verbose_name = None             # falls back to model._meta values in __init__
    verbose_name_plural = None
    can_delete = True

    checks_class = InlineModelAdminChecks

    def __init__(self, parent_model, admin_site):
        self.admin_site = admin_site
        self.parent_model = parent_model
        self.opts = self.model._meta
        super(InlineModelAdmin, self).__init__()
        if self.verbose_name is None:
            self.verbose_name = self.model._meta.verbose_name
        if self.verbose_name_plural is None:
            self.verbose_name_plural = self.model._meta.verbose_name_plural

    @property
    def media(self):
        # Serve minified assets unless DEBUG is on.
        extra = '' if settings.DEBUG else '.min'
        js = ['jquery%s.js' % extra, 'jquery.init.js', 'inlines%s.js' % extra]
        if self.prepopulated_fields:
            js.extend(['urlify.js', 'prepopulate%s.js' % extra])
        if self.filter_vertical or self.filter_horizontal:
            js.extend(['SelectBox.js', 'SelectFilter2.js'])
        return forms.Media(js=[static('admin/js/%s' % url) for url in js])

    def get_extra(self, request, obj=None, **kwargs):
        """Hook for customizing the number of extra inline forms."""
        return self.extra

    def get_min_num(self, request, obj=None, **kwargs):
        """Hook for customizing the min number of inline forms."""
        return self.min_num

    def get_max_num(self, request, obj=None, **kwargs):
        """Hook for customizing the max number of extra inline forms."""
        return self.max_num

    def get_formset(self, request, obj=None, **kwargs):
        """Returns a BaseInlineFormSet class for use in admin add/change views."""
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # InlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # If exclude is an empty list we use None, since that's the actual
        # default.
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "form": self.form,
            "formset": self.formset,
            "fk_name": self.fk_name,
            "fields": fields,
            "exclude": exclude,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "extra": self.get_extra(request, obj, **kwargs),
            "min_num": self.get_min_num(request, obj, **kwargs),
            "max_num": self.get_max_num(request, obj, **kwargs),
            "can_delete": can_delete,
        }

        defaults.update(kwargs)
        base_model_form = defaults['form']

        class DeleteProtectedModelForm(base_model_form):
            def hand_clean_DELETE(self):
                """
                We don't validate the 'DELETE' field itself because on
                templates it's not rendered using the field information, but
                just using a generic "deletion_field" of the InlineModelAdmin.
                """
                if self.cleaned_data.get(DELETION_FIELD_NAME, False):
                    using = router.db_for_write(self._meta.model)
                    collector = NestedObjects(using=using)
                    collector.collect([self.instance])
                    if collector.protected:
                        objs = []
                        for p in collector.protected:
                            objs.append(
                                # Translators: Model verbose name and instance representation, suitable to be an item in a list
                                _('%(class_name)s %(instance)s') % {
                                    'class_name': p._meta.verbose_name,
                                    'instance': p}
                            )
                        params = {'class_name': self._meta.model._meta.verbose_name,
                                  'instance': self.instance,
                                  'related_objects': get_text_list(objs, _('and'))}
                        msg = _("Deleting %(class_name)s %(instance)s would require "
                                "deleting the following protected related objects: "
                                "%(related_objects)s")
                        raise ValidationError(msg, code='deleting_protected', params=params)

            def is_valid(self):
                result = super(DeleteProtectedModelForm, self).is_valid()
                # Run the protected-objects check even when other fields fail.
                self.hand_clean_DELETE()
                return result

        defaults['form'] = DeleteProtectedModelForm

        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS

        return inlineformset_factory(self.parent_model, self.model, **defaults)

    def get_fields(self, request, obj=None):
        if self.fields:
            return self.fields
        # Derive the field list from the generated formset's form.
        form = self.get_formset(request, obj, fields=None).form
        return list(form.base_fields) + list(self.get_readonly_fields(request, obj))

    def get_queryset(self, request):
        queryset = super(InlineModelAdmin, self).get_queryset(request)
        if not self.has_change_permission(request):
            # Users without change permission see no inline rows at all.
            queryset = queryset.none()
        return queryset

    def has_add_permission(self, request):
        if self.opts.auto_created:
            # We're checking the rights to an auto-created intermediate model,
            # which doesn't have its own individual permissions. The user needs
            # to have the change permission for the related model in order to
            # be able to do anything with the intermediate model.
            return self.has_change_permission(request)
        return super(InlineModelAdmin, self).has_add_permission(request)

    def has_change_permission(self, request, obj=None):
        opts = self.opts
        if opts.auto_created:
            # The model was auto-created as intermediary for a
            # ManyToMany-relationship, find the target model
            for field in opts.fields:
                if field.rel and field.rel.to != self.parent_model:
                    opts = field.rel.to._meta
                    break
        codename = get_permission_codename('change', opts)
        return request.user.has_perm("%s.%s" % (opts.app_label, codename))

    def has_delete_permission(self, request, obj=None):
        if self.opts.auto_created:
            # We're checking the rights to an auto-created intermediate model,
            # which doesn't have its own individual permissions. The user needs
            # to have the change permission for the related model in order to
            # be able to do anything with the intermediate model.
            return self.has_change_permission(request, obj)
        return super(InlineModelAdmin, self).has_delete_permission(request, obj)


class StackedInline(InlineModelAdmin):
    # One vertically stacked fieldset per inline object.
    template = 'admin/edit_inline/stacked.html'


class TabularInline(InlineModelAdmin):
    # One table row per inline object.
    template = 'admin/edit_inline/tabular.html'
./CrossVul/dataset_final_sorted/CWE-264/py/good_2042_1
crossvul-python_data_good_3634_4
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of SQLAlchemy backend."""

import datetime
import functools
import re
import warnings

from nova import block_device
from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova import log as logging
from nova.compute import aggregate_states
from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import and_
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column

FLAGS = flags.FLAGS
flags.DECLARE('reserved_host_disk_mb', 'nova.scheduler.host_manager')
flags.DECLARE('reserved_host_memory_mb', 'nova.scheduler.host_manager')

LOG = logging.getLogger(__name__)


def is_admin_context(context):
    """Indicates if the request context is an administrator."""
    if not context:
        # NOTE(review): `_` is presumably installed as a gettext builtin by
        # the nova framework — it is not imported here; confirm at runtime.
        warnings.warn(_('Use of empty request context is deprecated'),
                      DeprecationWarning)
        # An empty context is a programming error; fail loudly.
        raise Exception('die')
    return context.is_admin


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if is_user_context(context):
        if not context.project_id:
            raise exception.NotAuthorized()
        elif context.project_id != project_id:
            raise exception.NotAuthorized()


def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if is_user_context(context):
        if not context.user_id:
            raise exception.NotAuthorized()
        elif context.user_id != user_id:
            raise exception.NotAuthorized()


def require_admin_context(f):
    """Decorator to require admin request context.

    The first argument to the wrapped function must be the context.

    """

    # Use functools.wraps for consistency with require_aggregate_exists
    # so the wrapped function keeps its name/docstring.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]):
            raise exception.AdminRequired()
        return f(*args, **kwargs)
    return wrapper


def require_context(f):
    """Decorator to require *any* user or admin context.

    This does no authorization for user or project access matching, see
    :py:func:`authorize_project_context` and
    :py:func:`authorize_user_context`.

    The first argument to the wrapped function must be the context.

    """

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if not is_admin_context(args[0]) and not is_user_context(args[0]):
            raise exception.NotAuthorized()
        return f(*args, **kwargs)
    return wrapper


def require_instance_exists(f):
    """Decorator to require the specified instance to exist.

    Requires the wrapped function to use context and instance_id as
    their first two arguments.
    """

    # functools.wraps supersedes the previous manual __name__ copy and
    # also preserves the docstring and module.
    @functools.wraps(f)
    def wrapper(context, instance_id, *args, **kwargs):
        db.instance_get(context, instance_id)
        return f(context, instance_id, *args, **kwargs)

    return wrapper


def require_volume_exists(f):
    """Decorator to require the specified volume to exist.

    Requires the wrapped function to use context and volume_id as
    their first two arguments.
    """

    @functools.wraps(f)
    def wrapper(context, volume_id, *args, **kwargs):
        db.volume_get(context, volume_id)
        return f(context, volume_id, *args, **kwargs)

    return wrapper


def require_aggregate_exists(f):
    """Decorator to require the specified aggregate to exist.

    Requires the wrapped function to use context and aggregate_id as
    their first two arguments.
    """

    @functools.wraps(f)
    def wrapper(context, aggregate_id, *args, **kwargs):
        db.aggregate_get(context, aggregate_id)
        return f(context, aggregate_id, *args, **kwargs)
    return wrapper


def model_query(context, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id.
    """
    session = kwargs.get('session') or get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    project_only = kwargs.get('project_only')

    query = session.query(*args)

    if read_deleted == 'no':
        query = query.filter_by(deleted=False)
    elif read_deleted == 'yes':
        pass  # omit the filter to include deleted and active
    elif read_deleted == 'only':
        query = query.filter_by(deleted=True)
    else:
        raise Exception(
                _("Unrecognized read_deleted value '%s'") % read_deleted)

    if project_only and is_user_context(context):
        query = query.filter_by(project_id=context.project_id)

    return query


def exact_filter(query, model, filters, legal_keys):
    """Applies exact match filtering to a query.

    Returns the updated query.  Modifies filters argument to remove
    filters consumed.

    :param query: query to apply filters to
    :param model: model object the query applies to, for IN-style
                  filtering
    :param filters: dictionary of filters; values that are lists,
                    tuples, sets, or frozensets cause an 'IN' test to
                    be performed, while exact matching ('==' operator)
                    is used for other values
    :param legal_keys: list of keys to apply exact filtering to
    """

    filter_dict = {}

    # Walk through all the keys
    for key in legal_keys:
        # Skip ones we're not filtering on
        if key not in filters:
            continue

        # OK, filtering on this key; what value do we search for?
        value = filters.pop(key)

        if isinstance(value, (list, tuple, set, frozenset)):
            # Looking for values in a list; apply to query directly
            column_attr = getattr(model, key)
            query = query.filter(column_attr.in_(value))
        else:
            # OK, simple exact match; save for later
            filter_dict[key] = value

    # Apply simple exact matches
    if filter_dict:
        query = query.filter_by(**filter_dict)

    return query


###################


@require_admin_context
def service_destroy(context, service_id):
    """Soft-delete a service and any compute_node rows attached to it."""
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.delete(session=session)

        if service_ref.topic == 'compute' and service_ref.compute_node:
            for c in service_ref.compute_node:
                c.delete(session=session)


@require_admin_context
def service_get(context, service_id, session=None):
    """Get a service by id; raises ServiceNotFound if missing."""
    result = model_query(context, models.Service, session=session).\
                     options(joinedload('compute_node')).\
                     filter_by(id=service_id).\
                     first()
    if not result:
        raise exception.ServiceNotFound(service_id=service_id)

    return result


@require_admin_context
def service_get_all(context, disabled=None):
    """Get all services, optionally filtered by the disabled flag."""
    query = model_query(context, models.Service)

    if disabled is not None:
        query = query.filter_by(disabled=disabled)

    return query.all()


@require_admin_context
def service_get_all_by_topic(context, topic):
    """Get all enabled services for the given topic."""
    return model_query(context, models.Service, read_deleted="no").\
                filter_by(disabled=False).\
                filter_by(topic=topic).\
                all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
    """Get the first enabled service matching host and topic."""
    return model_query(context, models.Service, read_deleted="no").\
                filter_by(disabled=False).\
                filter_by(host=host).\
                filter_by(topic=topic).\
                first()


@require_admin_context
def service_get_all_by_host(context, host):
    """Get all services running on the given host."""
    return model_query(context, models.Service, read_deleted="no").\
                filter_by(host=host).\
                all()


@require_admin_context
def service_get_all_compute_by_host(context, host):
    """Get compute services on host; raises ComputeHostNotFound if none."""
    result = model_query(context, models.Service, read_deleted="no").\
                options(joinedload('compute_node')).\
                filter_by(host=host).\
                filter_by(topic="compute").\
                all()

    if not result:
        raise exception.ComputeHostNotFound(host=host)

    return result


@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
    """Return enabled services for topic, left-joined to subq and sorted
    by the subquery column named by `label` (missing rows sort as 0)."""
    sort_value = getattr(subq.c, label)
    return model_query(context, models.Service,
                       func.coalesce(sort_value, 0),
                       session=session, read_deleted="no").\
                filter_by(topic=topic).\
                filter_by(disabled=False).\
                outerjoin((subq, models.Service.host == subq.c.host)).\
                order_by(sort_value).\
                all()


@require_admin_context
def service_get_all_compute_sorted(context):
    """Get all compute services sorted by their total allocated vcpus."""
    session = get_session()
    with session.begin():
        # NOTE(vish): The intended query is below
        #             SELECT services.*, COALESCE(inst_cores.instance_cores,
        #                                         0)
        #             FROM services LEFT OUTER JOIN
        #             (SELECT host, SUM(instances.vcpus) AS instance_cores
        #              FROM instances GROUP BY host) AS inst_cores
        #             ON services.host = inst_cores.host
        topic = 'compute'
        label = 'instance_cores'
        subq = model_query(context, models.Instance.host,
                           func.sum(models.Instance.vcpus).label(label),
                           session=session, read_deleted="no").\
                       group_by(models.Instance.host).\
                       subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)


@require_admin_context
def service_get_all_volume_sorted(context):
    """Get all volume services sorted by their total allocated gigabytes."""
    session = get_session()
    with session.begin():
        topic = 'volume'
        label = 'volume_gigabytes'
        subq = model_query(context, models.Volume.host,
                           func.sum(models.Volume.size).label(label),
                           session=session, read_deleted="no").\
                       group_by(models.Volume.host).\
                       subquery()
        return _service_get_all_topic_subquery(context,
                                               session,
                                               topic,
                                               subq,
                                               label)


@require_admin_context
def service_get_by_args(context, host, binary):
    """Get a service by host and binary name; raises HostBinaryNotFound."""
    result = model_query(context, models.Service).\
                     filter_by(host=host).\
                     filter_by(binary=binary).\
                     first()

    if not result:
        raise exception.HostBinaryNotFound(host=host, binary=binary)

    return result


@require_admin_context
def service_create(context, values):
    """Create a service record; new services start disabled unless
    FLAGS.enable_new_services is set."""
    service_ref = models.Service()
    service_ref.update(values)
    if not FLAGS.enable_new_services:
        service_ref.disabled = True
    service_ref.save()
    return service_ref


@require_admin_context
def service_update(context, service_id, values):
    """Update a service record inside a transaction."""
    session = get_session()
    with session.begin():
        service_ref = service_get(context, service_id, session=session)
        service_ref.update(values)
        service_ref.save(session=session)


###################


@require_admin_context
def compute_node_get(context, compute_id, session=None):
    """Get a compute node by id; raises ComputeHostNotFound if missing."""
    result = model_query(context, models.ComputeNode, session=session).\
                     filter_by(id=compute_id).\
                     first()

    if not result:
        raise exception.ComputeHostNotFound(host=compute_id)

    return result


@require_admin_context
def compute_node_get_all(context, session=None):
    """Get all compute nodes with their service rows eagerly loaded."""
    return model_query(context, models.ComputeNode, session=session).\
                    options(joinedload('service')).\
                    all()


def _get_host_utilization(context, host, ram_mb, disk_gb):
    """Compute the current utilization of a given host."""
    instances = instance_get_all_by_host(context, host)
    vms = len(instances)
    # Capacity net of the reservations declared in flags.
    free_ram_mb = ram_mb - FLAGS.reserved_host_memory_mb
    free_disk_gb = disk_gb - (FLAGS.reserved_host_disk_mb * 1024)

    work = 0
    for instance in instances:
        free_ram_mb -= instance.memory_mb
        free_disk_gb -= instance.root_gb
        free_disk_gb -= instance.ephemeral_gb
        # In-flight state transitions count as outstanding workload.
        if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING,
                                 vm_states.MIGRATING, vm_states.RESIZING]:
            work += 1
    return dict(free_ram_mb=free_ram_mb,
                free_disk_gb=free_disk_gb,
                current_workload=work,
                running_vms=vms)


def _adjust_compute_node_values_for_utilization(context, values, session):
    """Merge current host-utilization figures into `values` (in place)."""
    service_ref = service_get(context, values['service_id'], session=session)
    host = service_ref['host']
    ram_mb = values['memory_mb']
    disk_gb = values['local_gb']
    values.update(_get_host_utilization(context, host, ram_mb, disk_gb))


@require_admin_context
def compute_node_create(context, values, session=None):
    """Creates a new ComputeNode and populates the capacity fields
    with the most recent data."""
    if not session:
        session = get_session()

    _adjust_compute_node_values_for_utilization(context, values, session)
    with session.begin(subtransactions=True):
        compute_node_ref = models.ComputeNode()
        session.add(compute_node_ref)
        compute_node_ref.update(values)
    return compute_node_ref


@require_admin_context
def compute_node_update(context, compute_id, values, auto_adjust):
    """Updates a ComputeNode record; when auto_adjust is set the capacity
    fields are refreshed from the host's current utilization first."""
    session = get_session()
    if auto_adjust:
        _adjust_compute_node_values_for_utilization(context, values, session)
    with session.begin(subtransactions=True):
        compute_ref = compute_node_get(context, compute_id, session=session)
        compute_ref.update(values)
        compute_ref.save(session=session)


def compute_node_get_by_host(context, host):
    """Get all capacity entries for the given host."""
    session = get_session()
    with session.begin():
        node = session.query(models.ComputeNode).\
                             options(joinedload('service')).\
                             filter(models.Service.host == host).\
                             filter_by(deleted=False)
        return node.first()


def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
                          free_disk_gb_delta=0, work_delta=0, vm_delta=0):
    """Update a specific ComputeNode entry by a series of deltas.
    Do this as a single atomic action and lock the row for the
    duration of the operation. Requires that ComputeNode record exist."""
    session = get_session()
    compute_node = None
    with session.begin(subtransactions=True):
        compute_node = session.query(models.ComputeNode).\
                              options(joinedload('service')).\
                              filter(models.Service.host == host).\
                              filter_by(deleted=False).\
                              with_lockmode('update').\
                              first()
        if compute_node is None:
            raise exception.NotFound(_("No ComputeNode for %(host)s") %
                                     locals())

        # This table thingy is how we get atomic UPDATE x = x + 1
        # semantics.
        table = models.ComputeNode.__table__
        if free_ram_mb_delta != 0:
            compute_node.free_ram_mb = table.c.free_ram_mb + free_ram_mb_delta
        if free_disk_gb_delta != 0:
            compute_node.free_disk_gb = (table.c.free_disk_gb +
                                         free_disk_gb_delta)
        if work_delta != 0:
            compute_node.current_workload = (table.c.current_workload +
                                             work_delta)
        if vm_delta != 0:
            compute_node.running_vms = table.c.running_vms + vm_delta
    return compute_node


def compute_node_utilization_set(context, host, free_ram_mb=None,
                                 free_disk_gb=None, work=None, vms=None):
    """Like compute_node_utilization_update() modify a specific host
    entry. But this function will set the metrics absolutely
    (vs. a delta update).
    """
    session = get_session()
    compute_node = None
    with session.begin(subtransactions=True):
        compute_node = session.query(models.ComputeNode).\
                              options(joinedload('service')).\
                              filter(models.Service.host == host).\
                              filter_by(deleted=False).\
                              with_lockmode('update').\
                              first()
        if compute_node is None:
            raise exception.NotFound(_("No ComputeNode for %(host)s") %
                                     locals())

        if free_ram_mb != None:
            compute_node.free_ram_mb = free_ram_mb
        if free_disk_gb != None:
            compute_node.free_disk_gb = free_disk_gb
        if work != None:
            compute_node.current_workload = work
        if vms != None:
            compute_node.running_vms = vms

    return compute_node


###################


@require_admin_context
def certificate_get(context, certificate_id, session=None):
    """Get a certificate by id; raises CertificateNotFound if missing."""
    result = model_query(context, models.Certificate, session=session).\
                     filter_by(id=certificate_id).\
                     first()

    if not result:
        raise exception.CertificateNotFound(certificate_id=certificate_id)

    return result


@require_admin_context
def certificate_create(context, values):
    """Create a certificate record from a dict of column values."""
    certificate_ref = models.Certificate()
    for (key, value) in values.iteritems():
        certificate_ref[key] = value
    certificate_ref.save()
    return certificate_ref


@require_admin_context
def certificate_get_all_by_project(context, project_id):
    """Get all certificates for a project."""
    return model_query(context, models.Certificate, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()


@require_admin_context
def certificate_get_all_by_user(context, user_id):
    """Get all certificates for a user."""
    return model_query(context, models.Certificate, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   all()


@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
    """Get all certificates for a user within a project."""
    return model_query(context, models.Certificate, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   filter_by(project_id=project_id).\
                   all()


###################


@require_context
def floating_ip_get(context, id):
    """Get a floating IP by id, scoped to the caller's project for
    non-admin contexts (project_only); raises FloatingIpNotFound."""
    # NOTE: `id` shadows the builtin; kept for API compatibility.
    result = model_query(context, models.FloatingIp, project_only=True).\
                 filter_by(id=id).\
                 first()

    if not result:
        raise exception.FloatingIpNotFound(id=id)

    return result
@require_context
def floating_ip_get_pools(context):
    """Return the distinct floating-IP pool names as [{'name': ...}]."""
    session = get_session()
    pools = []
    for result in session.query(models.FloatingIp.pool).distinct():
        pools.append({'name': result[0]})
    return pools


@require_context
def floating_ip_allocate_address(context, project_id, pool):
    """Claim a free floating IP from `pool` for the project and return
    its address; raises NoMoreFloatingIps when the pool is exhausted."""
    authorize_project_context(context, project_id)
    session = get_session()
    with session.begin():
        floating_ip_ref = model_query(context, models.FloatingIp,
                                      session=session, read_deleted="no").\
                                  filter_by(fixed_ip_id=None).\
                                  filter_by(project_id=None).\
                                  filter_by(pool=pool).\
                                  with_lockmode('update').\
                                  first()
        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not floating_ip_ref:
            raise exception.NoMoreFloatingIps()
        floating_ip_ref['project_id'] = project_id
        session.add(floating_ip_ref)
    return floating_ip_ref['address']


@require_context
def floating_ip_create(context, values):
    """Create a floating IP record and return its address."""
    floating_ip_ref = models.FloatingIp()
    floating_ip_ref.update(values)
    floating_ip_ref.save()
    return floating_ip_ref['address']


@require_context
def floating_ip_count_by_project(context, project_id):
    """Count the project's floating IPs, excluding auto-assigned ones."""
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
    return model_query(context, models.FloatingIp, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   filter_by(auto_assigned=False).\
                   count()


@require_context
def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    """Point a floating IP at a fixed IP and record the hosting node."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     floating_address,
                                                     session=session)
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               fixed_address,
                                               session=session)
        floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
        floating_ip_ref.host = host
        floating_ip_ref.save(session=session)


@require_context
def floating_ip_deallocate(context, address):
    """Return a floating IP to the pool (clears project, host and the
    auto_assigned flag)."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref['project_id'] = None
        floating_ip_ref['host'] = None
        floating_ip_ref['auto_assigned'] = False
        floating_ip_ref.save(session=session)


@require_context
def floating_ip_destroy(context, address):
    """Soft-delete the floating IP record for `address`."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref.delete(session=session)


@require_context
def floating_ip_disassociate(context, address):
    """Detach a floating IP from its fixed IP and return the fixed IP
    address that was associated (or None)."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        fixed_ip_ref = fixed_ip_get(context, floating_ip_ref['fixed_ip_id'])
        if fixed_ip_ref:
            fixed_ip_address = fixed_ip_ref['address']
        else:
            fixed_ip_address = None
        floating_ip_ref.fixed_ip_id = None
        floating_ip_ref.host = None
        floating_ip_ref.save(session=session)
    return fixed_ip_address


@require_context
def floating_ip_set_auto_assigned(context, address):
    """Mark the floating IP at `address` as auto-assigned."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session=session)
        floating_ip_ref.auto_assigned = True
        floating_ip_ref.save(session=session)


def _floating_ip_get_all(context):
    """Base query for all non-deleted floating IPs."""
    return model_query(context, models.FloatingIp, read_deleted="no")
@require_admin_context
def floating_ip_get_all(context):
    """Get all floating IPs; raises NoFloatingIpsDefined if none exist."""
    floating_ip_refs = _floating_ip_get_all(context).all()
    if not floating_ip_refs:
        raise exception.NoFloatingIpsDefined()
    return floating_ip_refs


@require_admin_context
def floating_ip_get_all_by_host(context, host):
    """Get all floating IPs hosted on `host`; raises
    FloatingIpNotFoundForHost if none exist."""
    floating_ip_refs = _floating_ip_get_all(context).\
                           filter_by(host=host).\
                           all()
    if not floating_ip_refs:
        raise exception.FloatingIpNotFoundForHost(host=host)
    return floating_ip_refs


@require_context
def floating_ip_get_all_by_project(context, project_id):
    """Get the project's floating IPs, excluding auto-assigned ones."""
    authorize_project_context(context, project_id)
    # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
    return _floating_ip_get_all(context).\
               filter_by(project_id=project_id).\
               filter_by(auto_assigned=False).\
               all()


@require_context
def floating_ip_get_by_address(context, address, session=None):
    """Get a floating IP by address; enforces project access for
    non-admin callers. Raises FloatingIpNotFoundForAddress."""
    result = model_query(context, models.FloatingIp, session=session).\
                 filter_by(address=address).\
                 first()

    if not result:
        raise exception.FloatingIpNotFoundForAddress(address=address)

    # If the floating IP has a project ID set, check to make sure
    # the non-admin user has access.
    if result.project_id and is_user_context(context):
        authorize_project_context(context, result.project_id)

    return result


@require_context
def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
    """Get the floating IPs attached to the fixed IP at `fixed_address`."""
    if not session:
        session = get_session()

    fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)
    fixed_ip_id = fixed_ip['id']

    return model_query(context, models.FloatingIp, session=session).\
                   filter_by(fixed_ip_id=fixed_ip_id).\
                   all()

# NOTE(tr3buchet) please don't invent an exception here, empty list is fine


@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
    """Get the floating IPs attached to the fixed IP with `fixed_ip_id`."""
    if not session:
        session = get_session()

    return model_query(context, models.FloatingIp, session=session).\
                   filter_by(fixed_ip_id=fixed_ip_id).\
                   all()


@require_context
def floating_ip_update(context, address, values):
    """Update the floating IP at `address` with the given column values."""
    session = get_session()
    with session.begin():
        floating_ip_ref = floating_ip_get_by_address(context,
                                                     address,
                                                     session)
        for (key, value) in values.iteritems():
            floating_ip_ref[key] = value
        floating_ip_ref.save(session=session)


@require_context
def _dnsdomain_get(context, session, fqdomain):
    """Fetch (and row-lock) the DNSDomain record for fqdomain, or None."""
    return model_query(context, models.DNSDomain,
                       session=session, read_deleted="no").\
               filter_by(domain=fqdomain).\
               with_lockmode('update').\
               first()


@require_context
def dnsdomain_get(context, fqdomain):
    """Get the DNSDomain record for fqdomain (or None)."""
    session = get_session()
    with session.begin():
        return _dnsdomain_get(context, session, fqdomain)


@require_admin_context
def _dnsdomain_get_or_create(context, session, fqdomain):
    """Return the existing DNSDomain for fqdomain, or an unsaved new one."""
    domain_ref = _dnsdomain_get(context, session, fqdomain)
    if not domain_ref:
        dns_ref = models.DNSDomain()
        dns_ref.update({'domain': fqdomain,
                        'availability_zone': None,
                        'project_id': None})
        return dns_ref

    return domain_ref


@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
    """Associate a DNS domain with an availability zone (private scope).

    NOTE(review): the trailing save() of this function sits past this
    extraction chunk's boundary.
    """
    session = get_session()
    with session.begin():
        domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
        domain_ref.scope = 'private'
        domain_ref.availability_zone = zone
domain_ref.save(session=session) @require_admin_context def dnsdomain_register_for_project(context, fqdomain, project): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'public' domain_ref.project_id = project domain_ref.save(session=session) @require_admin_context def dnsdomain_unregister(context, fqdomain): session = get_session() with session.begin(): session.query(models.DNSDomain).\ filter_by(domain=fqdomain).\ delete() @require_context def dnsdomain_list(context): session = get_session() records = model_query(context, models.DNSDomain, session=session, read_deleted="no").\ with_lockmode('update').all() domains = [] for record in records: domains.append(record.domain) return domains ################### @require_admin_context def fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Keyword arguments: reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=reserved).\ filter_by(address=address).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if fixed_ip_ref is None: raise exception.FixedIpNotFoundForNetwork(address=address, network_id=network_id) if fixed_ip_ref.instance_id: raise exception.FixedIpAlreadyInUse(address=address) if not fixed_ip_ref.network_id: fixed_ip_ref.network_id = network_id fixed_ip_ref.instance_id = instance_id session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_admin_context def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): session = get_session() with 
session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=False).\ filter_by(instance_id=None).\ filter_by(host=None).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: raise exception.NoMoreFixedIps() if fixed_ip_ref['network_id'] is None: fixed_ip_ref['network'] = network_id if instance_id: fixed_ip_ref['instance_id'] = instance_id if host: fixed_ip_ref['host'] = host session.add(fixed_ip_ref) return fixed_ip_ref['address'] @require_context def fixed_ip_create(context, values): fixed_ip_ref = models.FixedIp() fixed_ip_ref.update(values) fixed_ip_ref.save() return fixed_ip_ref['address'] @require_context def fixed_ip_bulk_create(context, ips): session = get_session() with session.begin(): for ip in ips: model = models.FixedIp() model.update(ip) session.add(model) @require_context def fixed_ip_disassociate(context, address): session = get_session() with session.begin(): fixed_ip_ref = fixed_ip_get_by_address(context, address, session=session) fixed_ip_ref['instance_id'] = None fixed_ip_ref.save(session=session) @require_admin_context def fixed_ip_disassociate_all_by_timeout(context, host, time): session = get_session() # NOTE(vish): only update fixed ips that "belong" to this # host; i.e. the network host or the instance # host matches. Two queries necessary because # join with update doesn't work. 
    host_filter = or_(and_(models.Instance.host == host,
                           models.Network.multi_host == True),
                      models.Network.host == host)
    result = session.query(models.FixedIp.id).\
                     filter(models.FixedIp.deleted == False).\
                     filter(models.FixedIp.allocated == False).\
                     filter(models.FixedIp.updated_at < time).\
                     join((models.Network,
                           models.Network.id == models.FixedIp.network_id)).\
                     join((models.Instance,
                           models.Instance.id == models.FixedIp.instance_id)).\
                     filter(host_filter).\
                     all()
    fixed_ip_ids = [fip[0] for fip in result]
    if not fixed_ip_ids:
        return 0
    result = model_query(context, models.FixedIp, session=session).\
                     filter(models.FixedIp.id.in_(fixed_ip_ids)).\
                     update({'instance_id': None,
                             'leased': False,
                             'updated_at': utils.utcnow()},
                             synchronize_session='fetch')
    return result


@require_context
def fixed_ip_get(context, id, session=None):
    """Get a fixed IP by id; enforces project access for non-admins.
    Raises FixedIpNotFound if missing."""
    result = model_query(context, models.FixedIp, session=session).\
                     filter_by(id=id).\
                     first()
    if not result:
        raise exception.FixedIpNotFound(id=id)

    # FIXME(sirp): shouldn't we just use project_only here to restrict the
    # results?
    if is_user_context(context) and result['instance_id'] is not None:
        instance = instance_get(context, result['instance_id'], session)
        authorize_project_context(context, instance.project_id)

    return result


@require_admin_context
def fixed_ip_get_all(context, session=None):
    """Get all fixed IPs (including deleted); raises NoFixedIpsDefined."""
    result = model_query(context, models.FixedIp, session=session,
                         read_deleted="yes").\
                     all()
    if not result:
        raise exception.NoFixedIpsDefined()

    return result


@require_context
def fixed_ip_get_by_address(context, address, session=None):
    """Get a fixed IP by address (including deleted rows); enforces
    project access for non-admins. Raises FixedIpNotFoundForAddress."""
    result = model_query(context, models.FixedIp, session=session,
                         read_deleted="yes").\
                     filter_by(address=address).\
                     first()
    if not result:
        raise exception.FixedIpNotFoundForAddress(address=address)

    # NOTE(sirp): shouldn't we just use project_only here to restrict the
    # results?
    if is_user_context(context) and result['instance_id'] is not None:
        instance = instance_get(context, result['instance_id'], session)
        authorize_project_context(context, instance.project_id)

    return result


@require_context
def fixed_ip_get_by_instance(context, instance_id):
    """Get the fixed IPs assigned to an instance; raises
    FixedIpNotFoundForInstance if there are none."""
    result = model_query(context, models.FixedIp, read_deleted="no").\
                 filter_by(instance_id=instance_id).\
                 all()

    if not result:
        raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)

    return result


@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
    """Get a fixed IP on the given network and host; raises
    FixedIpNotFoundForNetworkHost if missing."""
    result = model_query(context, models.FixedIp, read_deleted="no").\
                 filter_by(network_id=network_id).\
                 filter_by(host=host).\
                 first()

    if not result:
        raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
                                                      host=host)
    return result


@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
    """Get the fixed IPs bound to a virtual interface (may be empty)."""
    result = model_query(context, models.FixedIp, read_deleted="no").\
                 filter_by(virtual_interface_id=vif_id).\
                 all()

    return result


@require_admin_context
def fixed_ip_get_network(context, address):
    """Get the network that the fixed IP at `address` belongs to."""
    fixed_ip_ref = fixed_ip_get_by_address(context, address)
    return fixed_ip_ref.network


@require_context
def fixed_ip_update(context, address, values):
    """Update the fixed IP at `address` with the given column values."""
    session = get_session()
    with session.begin():
        fixed_ip_ref = fixed_ip_get_by_address(context,
                                               address,
                                               session=session)
        fixed_ip_ref.update(values)
        fixed_ip_ref.save(session=session)


###################


@require_context
def virtual_interface_create(context, values):
    """Create a new virtual interface record in the database.

    :param values: = dict containing column values
    """
    try:
        vif_ref = models.VirtualInterface()
        vif_ref.update(values)
        vif_ref.save()
    except IntegrityError:
        raise exception.VirtualInterfaceCreateException()

    return vif_ref


@require_context
def _virtual_interface_query(context, session=None):
    """Base query for virtual interfaces (includes deleted rows)."""
    return model_query(context, models.VirtualInterface, session=session,
                       read_deleted="yes")


@require_context
def virtual_interface_get(context, vif_id, session=None):
    """Gets a virtual interface from the table.

    :param vif_id: = id of the virtual interface
    """
    vif_ref = _virtual_interface_query(context, session=session).\
                      filter_by(id=vif_id).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_address(context, address):
    """Gets a virtual interface from the table.

    :param address: = the address of the interface you're looking to get
    """
    vif_ref = _virtual_interface_query(context).\
                      filter_by(address=address).\
                      first()
    return vif_ref


@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
    """Gets a virtual interface from the table.

    :param vif_uuid: the uuid of the interface you're looking to get
    """
    vif_ref = _virtual_interface_query(context).\
                      filter_by(uuid=vif_uuid).\
                      first()
    return vif_ref


@require_context
@require_instance_exists
def virtual_interface_get_by_instance(context, instance_id):
    """Gets all virtual interfaces for instance.

    :param instance_id: = id of the instance to retrieve vifs for
    """
    vif_refs = _virtual_interface_query(context).\
                       filter_by(instance_id=instance_id).\
                       all()
    return vif_refs


@require_context
def virtual_interface_get_by_instance_and_network(context, instance_id,
                                                  network_id):
    """Gets virtual interface for instance that's associated with
    network."""
    vif_ref = _virtual_interface_query(context).\
                      filter_by(instance_id=instance_id).\
                      filter_by(network_id=network_id).\
                      first()
    return vif_ref


@require_context
def virtual_interface_delete(context, vif_id):
    """Delete virtual interface record from the database.

    :param vif_id: = id of vif to delete
    """
    session = get_session()
    vif_ref = virtual_interface_get(context, vif_id, session)
    with session.begin():
        session.delete(vif_ref)


@require_context
def virtual_interface_delete_by_instance(context, instance_id):
    """Delete virtual interface records that are associated
    with the instance given by instance_id.

    :param instance_id: = id of instance
    """
    vif_refs = virtual_interface_get_by_instance(context, instance_id)
    for vif_ref in vif_refs:
        virtual_interface_delete(context, vif_ref['id'])


@require_context
def virtual_interface_get_all(context):
    """Get all vifs"""
    vif_refs = _virtual_interface_query(context).all()
    return vif_refs


###################


def _metadata_refs(metadata_dict, meta_class):
    """Convert a plain metadata dict into a list of meta_class rows."""
    metadata_refs = []
    if metadata_dict:
        for k, v in metadata_dict.iteritems():
            metadata_ref = meta_class()
            metadata_ref['key'] = k
            metadata_ref['value'] = v
            metadata_refs.append(metadata_ref)
    return metadata_refs


@require_context
def instance_create(context, values):
    """Create a new Instance record in the database.

    context - request context object
    values - dict containing column values.
    """
    values = values.copy()
    values['metadata'] = _metadata_refs(values.get('metadata'),
                                        models.InstanceMetadata)

    instance_ref = models.Instance()
    if not values.get('uuid'):
        values['uuid'] = str(utils.gen_uuid())

    instance_ref.update(values)

    session = get_session()
    with session.begin():
        instance_ref.save(session=session)

    # and create the info_cache table entry for instance
    instance_info_cache_create(context, {'instance_id': instance_ref['uuid']})

    return instance_ref


@require_admin_context
def instance_data_get_for_project(context, project_id):
    """Return (instance count, total vcpus, total memory_mb) for a
    project."""
    result = model_query(context,
                         func.count(models.Instance.id),
                         func.sum(models.Instance.vcpus),
                         func.sum(models.Instance.memory_mb),
                         read_deleted="no").\
                     filter_by(project_id=project_id).\
                     first()
    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0, result[2] or 0)


@require_context
def instance_destroy(context, instance_id):
    """Soft-delete an instance (by id or uuid) and its security-group
    associations, metadata, block device mappings and info cache."""
    session = get_session()
    with session.begin():
        if utils.is_uuid_like(instance_id):
            instance_ref = instance_get_by_uuid(context, instance_id,
                    session=session)
            instance_id = instance_ref['id']
        else:
            instance_ref = instance_get(context, instance_id,
                    session=session)
        session.query(models.Instance).\
                filter_by(id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(instance_id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.InstanceMetadata).\
                filter_by(instance_id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.BlockDeviceMapping).\
                filter_by(instance_id=instance_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})

        instance_info_cache_delete(context, instance_ref['uuid'],
                                   session=session)
    return instance_ref


# NOTE(review): the following definition continues past this extraction
# chunk's boundary and is incomplete here.
@require_context
def
instance_get_by_uuid(context, uuid, session=None): result = _build_instance_get(context, session=session).\ filter_by(uuid=uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context def instance_get(context, instance_id, session=None): result = _build_instance_get(context, session=session).\ filter_by(id=instance_id).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result @require_context def _build_instance_get(context, session=None): return model_query(context, models.Instance, session=session, project_only=True).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')).\ options(joinedload('volumes')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all(context): return model_query(context, models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ all() @require_context def instance_get_all_by_filters(context, filters, sort_key, sort_dir): """Return instances that match all filters. 
Deleted instances will be returned by default, unless there's a filter that says otherwise""" def _regexp_filter_by_metadata(instance, meta): inst_metadata = [{node['key']: node['value']} for node in instance['metadata']] if isinstance(meta, list): for node in meta: if node not in inst_metadata: return False elif isinstance(meta, dict): for k, v in meta.iteritems(): if {k: v} not in inst_metadata: return False return True def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) except AttributeError: return True if v and filter_re.match(str(v)): return True return False sort_fn = {'desc': desc, 'asc': asc} session = get_session() query_prefix = session.query(models.Instance).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key))) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = filters.copy() if 'changes-since' in filters: changes_since = utils.normalize_time(filters['changes-since']) query_prefix = query_prefix.\ filter(models.Instance.updated_at > changes_since) if 'deleted' in filters: # Instances can be soft or hard deleted and the query needs to # include or exclude both if filters.pop('deleted'): deleted = or_(models.Instance.deleted == True, models.Instance.vm_state == vm_states.SOFT_DELETE) query_prefix = query_prefix.filter(deleted) else: query_prefix = query_prefix.\ filter_by(deleted=False).\ filter(models.Instance.vm_state != vm_states.SOFT_DELETE) if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: filters['project_id'] = context.project_id else: filters['user_id'] = context.user_id # Filters for exact matches that we can do along with the SQL query... 
# For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', 'vm_state', 'instance_type_id', 'uuid'] # Filter the query query_prefix = exact_filter(query_prefix, models.Instance, filters, exact_match_filter_names) instances = query_prefix.all() if not instances: return [] # Now filter on everything else for regexp matching.. # For filters not in the list, we'll attempt to use the filter_name # as a column name in Instance.. regexp_filter_funcs = {} for filter_name in filters.iterkeys(): filter_func = regexp_filter_funcs.get(filter_name, None) filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) elif filter_name == 'metadata': filter_l = lambda instance: _regexp_filter_by_metadata(instance, filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) instances = filter(filter_l, instances) if not instances: break return instances @require_context def instance_get_active_by_window(context, begin, end=None, project_id=None): """Return instances that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Return instances and joins that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if 
end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_admin_context def _instance_get_all_query(context, project_only=False): return model_query(context, models.Instance, project_only=project_only).\ options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) @require_admin_context def instance_get_all_by_host(context, host): return _instance_get_all_query(context).filter_by(host=host).all() @require_context def instance_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return _instance_get_all_query(context).\ filter_by(project_id=project_id).\ all() @require_context def instance_get_all_by_reservation(context, reservation_id): return _instance_get_all_query(context, project_only=True).\ filter_by(reservation_id=reservation_id).\ all() # NOTE(jkoelker) This is only being left here for compat with floating # ips. Currently the network_api doesn't return floaters # in network_info. Once it starts return the model. 
This # function and it's call in compute/manager.py on 1829 can # go away @require_context def instance_get_floating_address(context, instance_id): fixed_ips = fixed_ip_get_by_instance(context, instance_id) if not fixed_ips: return None # NOTE(tr3buchet): this only gets the first fixed_ip # won't find floating ips associated with other fixed_ips floating_ips = floating_ip_get_by_fixed_address(context, fixed_ips[0]['address']) if not floating_ips: return None # NOTE(vish): this just returns the first floating ip return floating_ips[0]['address'] @require_admin_context def instance_get_all_hung_in_rebooting(context, reboot_window, session=None): reboot_window = datetime.datetime.utcnow() - datetime.timedelta( seconds=reboot_window) if not session: session = get_session() results = session.query(models.Instance).\ filter(models.Instance.updated_at <= reboot_window).\ filter_by(task_state="rebooting").all() return results @require_context def instance_test_and_set(context, instance_id, attr, ok_states, new_state, session=None): """Atomically check if an instance is in a valid state, and if it is, set the instance into a new state. 
""" if not session: session = get_session() with session.begin(): query = model_query(context, models.Instance, session=session, project_only=True) if utils.is_uuid_like(instance_id): query = query.filter_by(uuid=instance_id) else: query = query.filter_by(id=instance_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues instance = query.with_lockmode('update').first() state = instance[attr] if state not in ok_states: raise exception.InstanceInvalidState( attr=attr, instance_uuid=instance['uuid'], state=state, method='instance_test_and_set') instance[attr] = new_state instance.save(session=session) @require_context def instance_update(context, instance_id, values): session = get_session() if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) else: instance_ref = instance_get(context, instance_id, session=session) metadata = values.get('metadata') if metadata is not None: instance_metadata_update(context, instance_ref['id'], values.pop('metadata'), delete=True) with session.begin(): instance_ref.update(values) instance_ref.save(session=session) return instance_ref def instance_add_security_group(context, instance_uuid, security_group_id): """Associate the given security group with the given instance""" session = get_session() with session.begin(): instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) security_group_ref = security_group_get(context, security_group_id, session=session) instance_ref.security_groups += [security_group_ref] instance_ref.save(session=session) @require_context def instance_remove_security_group(context, instance_uuid, security_group_id): """Disassociate the given security group from the given instance""" session = get_session() instance_ref = instance_get_by_uuid(context, instance_uuid, session=session) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_ref['id']).\ 
filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() action_ref.update(values) session = get_session() with session.begin(): action_ref.save(session=session) return action_ref @require_admin_context def instance_get_actions(context, instance_uuid): """Return the actions associated to the given instance id""" session = get_session() return session.query(models.InstanceActions).\ filter_by(instance_uuid=instance_uuid).\ all() @require_context def instance_get_id_to_uuid_mapping(context, ids): session = get_session() instances = session.query(models.Instance).\ filter(models.Instance.id.in_(ids)).\ all() mapping = {} for instance in instances: mapping[instance['id']] = instance['uuid'] return mapping ################### @require_context def instance_info_cache_create(context, values): """Create a new instance cache record in the table. :param context: = request context object :param values: = dict containing column values """ info_cache = models.InstanceInfoCache() info_cache.update(values) session = get_session() with session.begin(): info_cache.save(session=session) return info_cache @require_context def instance_info_cache_get(context, instance_uuid, session=None): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance :param session: = optional session object """ session = session or get_session() info_cache = session.query(models.InstanceInfoCache).\ filter_by(instance_id=instance_uuid).\ first() return info_cache @require_context def instance_info_cache_update(context, instance_uuid, values, session=None): """Update an instance info cache record in the table. 
    :param instance_uuid: = uuid of info cache's instance
    :param values: = dict containing column values to update
    :param session: = optional session object
    """
    session = session or get_session()
    info_cache = instance_info_cache_get(context, instance_uuid,
                                         session=session)

    if info_cache:
        info_cache.update(values)
        info_cache.save(session=session)
    else:
        # NOTE(tr3buchet): just in case someone blows away an instance's
        #                  cache entry
        # Recreate the cache row rather than failing the update.
        values['instance_id'] = instance_uuid
        info_cache = instance_info_cache_create(context, values)

    return info_cache


@require_context
def instance_info_cache_delete(context, instance_uuid, session=None):
    """Deletes an existing instance_info_cache record

    Implemented as a soft delete: delegates to instance_info_cache_update()
    with deleted/deleted_at set, so the row is kept.

    :param instance_uuid: = uuid of the instance tied to the cache record
    :param session: = optional session object
    """
    values = {'deleted': True,
              'deleted_at': utils.utcnow()}
    instance_info_cache_update(context, instance_uuid, values, session)


###################


@require_context
def key_pair_create(context, values):
    """Create a key pair row from the values dict and return the model."""
    key_pair_ref = models.KeyPair()
    key_pair_ref.update(values)
    key_pair_ref.save()
    return key_pair_ref


@require_context
def key_pair_destroy(context, user_id, name):
    """Delete the named key pair belonging to user_id.

    Raises KeypairNotFound (via key_pair_get) if it does not exist.
    """
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        key_pair_ref = key_pair_get(context, user_id, name, session=session)
        key_pair_ref.delete(session=session)


@require_context
def key_pair_destroy_all_by_user(context, user_id):
    """Soft-delete every key pair belonging to user_id."""
    authorize_user_context(context, user_id)
    session = get_session()
    with session.begin():
        # literal_column('updated_at') presumably keeps updated_at at its
        # current value during the bulk UPDATE -- TODO confirm intent.
        session.query(models.KeyPair).\
                filter_by(user_id=user_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def key_pair_get(context, user_id, name, session=None):
    """Return the named key pair for user_id.

    :raises: KeypairNotFound when no matching row exists.
    """
    authorize_user_context(context, user_id)
    result = model_query(context, models.KeyPair, session=session).\
                     filter_by(user_id=user_id).\
                     filter_by(name=name).\
                     first()
    if not result:
        raise exception.KeypairNotFound(user_id=user_id, name=name)
    return result
@require_context
def key_pair_get_all_by_user(context, user_id):
    """Return all non-deleted key pairs belonging to user_id."""
    authorize_user_context(context, user_id)
    return model_query(context, models.KeyPair, read_deleted="no").\
                   filter_by(user_id=user_id).\
                   all()


###################


@require_admin_context
def network_associate(context, project_id, force=False):
    """Associate a project with a network.

    called by project_get_networks under certain conditions
    and network manager add_network_to_project()

    only associate if the project doesn't already have a network
    or if force is True

    force solves race condition where a fresh project has multiple instance
    builds simultaneously picked up by multiple network hosts which attempt
    to associate the project with multiple networks
    force should only be used as a direct consequence of user request
    all automated requests should not use force
    """
    session = get_session()
    with session.begin():

        def network_query(project_filter):
            # SELECT ... FOR UPDATE on the first network matching the
            # project filter (None selects an unassociated network).
            return model_query(context, models.Network, session=session,
                              read_deleted="no").\
                           filter_by(project_id=project_filter).\
                           with_lockmode('update').\
                           first()

        if not force:
            # find out if project has a network
            network_ref = network_query(project_id)

        if force or not network_ref:
            # in force mode or project doesn't have a network so associate
            # with a new network
            # NOTE(review): with force=True the project-ownership check is
            # skipped, so a project can end up with multiple networks --
            # per the docstring this is intentional for user requests.

            # get new network
            network_ref = network_query(None)
            if not network_ref:
                raise db.NoMoreNetworks()

            # associate with network
            # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
            #             then this has concurrency issues
            network_ref['project_id'] = project_id
            session.add(network_ref)
    return network_ref


@require_admin_context
def network_count(context):
    """Return the total number of network rows visible to the context."""
    return model_query(context, models.Network).count()


@require_admin_context
def _network_ips_query(context, network_id):
    """Base query for non-deleted fixed IPs belonging to network_id."""
    return model_query(context, models.FixedIp, read_deleted="no").\
                   filter_by(network_id=network_id)


@require_admin_context
def network_count_reserved_ips(context, network_id):
    """Count the reserved fixed IPs in the given network."""
    return _network_ips_query(context, network_id).\
                    filter_by(reserved=True).\
count() @require_admin_context def network_create_safe(context, values): if values.get('vlan'): if model_query(context, models.Network, read_deleted="no")\ .filter_by(vlan=values['vlan'])\ .first(): raise exception.DuplicateVlan(vlan=values['vlan']) network_ref = models.Network() network_ref['uuid'] = str(utils.gen_uuid()) network_ref.update(values) try: network_ref.save() return network_ref except IntegrityError: return None @require_admin_context def network_delete_safe(context, network_id): session = get_session() with session.begin(): network_ref = network_get(context, network_id=network_id, session=session) session.delete(network_ref) @require_admin_context def network_disassociate(context, network_id): network_update(context, network_id, {'project_id': None, 'host': None}) @require_context def network_get(context, network_id, session=None): result = model_query(context, models.Network, session=session, project_only=True).\ filter_by(id=network_id).\ first() if not result: raise exception.NetworkNotFound(network_id=network_id) return result @require_admin_context def network_get_all(context): result = model_query(context, models.Network, read_deleted="no").all() if not result: raise exception.NoNetworksFound() return result @require_admin_context def network_get_all_by_uuids(context, network_uuids, project_id=None): project_or_none = or_(models.Network.project_id == project_id, models.Network.project_id == None) result = model_query(context, models.Network, read_deleted="no").\ filter(models.Network.uuid.in_(network_uuids)).\ filter(project_or_none).\ all() if not result: raise exception.NoNetworksFound() #check if host is set to all of the networks # returned in the result for network in result: if network['host'] is None: raise exception.NetworkHostNotSet(network_id=network['id']) #check if the result contains all the networks #we are looking for for network_uuid in network_uuids: found = False for network in result: if network['uuid'] == network_uuid: found 
= True break if not found: if project_id: raise exception.NetworkNotFoundForProject( network_uuid=network_uuid, project_id=context.project_id) raise exception.NetworkNotFound(network_id=network_uuid) return result # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable=C0103 @require_admin_context def network_get_associated_fixed_ips(context, network_id, host=None): # FIXME(sirp): since this returns fixed_ips, this would be better named # fixed_ip_get_all_by_network. # NOTE(vish): The ugly joins here are to solve a performance issue and # should be removed once we can add and remove leases # without regenerating the whole list vif_and = and_(models.VirtualInterface.id == models.FixedIp.virtual_interface_id, models.VirtualInterface.deleted == False) inst_and = and_(models.Instance.id == models.FixedIp.instance_id, models.Instance.deleted == False) session = get_session() query = session.query(models.FixedIp.address, models.FixedIp.instance_id, models.FixedIp.network_id, models.FixedIp.virtual_interface_id, models.VirtualInterface.address, models.Instance.hostname, models.Instance.updated_at, models.Instance.created_at).\ filter(models.FixedIp.deleted == False).\ filter(models.FixedIp.network_id == network_id).\ filter(models.FixedIp.allocated == True).\ join((models.VirtualInterface, vif_and)).\ join((models.Instance, inst_and)).\ filter(models.FixedIp.instance_id != None).\ filter(models.FixedIp.virtual_interface_id != None) if host: query = query.filter(models.Instance.host == host) result = query.all() data = [] for datum in result: cleaned = {} cleaned['address'] = datum[0] cleaned['instance_id'] = datum[1] cleaned['network_id'] = datum[2] cleaned['vif_id'] = datum[3] cleaned['vif_address'] = datum[4] cleaned['instance_hostname'] = datum[5] cleaned['instance_updated'] = datum[6] cleaned['instance_created'] = datum[7] data.append(cleaned) return data @require_admin_context def 
_network_get_query(context, session=None): return model_query(context, models.Network, session=session, read_deleted="no") @require_admin_context def network_get_by_bridge(context, bridge): result = _network_get_query(context).filter_by(bridge=bridge).first() if not result: raise exception.NetworkNotFoundForBridge(bridge=bridge) return result @require_admin_context def network_get_by_uuid(context, uuid): result = _network_get_query(context).filter_by(uuid=uuid).first() if not result: raise exception.NetworkNotFoundForUUID(uuid=uuid) return result @require_admin_context def network_get_by_cidr(context, cidr): result = _network_get_query(context).\ filter(or_(models.Network.cidr == cidr, models.Network.cidr_v6 == cidr)).\ first() if not result: raise exception.NetworkNotFoundForCidr(cidr=cidr) return result @require_admin_context def network_get_by_instance(context, instance_id): # note this uses fixed IP to get to instance # only works for networks the instance has an IP from result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ first() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result @require_admin_context def network_get_all_by_instance(context, instance_id): result = _network_get_query(context).\ filter_by(instance_id=instance_id).\ all() if not result: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return result @require_admin_context def network_get_all_by_host(context, host): session = get_session() fixed_ip_query = model_query(context, models.FixedIp.network_id, session=session).\ filter(models.FixedIp.host == host) # NOTE(vish): return networks that have host set # or that have a fixed ip with host set host_filter = or_(models.Network.host == host, models.Network.id.in_(fixed_ip_query.subquery())) return _network_get_query(context, session=session).\ filter(host_filter).\ all() @require_admin_context def network_set_host(context, network_id, host_id): session = 
get_session() with session.begin(): network_ref = _network_get_query(context, session=session).\ filter_by(id=network_id).\ with_lockmode('update').\ first() if not network_ref: raise exception.NetworkNotFound(network_id=network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not network_ref['host']: network_ref['host'] = host_id session.add(network_ref) return network_ref['host'] @require_context def network_update(context, network_id, values): session = get_session() with session.begin(): network_ref = network_get(context, network_id, session=session) network_ref.update(values) network_ref.save(session=session) return network_ref ################### def queue_get_for(context, topic, physical_node_id): # FIXME(ja): this should be servername? return "%s.%s" % (topic, physical_node_id) ################### @require_admin_context def iscsi_target_count_by_host(context, host): return model_query(context, models.IscsiTarget).\ filter_by(host=host).\ count() @require_admin_context def iscsi_target_create_safe(context, values): iscsi_target_ref = models.IscsiTarget() for (key, value) in values.iteritems(): iscsi_target_ref[key] = value try: iscsi_target_ref.save() return iscsi_target_ref except IntegrityError: return None ################### @require_admin_context def auth_token_destroy(context, token_id): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_id, session=session) token_ref.delete(session=session) @require_admin_context def auth_token_get(context, token_hash, session=None): result = model_query(context, models.AuthToken, session=session).\ filter_by(token_hash=token_hash).\ first() if not result: raise exception.AuthTokenNotFound(token=token_hash) return result @require_admin_context def auth_token_update(context, token_hash, values): session = get_session() with session.begin(): token_ref = auth_token_get(context, token_hash, session=session) token_ref.update(values) 
        token_ref.save(session=session)


@require_admin_context
def auth_token_create(context, token):
    """Create an auth token row from the token dict and return the model."""
    tk = models.AuthToken()
    tk.update(token)
    tk.save()
    return tk


###################


@require_context
def quota_get(context, project_id, resource, session=None):
    """Return the quota row for (project_id, resource).

    :raises: ProjectQuotaNotFound when no matching row exists.
    """
    result = model_query(context, models.Quota, session=session,
                         read_deleted="no").\
                     filter_by(project_id=project_id).\
                     filter_by(resource=resource).\
                     first()
    if not result:
        raise exception.ProjectQuotaNotFound(project_id=project_id)
    return result


@require_context
def quota_get_all_by_project(context, project_id):
    """Return {'project_id': ..., <resource>: <hard_limit>, ...}."""
    authorize_project_context(context, project_id)
    rows = model_query(context, models.Quota, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   all()
    result = {'project_id': project_id}
    for row in rows:
        result[row.resource] = row.hard_limit
    return result


@require_admin_context
def quota_create(context, project_id, resource, limit):
    """Create a quota row; a limit of -1 is stored as NULL (unlimited)."""
    # NOTE: Treat -1 as unlimited for consistency w/ flags
    if limit == -1:
        limit = None
    quota_ref = models.Quota()
    quota_ref.project_id = project_id
    quota_ref.resource = resource
    quota_ref.hard_limit = limit
    quota_ref.save()
    return quota_ref


@require_admin_context
def quota_update(context, project_id, resource, limit):
    """Update an existing quota's hard_limit (-1 stored as NULL)."""
    # NOTE: Treat -1 as unlimited for consistency w/ flags
    if limit == -1:
        limit = None
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.hard_limit = limit
        quota_ref.save(session=session)


@require_admin_context
def quota_destroy(context, project_id, resource):
    """Delete the quota row for (project_id, resource)."""
    session = get_session()
    with session.begin():
        quota_ref = quota_get(context, project_id, resource, session=session)
        quota_ref.delete(session=session)


@require_admin_context
def quota_destroy_all_by_project(context, project_id):
    """Delete every quota row belonging to project_id."""
    session = get_session()
    with session.begin():
        quotas = model_query(context, models.Quota, session=session,
                             read_deleted="no").\
                         filter_by(project_id=project_id).\
                         all()

        for quota_ref in quotas:
        quota_ref.delete(session=session)


###################


@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
    """Atomically claim a free iSCSI target on `host` for `volume_id`.

    Returns the claimed target number.  Raises db.NoMoreTargets when no
    unassigned target row exists for the host.
    """
    session = get_session()
    with session.begin():
        iscsi_target_ref = model_query(context, models.IscsiTarget,
                                       session=session, read_deleted="no").\
                                filter_by(volume=None).\
                                filter_by(host=host).\
                                with_lockmode('update').\
                                first()

        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
        #             then this has concurrency issues
        if not iscsi_target_ref:
            raise db.NoMoreTargets()

        iscsi_target_ref.volume_id = volume_id
        session.add(iscsi_target_ref)

        return iscsi_target_ref.target_num


@require_admin_context
def volume_attached(context, volume_id, instance_id, mountpoint):
    """Mark the volume as attached to `instance_id` at `mountpoint`."""
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'in-use'
        volume_ref['mountpoint'] = mountpoint
        volume_ref['attach_status'] = 'attached'
        volume_ref.instance = instance_get(context, instance_id,
                                           session=session)
        volume_ref.save(session=session)


@require_context
def volume_create(context, values):
    """Create a volume row.

    A 'metadata' dict in `values` is converted into VolumeMetadata rows
    attached via the 'volume_metadata' relation.
    """
    values['volume_metadata'] = _metadata_refs(values.get('metadata'),
                                               models.VolumeMetadata)

    volume_ref = models.Volume()
    volume_ref.update(values)

    session = get_session()
    with session.begin():
        volume_ref.save(session=session)
    return volume_ref


@require_admin_context
def volume_data_get_for_project(context, project_id):
    """Return (volume count, summed volume size) for the project."""
    result = model_query(context,
                         func.count(models.Volume.id),
                         func.sum(models.Volume.size),
                         read_deleted="no").\
                     filter_by(project_id=project_id).\
                     first()

    # NOTE(vish): convert None to 0
    return (result[0] or 0, result[1] or 0)


@require_admin_context
def volume_destroy(context, volume_id):
    """Soft-delete a volume, free its iSCSI target, soft-delete metadata."""
    session = get_session()
    with session.begin():
        session.query(models.Volume).\
                filter_by(id=volume_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        # Unbind (not delete) the iSCSI target so it can be reused.
        session.query(models.IscsiTarget).\
                filter_by(volume_id=volume_id).\
                update({'volume_id': None})
        session.query(models.VolumeMetadata).\
                filter_by(volume_id=volume_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_admin_context
def volume_detached(context, volume_id):
    """Mark the volume as detached and clear its instance association."""
    session = get_session()
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref['status'] = 'available'
        volume_ref['mountpoint'] = None
        volume_ref['attach_status'] = 'detached'
        volume_ref.instance = None
        volume_ref.save(session=session)


@require_context
def _volume_get_query(context, session=None, project_only=False):
    """Base Volume query with instance/metadata/type relations eager-loaded."""
    return model_query(context, models.Volume, session=session,
                       project_only=project_only).\
                     options(joinedload('instance')).\
                     options(joinedload('volume_metadata')).\
                     options(joinedload('volume_type'))


@require_context
def volume_get(context, volume_id, session=None):
    """Return the volume or raise VolumeNotFound (project-scoped)."""
    result = _volume_get_query(context, session=session, project_only=True).\
                    filter_by(id=volume_id).\
                    first()

    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return result


@require_admin_context
def volume_get_all(context):
    """Return all volumes."""
    return _volume_get_query(context).all()


@require_admin_context
def volume_get_all_by_host(context, host):
    """Return all volumes on the given host."""
    return _volume_get_query(context).filter_by(host=host).all()


@require_admin_context
def volume_get_all_by_instance(context, instance_id):
    """Return volumes attached to the instance; raise if there are none."""
    result = model_query(context, models.Volume, read_deleted="no").\
                     options(joinedload('volume_metadata')).\
                     options(joinedload('volume_type')).\
                     filter_by(instance_id=instance_id).\
                     all()

    if not result:
        raise exception.VolumeNotFoundForInstance(instance_id=instance_id)

    return result


@require_context
def volume_get_all_by_project(context, project_id):
    """Return all volumes owned by the project (authorization enforced)."""
    authorize_project_context(context, project_id)
    return _volume_get_query(context).filter_by(project_id=project_id).all()


@require_admin_context
def volume_get_instance(context, volume_id):
    """Return the instance the volume is attached to (may be None)."""
    result = _volume_get_query(context).filter_by(id=volume_id).first()
    if not result:
        raise exception.VolumeNotFound(volume_id=volume_id)

    return result.instance


@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
    """Return the iSCSI target number bound to the volume."""
    result = model_query(context, models.IscsiTarget, read_deleted="yes").\
                     filter_by(volume_id=volume_id).\
                     first()

    if not result:
        raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)

    return result.target_num


@require_context
def volume_update(context, volume_id, values):
    """Update a volume; a 'metadata' key is applied via metadata_update."""
    session = get_session()
    metadata = values.get('metadata')
    if metadata is not None:
        # delete=True replaces the full metadata set.
        volume_metadata_update(context,
                               volume_id,
                               values.pop('metadata'),
                               delete=True)
    with session.begin():
        volume_ref = volume_get(context, volume_id, session=session)
        volume_ref.update(values)
        volume_ref.save(session=session)


####################


def _volume_metadata_get_query(context, volume_id, session=None):
    """Base query over the volume's non-deleted metadata rows."""
    return model_query(context, models.VolumeMetadata,
                       session=session, read_deleted="no").\
                     filter_by(volume_id=volume_id)


@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
    """Return the volume's metadata as a plain {key: value} dict."""
    rows = _volume_metadata_get_query(context, volume_id).all()
    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
    """Soft-delete one metadata item by key."""
    _volume_metadata_get_query(context, volume_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
    """Return one metadata row or raise VolumeMetadataNotFound."""
    result = _volume_metadata_get_query(context, volume_id, session=session).\
                    filter_by(key=key).\
                    first()

    if not result:
        raise exception.VolumeMetadataNotFound(metadata_key=key,
                                               volume_id=volume_id)
    return result


@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
    """Upsert the volume's metadata from the `metadata` dict.

    When `delete` is true, existing keys absent from `metadata` are
    soft-deleted first (i.e. the dict fully replaces the old set).
    """
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = volume_metadata_get(context, volume_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = volume_metadata_get_item(context, volume_id,
                                                    meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = volume_metadata_get_item(context, volume_id,
                                                meta_key, session)
        except exception.VolumeMetadataNotFound, e:
            meta_ref = models.VolumeMetadata()
            item.update({"key": meta_key, "volume_id": volume_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata


###################


@require_context
def snapshot_create(context, values):
    """Create a snapshot row from `values`."""
    snapshot_ref = models.Snapshot()
    snapshot_ref.update(values)

    session = get_session()
    with session.begin():
        snapshot_ref.save(session=session)
    return snapshot_ref


@require_admin_context
def snapshot_destroy(context, snapshot_id):
    """Soft-delete a snapshot."""
    session = get_session()
    with session.begin():
        session.query(models.Snapshot).\
                filter_by(id=snapshot_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def snapshot_get(context, snapshot_id, session=None):
    """Return the snapshot or raise SnapshotNotFound (project-scoped)."""
    result = model_query(context, models.Snapshot, session=session,
                         project_only=True).\
                    filter_by(id=snapshot_id).\
                    first()

    if not result:
        raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

    return result


@require_admin_context
def snapshot_get_all(context):
    """Return all snapshots."""
    return model_query(context, models.Snapshot).all()


@require_context
def snapshot_get_all_for_volume(context, volume_id):
    """Return non-deleted snapshots of the volume (project-scoped)."""
    return model_query(context, models.Snapshot, read_deleted='no',
                       project_only=True).\
                     filter_by(volume_id=volume_id).all()


@require_context
def snapshot_get_all_by_project(context, project_id):
    """Return the project's snapshots (authorization enforced)."""
    authorize_project_context(context, project_id)
    return model_query(context, models.Snapshot).\
                   filter_by(project_id=project_id).\
                   all()


@require_context
def snapshot_update(context, snapshot_id, values):
    """Update a snapshot row with `values`."""
    session = get_session()
    with session.begin():
        snapshot_ref = snapshot_get(context, snapshot_id, session=session)
        snapshot_ref.update(values)
        snapshot_ref.save(session=session)


###################


def _block_device_mapping_get_query(context, session=None):
    """Base query over non-deleted block device mappings."""
    return model_query(context, models.BlockDeviceMapping, session=session,
                       read_deleted="no")


@require_context
def block_device_mapping_create(context, values):
    """Create a block device mapping row."""
    bdm_ref = models.BlockDeviceMapping()
    bdm_ref.update(values)

    session = get_session()
    with session.begin():
        bdm_ref.save(session=session)


@require_context
def block_device_mapping_update(context, bdm_id, values):
    """Update the mapping identified by `bdm_id` with `values`."""
    session = get_session()
    with session.begin():
        _block_device_mapping_get_query(context, session=session).\
                filter_by(id=bdm_id).\
                update(values)


@require_context
def block_device_mapping_update_or_create(context, values):
    """Upsert a mapping keyed on (instance_id, device_name).

    Also soft-deletes other mappings on the instance that carry the same
    swap/ephemeral virtual_name under a different device name.
    """
    session = get_session()
    with session.begin():
        result = _block_device_mapping_get_query(context, session=session).\
                 filter_by(instance_id=values['instance_id']).\
                 filter_by(device_name=values['device_name']).\
                 first()
        if not result:
            bdm_ref = models.BlockDeviceMapping()
            bdm_ref.update(values)
            bdm_ref.save(session=session)
        else:
            result.update(values)

        # NOTE(yamahata): same virtual device name can be specified multiple
        #                 times. So delete the existing ones.
        virtual_name = values['virtual_name']
        if (virtual_name is not None and
            block_device.is_swap_or_ephemeral(virtual_name)):
            session.query(models.BlockDeviceMapping).\
                filter_by(instance_id=values['instance_id']).\
                filter_by(virtual_name=virtual_name).\
                filter(models.BlockDeviceMapping.device_name !=
                       values['device_name']).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def block_device_mapping_get_all_by_instance(context, instance_id):
    """Return all mappings for the instance."""
    return _block_device_mapping_get_query(context).\
                   filter_by(instance_id=instance_id).\
                   all()


@require_context
def block_device_mapping_destroy(context, bdm_id):
    """Soft-delete one mapping by id."""
    session = get_session()
    with session.begin():
        session.query(models.BlockDeviceMapping).\
                filter_by(id=bdm_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
                                                        volume_id):
    """Soft-delete the mapping joining `instance_id` and `volume_id`."""
    session = get_session()
    with session.begin():
        _block_device_mapping_get_query(context, session=session).\
            filter_by(instance_id=instance_id).\
            filter_by(volume_id=volume_id).\
            update({'deleted': True,
                    'deleted_at': utils.utcnow(),
                    'updated_at': literal_column('updated_at')})


###################


def _security_group_get_query(context, session=None, read_deleted=None,
                              project_only=False):
    """Base SecurityGroup query with the 'rules' relation eager-loaded."""
    return model_query(context, models.SecurityGroup, session=session,
                       read_deleted=read_deleted, project_only=project_only).\
                   options(joinedload_all('rules'))


@require_context
def security_group_get_all(context):
    """Return all security groups."""
    return _security_group_get_query(context).all()


@require_context
def security_group_get(context, security_group_id, session=None):
    """Return a security group (with instances) or raise NotFound."""
    result = _security_group_get_query(context, session=session,
                                       project_only=True).\
                    filter_by(id=security_group_id).\
                    options(joinedload_all('instances')).\
                    first()

    if not result:
        raise exception.SecurityGroupNotFound(
                security_group_id=security_group_id)

    return result
@require_context
def security_group_get_by_name(context, project_id, group_name):
    """Return the project's group named `group_name` or raise NotFound."""
    result = _security_group_get_query(context, read_deleted="no").\
                        filter_by(project_id=project_id).\
                        filter_by(name=group_name).\
                        options(joinedload_all('instances')).\
                        first()

    if not result:
        raise exception.SecurityGroupNotFoundForProject(
                project_id=project_id, security_group_id=group_name)

    return result


@require_context
def security_group_get_by_project(context, project_id):
    """Return the project's non-deleted security groups."""
    return _security_group_get_query(context, read_deleted="no").\
                        filter_by(project_id=project_id).\
                        all()


@require_context
def security_group_get_by_instance(context, instance_id):
    """Return the security groups associated with the instance."""
    return _security_group_get_query(context, read_deleted="no").\
                   join(models.SecurityGroup.instances).\
                   filter_by(id=instance_id).\
                   all()


@require_context
def security_group_exists(context, project_id, group_name):
    """Return True iff the project has a group with this name."""
    try:
        group = security_group_get_by_name(context, project_id, group_name)
        return group is not None
    except exception.NotFound:
        return False


@require_context
def security_group_in_use(context, group_id):
    """Return True if any live instance is associated with the group."""
    session = get_session()
    with session.begin():
        # Are there any instances that haven't been deleted
        # that include this group?
        inst_assoc = session.query(models.SecurityGroupInstanceAssociation).\
                             filter_by(security_group_id=group_id).\
                             filter_by(deleted=False).\
                             all()
        # NOTE(review): one Instance query per association (N+1); acceptable
        # for small association counts — confirm before optimizing.
        for ia in inst_assoc:
            num_instances = session.query(models.Instance).\
                                    filter_by(deleted=False).\
                                    filter_by(id=ia.instance_id).\
                                    count()
            if num_instances:
                return True

    return False


@require_context
def security_group_create(context, values):
    """Create a security group row."""
    security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
    # once save() is called.  This will get cleaned up in next orm pass.
    security_group_ref.rules
    security_group_ref.update(values)
    security_group_ref.save()
    return security_group_ref


@require_context
def security_group_destroy(context, security_group_id):
    """Soft-delete a group plus its instance associations and rules."""
    session = get_session()
    with session.begin():
        session.query(models.SecurityGroup).\
                filter_by(id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupInstanceAssociation).\
                filter_by(security_group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.SecurityGroupIngressRule).\
                filter_by(group_id=security_group_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_context
def security_group_count_by_project(context, project_id):
    """Return the number of non-deleted groups owned by the project."""
    authorize_project_context(context, project_id)
    return model_query(context, models.SecurityGroup, read_deleted="no").\
                   filter_by(project_id=project_id).\
                   count()


###################


def _security_group_rule_get_query(context, session=None):
    """Base query over ingress rules."""
    return model_query(context, models.SecurityGroupIngressRule,
                       session=session)


@require_context
def security_group_rule_get(context, security_group_rule_id, session=None):
    """Return one ingress rule or raise SecurityGroupNotFoundForRule."""
    result = _security_group_rule_get_query(context, session=session).\
                         filter_by(id=security_group_rule_id).\
                         first()

    if not result:
        raise exception.SecurityGroupNotFoundForRule(
                                               rule_id=security_group_rule_id)

    return result


@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
                                              session=None):
    """Return rules whose parent group is `security_group_id`."""
    return _security_group_rule_get_query(context, session=session).\
                   filter_by(parent_group_id=security_group_id).\
                   options(joinedload_all('grantee_group.instances')).\
                   all()


@require_context
def security_group_rule_get_by_security_group_grantee(context,
                                                      security_group_id,
                                                      session=None):
    """Return rules that grant access to `security_group_id`."""
    return _security_group_rule_get_query(context, session=session).\
                         filter_by(group_id=security_group_id).\
                         all()


@require_context
def security_group_rule_create(context, values):
    """Create an ingress rule row."""
    security_group_rule_ref = models.SecurityGroupIngressRule()
    security_group_rule_ref.update(values)
    security_group_rule_ref.save()
    return security_group_rule_ref


@require_context
def security_group_rule_destroy(context, security_group_rule_id):
    """Delete one ingress rule."""
    session = get_session()
    with session.begin():
        security_group_rule = security_group_rule_get(context,
                                                      security_group_rule_id,
                                                      session=session)
        security_group_rule.delete(session=session)


@require_context
def security_group_rule_count_by_group(context, security_group_id):
    """Return the number of non-deleted rules in the group."""
    return model_query(context, models.SecurityGroupIngressRule,
                       read_deleted="no").\
                   filter_by(parent_group_id=security_group_id).\
                   count()

#
###################


@require_admin_context
def provider_fw_rule_create(context, rule):
    """Create a provider-level firewall rule."""
    fw_rule_ref = models.ProviderFirewallRule()
    fw_rule_ref.update(rule)
    fw_rule_ref.save()
    return fw_rule_ref


@require_admin_context
def provider_fw_rule_get_all(context):
    """Return all provider firewall rules."""
    return model_query(context, models.ProviderFirewallRule).all()


@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
    """Soft-delete a provider firewall rule."""
    session = get_session()
    with session.begin():
        session.query(models.ProviderFirewallRule).\
                filter_by(id=rule_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


###################


@require_admin_context
def user_get(context, id, session=None):
    """Return the user row or raise UserNotFound."""
    result = model_query(context, models.User, session=session).\
                     filter_by(id=id).\
                     first()

    if not result:
        raise exception.UserNotFound(user_id=id)

    return result


@require_admin_context
def user_get_by_access_key(context, access_key, session=None):
    """Return the user owning `access_key` or raise AccessKeyNotFound."""
    result = model_query(context, models.User, session=session).\
                   filter_by(access_key=access_key).\
                   first()

    if not result:
        raise exception.AccessKeyNotFound(access_key=access_key)

    return result


@require_admin_context
def user_create(context, values):
    """Create a user row."""
    user_ref = models.User()
    user_ref.update(values)
    user_ref.save()
    return user_ref


@require_admin_context
def user_delete(context, id):
    """Hard-delete a user and all of its project/role associations."""
    session = get_session()
    with session.begin():
        session.query(models.UserProjectAssociation).\
                filter_by(user_id=id).\
                delete()
        session.query(models.UserRoleAssociation).\
                filter_by(user_id=id).\
                delete()
        session.query(models.UserProjectRoleAssociation).\
                filter_by(user_id=id).\
                delete()
        user_ref = user_get(context, id, session=session)
        session.delete(user_ref)


def user_get_all(context):
    """Return all users."""
    return model_query(context, models.User).all()


def user_get_roles(context, user_id):
    """Return the user's global role names."""
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        return [role.role for role in user_ref['roles']]


def user_get_roles_for_project(context, user_id, project_id):
    """Return the user's role names scoped to the project."""
    session = get_session()
    with session.begin():
        res = session.query(models.UserProjectRoleAssociation).\
                      filter_by(user_id=user_id).\
                      filter_by(project_id=project_id).\
                      all()
        return [association.role for association in res]


def user_remove_project_role(context, user_id, project_id, role):
    """Remove one project-scoped role association from the user."""
    session = get_session()
    with session.begin():
        session.query(models.UserProjectRoleAssociation).\
                filter_by(user_id=user_id).\
                filter_by(project_id=project_id).\
                filter_by(role=role).\
                delete()


def user_remove_role(context, user_id, role):
    """Remove all global role associations matching `role` for the user."""
    session = get_session()
    with session.begin():
        res = session.query(models.UserRoleAssociation).\
                      filter_by(user_id=user_id).\
                      filter_by(role=role).\
                      all()

        # NOTE(review): the loop variable shadows the `role` parameter;
        # harmless here since the parameter is not used afterwards.
        for role in res:
            session.delete(role)


def user_add_role(context, user_id, role):
    """Grant the user a global role."""
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        models.UserRoleAssociation(user=user_ref, role=role).\
               save(session=session)


def user_add_project_role(context, user_id, project_id, role):
    """Grant the user a role scoped to the project."""
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        project_ref = project_get(context, project_id, session=session)
        models.UserProjectRoleAssociation(user_id=user_ref['id'],
                                          project_id=project_ref['id'],
                                          role=role).save(session=session)


def user_update(context, user_id, values):
    """Update a user row with `values`."""
    session = get_session()
    with session.begin():
        user_ref = user_get(context, user_id, session=session)
        user_ref.update(values)
        user_ref.save(session=session)

#
###################


def project_create(context, values):
    """Create a project row."""
    project_ref = models.Project()
    project_ref.update(values)
    project_ref.save()
    return project_ref


def project_add_member(context, project_id, user_id):
    """Add the user to the project's member list."""
    session = get_session()
    with session.begin():
        project_ref = project_get(context, project_id, session=session)
        user_ref = user_get(context, user_id, session=session)

        project_ref.members += [user_ref]
        project_ref.save(session=session)


def project_get(context, id, session=None):
    """Return the project (members eager-loaded) or raise ProjectNotFound."""
    result = model_query(context, models.Project, session=session,
                         read_deleted="no").\
                     filter_by(id=id).\
                     options(joinedload_all('members')).\
                     first()

    if not result:
        raise exception.ProjectNotFound(project_id=id)

    return result


def project_get_all(context):
    """Return all projects with members eager-loaded."""
    return model_query(context, models.Project).\
                   options(joinedload_all('members')).\
                   all()


def project_get_by_user(context, user_id):
    """Return the projects the user belongs to."""
    user = model_query(context, models.User).\
                   filter_by(id=user_id).\
                   options(joinedload_all('projects')).\
                   first()

    if not user:
        raise exception.UserNotFound(user_id=user_id)

    return user.projects


def project_remove_member(context, project_id, user_id):
    """Remove the user from the project's member list (no-op if absent)."""
    session = get_session()
    project = project_get(context, project_id, session=session)
    user = user_get(context, user_id, session=session)

    if user in project.members:
        project.members.remove(user)
        project.save(session=session)


def project_update(context, project_id, values):
    """Update a project row with `values`."""
    session = get_session()
    with session.begin():
        project_ref = project_get(context, project_id, session=session)
        project_ref.update(values)
        project_ref.save(session=session)


def project_delete(context, id):
    """Hard-delete a project and its user/role associations."""
    session = get_session()
    with session.begin():
        session.query(models.UserProjectAssociation).\
                filter_by(project_id=id).\
                delete()
        session.query(models.UserProjectRoleAssociation).\
                filter_by(project_id=id).\
                delete()
        project_ref = project_get(context, id, session=session)
        session.delete(project_ref)


@require_context
def project_get_networks(context, project_id, associate=True):
    # NOTE(tr3buchet): as before this function will associate
    #                  a project with a network if it doesn't have one and
    #                  associate is true
    result = model_query(context, models.Network, read_deleted="no").\
                     filter_by(project_id=project_id).\
                     all()

    if not result:
        if not associate:
            return []

        return [network_associate(context, project_id)]

    return result


###################


@require_admin_context
def migration_create(context, values):
    """Create a migration row."""
    migration = models.Migration()
    migration.update(values)
    migration.save()
    return migration


@require_admin_context
def migration_update(context, id, values):
    """Update a migration row and return it."""
    session = get_session()
    with session.begin():
        migration = migration_get(context, id, session=session)
        migration.update(values)
        migration.save(session=session)
        return migration


@require_admin_context
def migration_get(context, id, session=None):
    """Return the migration (including deleted) or raise MigrationNotFound."""
    result = model_query(context, models.Migration, session=session,
                         read_deleted="yes").\
                     filter_by(id=id).\
                     first()

    if not result:
        raise exception.MigrationNotFound(migration_id=id)

    return result


@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
    """Return the migration for the instance in the given status."""
    result = model_query(context, models.Migration, read_deleted="yes").\
                     filter_by(instance_uuid=instance_uuid).\
                     filter_by(status=status).\
                     first()

    if not result:
        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
                                                  status=status)

    return result


@require_admin_context
def migration_get_all_unconfirmed(context, confirm_window, session=None):
    """Return FINISHED migrations not updated within `confirm_window` secs."""
    confirm_window = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=confirm_window)

    return model_query(context, models.Migration, session=session,
                       read_deleted="yes").\
             filter(models.Migration.updated_at <= confirm_window).\
             filter_by(status="FINISHED").\
             all()
##################


def console_pool_create(context, values):
    """Create a console pool row."""
    pool = models.ConsolePool()
    pool.update(values)
    pool.save()
    return pool


def console_pool_get(context, pool_id):
    """Return the console pool or raise ConsolePoolNotFound."""
    result = model_query(context, models.ConsolePool, read_deleted="no").\
                     filter_by(id=pool_id).\
                     first()

    if not result:
        raise exception.ConsolePoolNotFound(pool_id=pool_id)

    return result


def console_pool_get_by_host_type(context, compute_host, host,
                                  console_type):
    """Return the pool matching (host, console_type, compute_host)."""
    result = model_query(context, models.ConsolePool, read_deleted="no").\
                   filter_by(host=host).\
                   filter_by(console_type=console_type).\
                   filter_by(compute_host=compute_host).\
                   options(joinedload('consoles')).\
                   first()

    if not result:
        raise exception.ConsolePoolNotFoundForHostType(
                host=host, console_type=console_type,
                compute_host=compute_host)

    return result


def console_pool_get_all_by_host_type(context, host, console_type):
    """Return all pools on `host` serving `console_type`."""
    return model_query(context, models.ConsolePool, read_deleted="no").\
                   filter_by(host=host).\
                   filter_by(console_type=console_type).\
                   options(joinedload('consoles')).\
                   all()


def console_create(context, values):
    """Create a console row."""
    console = models.Console()
    console.update(values)
    console.save()
    return console


def console_delete(context, console_id):
    """Hard-delete a console row."""
    session = get_session()
    with session.begin():
        # NOTE(mdragon): consoles are meant to be transient.
        session.query(models.Console).\
                filter_by(id=console_id).\
                delete()


def console_get_by_pool_instance(context, pool_id, instance_id):
    """Return the console for (pool_id, instance_id) or raise NotFound."""
    result = model_query(context, models.Console, read_deleted="yes").\
                   filter_by(pool_id=pool_id).\
                   filter_by(instance_id=instance_id).\
                   options(joinedload('pool')).\
                   first()

    if not result:
        raise exception.ConsoleNotFoundInPoolForInstance(
                pool_id=pool_id, instance_id=instance_id)

    return result


def console_get_all_by_instance(context, instance_id):
    """Return all consoles (including deleted) for the instance."""
    return model_query(context, models.Console, read_deleted="yes").\
                   filter_by(instance_id=instance_id).\
                   all()


def console_get(context, console_id, instance_id=None):
    """Return the console, optionally restricted to `instance_id`."""
    query = model_query(context, models.Console, read_deleted="yes").\
                    filter_by(id=console_id).\
                    options(joinedload('pool'))

    if instance_id is not None:
        query = query.filter_by(instance_id=instance_id)

    result = query.first()

    if not result:
        if instance_id:
            raise exception.ConsoleNotFoundForInstance(
                    console_id=console_id, instance_id=instance_id)
        else:
            raise exception.ConsoleNotFound(console_id=console_id)

    return result


##################


@require_admin_context
def instance_type_create(context, values):
    """Create a new instance type.

    In order to pass in extra specs, the values dict should contain a
    'extra_specs' key/value pair:

    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    """
    session = get_session()
    with session.begin():
        try:
            instance_type_get_by_name(context, values['name'], session)
            raise exception.InstanceTypeExists(name=values['name'])
        except exception.InstanceTypeNotFoundByName:
            pass
        try:
            instance_type_get_by_flavor_id(context, values['flavorid'],
                                           session)
            raise exception.InstanceTypeExists(name=values['name'])
        except exception.FlavorNotFound:
            pass
        try:
            specs = values.get('extra_specs')
            specs_refs = []
            if specs:
                for k, v in specs.iteritems():
                    specs_ref = models.InstanceTypeExtraSpecs()
                    specs_ref['key'] = k
                    specs_ref['value'] = v
                    specs_refs.append(specs_ref)
            values['extra_specs'] = specs_refs
            instance_type_ref = models.InstanceTypes()
            instance_type_ref.update(values)
            instance_type_ref.save(session=session)
        # NOTE(review): broad Exception catch hides the original error type
        # behind DBError; kept for API compatibility.
        except Exception, e:
            raise exception.DBError(e)
        return _dict_with_extra_specs(instance_type_ref)


def _dict_with_extra_specs(inst_type_query):
    """Takes an instance, volume, or instance type query returned
    by sqlalchemy and returns it as a dictionary, converting the
    extra_specs entry from a list of dicts:

    'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]

    to a single dict:

    'extra_specs' : {'k1': 'v1'}

    """
    inst_type_dict = dict(inst_type_query)
    extra_specs = dict([(x['key'], x['value'])
                        for x in inst_type_query['extra_specs']])
    inst_type_dict['extra_specs'] = extra_specs
    return inst_type_dict


def _instance_type_get_query(context, session=None, read_deleted=None):
    """Base InstanceTypes query with extra_specs eager-loaded."""
    return model_query(context, models.InstanceTypes, session=session,
                       read_deleted=read_deleted).\
                     options(joinedload('extra_specs'))


@require_context
def instance_type_get_all(context, inactive=False, filters=None):
    """
    Returns all instance types.
    """
    filters = filters or {}
    read_deleted = "yes" if inactive else "no"

    query = _instance_type_get_query(context, read_deleted=read_deleted)

    if 'min_memory_mb' in filters:
        query = query.filter(
                models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
    if 'min_root_gb' in filters:
        query = query.filter(
                models.InstanceTypes.root_gb >= filters['min_root_gb'])

    inst_types = query.order_by("name").all()

    return [_dict_with_extra_specs(i) for i in inst_types]


@require_context
def instance_type_get(context, id, session=None):
    """Returns a dict describing specific instance_type"""
    result = _instance_type_get_query(context, session=session).\
                    filter_by(id=id).\
                    first()

    if not result:
        raise exception.InstanceTypeNotFound(instance_type_id=id)

    return _dict_with_extra_specs(result)


@require_context
def instance_type_get_by_name(context, name, session=None):
    """Returns a dict describing specific instance_type"""
    result = _instance_type_get_query(context, session=session).\
                    filter_by(name=name).\
                    first()

    if not result:
        raise exception.InstanceTypeNotFoundByName(instance_type_name=name)

    return _dict_with_extra_specs(result)


@require_context
def instance_type_get_by_flavor_id(context, flavor_id, session=None):
    """Returns a dict describing specific flavor_id"""
    result = _instance_type_get_query(context, session=session).\
                    filter_by(flavorid=flavor_id).\
                    first()

    if not result:
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    return _dict_with_extra_specs(result)


@require_admin_context
def instance_type_destroy(context, name):
    """Marks specific instance_type as deleted"""
    session = get_session()
    with session.begin():
        instance_type_ref = instance_type_get_by_name(context, name,
                                                      session=session)
        instance_type_id = instance_type_ref['id']
        session.query(models.InstanceTypes).\
                filter_by(id=instance_type_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.InstanceTypeExtraSpecs).\
                filter_by(instance_type_id=instance_type_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


####################


@require_admin_context
def cell_create(context, values):
    """Create a cell row."""
    cell = models.Cell()
    cell.update(values)
    cell.save()
    return cell


def _cell_get_by_id_query(context, cell_id, session=None):
    """Base query for one cell by id."""
    return model_query(context, models.Cell, session=session).\
                   filter_by(id=cell_id)


@require_admin_context
def cell_update(context, cell_id, values):
    """Update a cell row and return it."""
    cell = cell_get(context, cell_id)
    cell.update(values)
    cell.save()
    return cell


@require_admin_context
def cell_delete(context, cell_id):
    """Hard-delete a cell row."""
    session = get_session()
    with session.begin():
        _cell_get_by_id_query(context, cell_id, session=session).\
                delete()


@require_admin_context
def cell_get(context, cell_id):
    """Return the cell or raise CellNotFound."""
    result = _cell_get_by_id_query(context, cell_id).first()

    if not result:
        raise exception.CellNotFound(cell_id=cell_id)

    return result


@require_admin_context
def cell_get_all(context):
    """Return all non-deleted cells."""
    return model_query(context, models.Cell, read_deleted="no").all()


####################


def _instance_metadata_get_query(context, instance_id, session=None):
    """Base query over the instance's non-deleted metadata rows."""
    return model_query(context, models.InstanceMetadata, session=session,
                       read_deleted="no").\
                    filter_by(instance_id=instance_id)


@require_context
@require_instance_exists
def instance_metadata_get(context, instance_id):
    """Return the instance's metadata as a plain {key: value} dict."""
    rows = _instance_metadata_get_query(context, instance_id).all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
@require_instance_exists
def instance_metadata_delete(context, instance_id, key):
    """Soft-delete one metadata item by key."""
    _instance_metadata_get_query(context, instance_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
@require_instance_exists
def instance_metadata_get_item(context, instance_id, key, session=None):
    """Return one metadata row or raise InstanceMetadataNotFound."""
    result = _instance_metadata_get_query(
                            context, instance_id, session=session).\
                    filter_by(key=key).\
                    first()

    if not result:
        raise exception.InstanceMetadataNotFound(metadata_key=key,
                                                 instance_id=instance_id)

    return result


@require_context
@require_instance_exists
def instance_metadata_update(context, instance_id, metadata, delete):
    """Upsert the instance's metadata from the `metadata` dict.

    When `delete` is true, existing keys absent from `metadata` are
    soft-deleted first (the dict fully replaces the old set).
    """
    session = get_session()

    # Set existing metadata to deleted if delete argument is True
    if delete:
        original_metadata = instance_metadata_get(context, instance_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = instance_metadata_get_item(context, instance_id,
                                                      meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    # Now update all existing items with new values, or create new meta objects
    for meta_key, meta_value in metadata.iteritems():

        # update the value whether it exists or not
        item = {"value": meta_value}

        try:
            meta_ref = instance_metadata_get_item(context, instance_id,
                                                  meta_key, session)
        except exception.InstanceMetadataNotFound, e:
            meta_ref = models.InstanceMetadata()
            item.update({"key": meta_key, "instance_id": instance_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata


####################


@require_admin_context
def agent_build_create(context, values):
    """Create an agent build row."""
    agent_build_ref = models.AgentBuild()
    agent_build_ref.update(values)
    agent_build_ref.save()
    return agent_build_ref


@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture,
                              session=None):
    """Return the agent build for (hypervisor, os, architecture) or None."""
    return model_query(context, models.AgentBuild, session=session,
                       read_deleted="no").\
                   filter_by(hypervisor=hypervisor).\
                   filter_by(os=os).\
                   filter_by(architecture=architecture).\
                   first()


@require_admin_context
def agent_build_get_all(context):
    """Return all non-deleted agent builds."""
    return model_query(context, models.AgentBuild, read_deleted="no").\
                   all()


@require_admin_context
def agent_build_destroy(context, agent_build_id):
    """Soft-delete an agent build row."""
    session = get_session()
    with session.begin():
        model_query(context, models.AgentBuild, session=session,
                    read_deleted="yes").\
                filter_by(id=agent_build_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


@require_admin_context
def agent_build_update(context, agent_build_id, values):
    """Update an agent build row with `values`."""
    session = get_session()
    with session.begin():
        agent_build_ref = model_query(context, models.AgentBuild,
                                      session=session, read_deleted="yes").\
                   filter_by(id=agent_build_id).\
                   first()

        agent_build_ref.update(values)
        agent_build_ref.save(session=session)


####################


@require_context
def bw_usage_get_by_macs(context, macs, start_period):
    """Return bandwidth usage rows for the MACs in the given period."""
    return model_query(context, models.BandwidthUsage, read_deleted="yes").\
                   filter(models.BandwidthUsage.mac.in_(macs)).\
                   filter_by(start_period=start_period).\
                   all()


@require_context
def bw_usage_update(context,
                    mac,
                    start_period,
                    bw_in, bw_out,
                    session=None):
    """Upsert the bandwidth-usage row keyed on (mac, start_period)."""
    if not session:
        session = get_session()

    with session.begin():
        bwusage = model_query(context, models.BandwidthUsage,
                              session=session, read_deleted="yes").\
                          filter_by(start_period=start_period).\
                          filter_by(mac=mac).\
                          first()

        if not bwusage:
            bwusage = models.BandwidthUsage()
            bwusage.start_period = start_period
            bwusage.mac = mac

        bwusage.last_refreshed = utils.utcnow()
        bwusage.bw_in = bw_in
        bwusage.bw_out = bw_out
        bwusage.save(session=session)


####################


def _instance_type_extra_specs_get_query(context, instance_type_id,
                                         session=None):
    """Base query over the instance type's non-deleted extra specs."""
    return model_query(context, models.InstanceTypeExtraSpecs,
                       session=session, read_deleted="no").\
                    filter_by(instance_type_id=instance_type_id)


@require_context
def instance_type_extra_specs_get(context, instance_type_id):
    """Return the extra specs as a plain {key: value} dict."""
    rows = _instance_type_extra_specs_get_query(
                            context, instance_type_id).\
                    all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
def instance_type_extra_specs_delete(context, instance_type_id, key):
    """Soft-delete one extra-spec item by key."""
    _instance_type_extra_specs_get_query(
                            context, instance_type_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
def instance_type_extra_specs_get_item(context, instance_type_id, key,
                                       session=None):
    """Return one extra-spec row or raise InstanceTypeExtraSpecsNotFound."""
    result = _instance_type_extra_specs_get_query(
                            context, instance_type_id, session=session).\
                    filter_by(key=key).\
                    first()

    if not result:
        raise exception.InstanceTypeExtraSpecsNotFound(
                extra_specs_key=key, instance_type_id=instance_type_id)

    return result


@require_context
def instance_type_extra_specs_update_or_create(context, instance_type_id,
                                               specs):
    """Upsert all (key, value) pairs of `specs` as extra-spec rows."""
    session = get_session()
    spec_ref = None
    for key, value in specs.iteritems():
        try:
            spec_ref = instance_type_extra_specs_get_item(
                    context, instance_type_id, key, session)
        except exception.InstanceTypeExtraSpecsNotFound, e:
            spec_ref = models.InstanceTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "instance_type_id": instance_type_id,
                         "deleted": 0})
        spec_ref.save(session=session)
    return specs


##################


@require_admin_context
def volume_type_create(context, values):
    """Create a new instance type. In order to pass in extra specs,
    the values dict should contain a 'extra_specs' key/value pair:

    {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}

    """
    session = get_session()
    with session.begin():
        try:
            volume_type_get_by_name(context, values['name'], session)
            raise exception.VolumeTypeExists(name=values['name'])
        except exception.VolumeTypeNotFoundByName:
            pass
        try:
            specs = values.get('extra_specs')

            values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
                                                   models.VolumeTypeExtraSpecs)
            volume_type_ref = models.VolumeTypes()
            volume_type_ref.update(values)
            volume_type_ref.save()
        # NOTE(review): broad Exception catch hides the original error type
        # behind DBError; kept for API compatibility.
        except Exception, e:
            raise exception.DBError(e)
        return volume_type_ref


@require_context
def volume_type_get_all(context, inactive=False, filters=None):
    """
    Returns a dict describing all volume_types with name as key.
    """
    filters = filters or {}

    read_deleted = "yes" if inactive else "no"
    rows = model_query(context, models.VolumeTypes,
                       read_deleted=read_deleted).\
                        options(joinedload('extra_specs')).\
                        order_by("name").\
                        all()

    # TODO(sirp): this patern of converting rows to a result with extra_specs
    # is repeated quite a bit, might be worth creating a method for it
    result = {}
    for row in rows:
        result[row['name']] = _dict_with_extra_specs(row)

    return result


@require_context
def volume_type_get(context, id, session=None):
    """Returns a dict describing specific volume_type"""
    result = model_query(context, models.VolumeTypes, session=session).\
                    options(joinedload('extra_specs')).\
                    filter_by(id=id).\
                    first()

    if not result:
        raise exception.VolumeTypeNotFound(volume_type=id)

    return _dict_with_extra_specs(result)


@require_context
def volume_type_get_by_name(context, name, session=None):
    """Returns a dict describing specific volume_type"""
    result = model_query(context, models.VolumeTypes, session=session).\
                    options(joinedload('extra_specs')).\
                    filter_by(name=name).\
                    first()

    if not result:
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    else:
        return _dict_with_extra_specs(result)


@require_admin_context
def volume_type_destroy(context, name):
    """Soft-delete a volume type and its extra specs."""
    session = get_session()
    with session.begin():
        volume_type_ref = volume_type_get_by_name(context, name,
                                                  session=session)
        volume_type_id = volume_type_ref['id']
        session.query(models.VolumeTypes).\
                filter_by(id=volume_type_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})
        session.query(models.VolumeTypeExtraSpecs).\
                filter_by(volume_type_id=volume_type_id).\
                update({'deleted': True,
                        'deleted_at': utils.utcnow(),
                        'updated_at': literal_column('updated_at')})


####################


def _volume_type_extra_specs_query(context, volume_type_id, session=None):
    """Base query over the volume type's non-deleted extra specs."""
    return model_query(context, models.VolumeTypeExtraSpecs, session=session,
                       read_deleted="no").\
                    filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
    """Return the extra specs of a volume type as a {key: value} dict."""
    rows = _volume_type_extra_specs_query(context, volume_type_id).\
                    all()

    result = {}
    for row in rows:
        result[row['key']] = row['value']

    return result


@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
    """Soft-delete one extra-spec key of a volume type."""
    _volume_type_extra_specs_query(context, volume_type_id).\
        filter_by(key=key).\
        update({'deleted': True,
                'deleted_at': utils.utcnow(),
                'updated_at': literal_column('updated_at')})


@require_context
def volume_type_extra_specs_get_item(context, volume_type_id, key,
                                     session=None):
    """Fetch a single extra-spec row for a volume type, or raise."""
    result = _volume_type_extra_specs_query(
                            context, volume_type_id, session=session).\
                    filter_by(key=key).\
                    first()

    if not result:
        raise exception.VolumeTypeExtraSpecsNotFound(
                   extra_specs_key=key, volume_type_id=volume_type_id)

    return result


@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
                                             specs):
    """Upsert extra specs for a volume type; returns `specs`."""
    session = get_session()
    spec_ref = None
    for key, value in specs.iteritems():
        try:
            spec_ref = volume_type_extra_specs_get_item(
                context, volume_type_id, key, session)
        # NOTE: was `except ..., e:` — invalid in Python 3 and the bound
        # name was unused.
        except exception.VolumeTypeExtraSpecsNotFound:
            spec_ref = models.VolumeTypeExtraSpecs()
        spec_ref.update({"key": key, "value": value,
                         "volume_type_id": volume_type_id,
                         "deleted": 0})
        spec_ref.save(session=session)
    return specs


####################


def s3_image_get(context, image_id):
    """Find local s3 image represented by the provided id"""
    result = model_query(context, models.S3Image, read_deleted="yes").\
                 filter_by(id=image_id).\
                 first()

    if not result:
        raise exception.ImageNotFound(image_id=image_id)

    return result


def s3_image_get_by_uuid(context, image_uuid):
    """Find local s3 image represented by the provided uuid"""
    result = model_query(context, models.S3Image, read_deleted="yes").\
                 filter_by(uuid=image_uuid).\
                 first()

    if not result:
        raise exception.ImageNotFound(image_id=image_uuid)

    return result


def s3_image_create(context, image_uuid):
    """Create local s3 image represented by provided uuid"""
    try:
        s3_image_ref = models.S3Image()
        s3_image_ref.update({'uuid': image_uuid})
        s3_image_ref.save()
    # NOTE: `except Exception, e:` replaced with the 2.6+/3.x `as` form.
    except Exception as e:
        raise exception.DBError(e)

    return s3_image_ref


####################


@require_admin_context
def sm_backend_conf_create(context, values):
    """Insert a new storage-manager backend config row."""
    backend_conf = models.SMBackendConf()
    backend_conf.update(values)
    backend_conf.save()
    return backend_conf


@require_admin_context
def sm_backend_conf_update(context, sm_backend_id, values):
    """Update an existing backend config row; raises NotFound if missing."""
    session = get_session()
    with session.begin():
        backend_conf = model_query(context, models.SMBackendConf,
                                   session=session,
                                   read_deleted="yes").\
                           filter_by(id=sm_backend_id).\
                           first()

        if not backend_conf:
            raise exception.NotFound(
                _("No backend config with id %(sm_backend_id)s") % locals())

        backend_conf.update(values)
        backend_conf.save(session=session)
    return backend_conf


@require_admin_context
def sm_backend_conf_delete(context, sm_backend_id):
    """Hard-delete a backend config row."""
    # FIXME(sirp): for consistency, shouldn't this just mark as deleted with
    # `purge` actually deleting the record?
    session = get_session()
    with session.begin():
        model_query(context, models.SMBackendConf, session=session,
                    read_deleted="yes").\
            filter_by(id=sm_backend_id).\
            delete()


@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
    """Return one backend config row by id, or raise NotFound."""
    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
                 filter_by(id=sm_backend_id).\
                 first()

    if not result:
        raise exception.NotFound(_("No backend config with id "
                                   "%(sm_backend_id)s") % locals())

    return result


@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
    """Return the backend config for a storage repository, or None."""
    # NOTE: an unused `session = get_session()` local was removed; the
    # query below never referenced it.
    return model_query(context, models.SMBackendConf, read_deleted="yes").\
               filter_by(sr_uuid=sr_uuid).\
               first()


@require_admin_context
def sm_backend_conf_get_all(context):
    """Return every backend config row, including soft-deleted ones."""
    return model_query(context, models.SMBackendConf, read_deleted="yes").\
               all()


####################


def _sm_flavor_get_query(context, sm_flavor_label, session=None):
    # Base query for a storage-manager flavor by label.
    return model_query(context, models.SMFlavors, session=session,
                       read_deleted="yes").\
                        filter_by(label=sm_flavor_label)


@require_admin_context
def sm_flavor_create(context, values):
    """Insert a new storage-manager flavor row."""
    sm_flavor = models.SMFlavors()
    sm_flavor.update(values)
    sm_flavor.save()
    return sm_flavor


@require_admin_context
def sm_flavor_update(context, sm_flavor_label, values):
    """Update the flavor identified by label; raises NotFound if missing."""
    sm_flavor = sm_flavor_get(context, sm_flavor_label)
    sm_flavor.update(values)
    sm_flavor.save()
    return sm_flavor


@require_admin_context
def sm_flavor_delete(context, sm_flavor_label):
    """Hard-delete the flavor identified by label."""
    session = get_session()
    with session.begin():
        _sm_flavor_get_query(context, sm_flavor_label).delete()


@require_admin_context
def sm_flavor_get(context, sm_flavor_label):
    """Return one flavor row by label, or raise NotFound."""
    result = _sm_flavor_get_query(context, sm_flavor_label).first()

    if not result:
        raise exception.NotFound(
                _("No sm_flavor called %(sm_flavor)s") % locals())

    return result


@require_admin_context
def sm_flavor_get_all(context):
    """Return every flavor row, including soft-deleted ones."""
    return model_query(context, models.SMFlavors, read_deleted="yes").all()


###############################


def _sm_volume_get_query(context, volume_id, session=None):
    # Base query for a storage-manager volume by id.
    return model_query(context, models.SMVolume, session=session,
                       read_deleted="yes").\
                        filter_by(id=volume_id)


def sm_volume_create(context, values):
    """Insert a new storage-manager volume row."""
    sm_volume = models.SMVolume()
    sm_volume.update(values)
    sm_volume.save()
    return sm_volume


def sm_volume_update(context, volume_id, values):
    """Update the volume identified by id; raises NotFound if missing."""
    sm_volume = sm_volume_get(context, volume_id)
    sm_volume.update(values)
    sm_volume.save()
    return sm_volume


def sm_volume_delete(context, volume_id):
    """Hard-delete the volume identified by id."""
    session = get_session()
    with session.begin():
        _sm_volume_get_query(context, volume_id, session=session).delete()


def sm_volume_get(context, volume_id):
    """Return one volume row by id, or raise NotFound."""
    result = _sm_volume_get_query(context, volume_id).first()

    if not result:
        raise exception.NotFound(
                _("No sm_volume with id %(volume_id)s") % locals())
    return result


def sm_volume_get_all(context):
    """Return every volume row, including soft-deleted ones."""
    return model_query(context, models.SMVolume, read_deleted="yes").all()


################


def _aggregate_get_query(context, model_class, id_field, id,
                         session=None, read_deleted='yes'):
    # Generic query helper for aggregate-family tables: filter one model by
    # an arbitrary identifying column.
    return model_query(context, model_class, session=session,
                       read_deleted=read_deleted).filter(id_field == id)


@require_admin_context
def aggregate_create(context, values, metadata=None):
    """Create (or resurrect) an aggregate by name.

    A soft-deleted aggregate with the same name is revived and updated;
    an active one with the same name raises AggregateNameExists.
    """
    session = get_session()
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.name,
                                     values['name'],
                                     session=session,
                                     read_deleted='yes').first()
    values.setdefault('operational_state', aggregate_states.CREATED)
    if not aggregate:
        aggregate = models.Aggregate()
        aggregate.update(values)
        aggregate.save(session=session)
    elif aggregate.deleted:
        values['deleted'] = False
        values['deleted_at'] = None
        aggregate.update(values)
        aggregate.save(session=session)
    else:
        raise exception.AggregateNameExists(aggregate_name=values['name'])
    if metadata:
        aggregate_metadata_add(context, aggregate.id, metadata)
    return aggregate


@require_admin_context
def aggregate_get(context, aggregate_id, read_deleted='no'):
    """Return one aggregate by id, or raise AggregateNotFound."""
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.id,
                                     aggregate_id,
                                     read_deleted=read_deleted).first()
    if not aggregate:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
    return aggregate


@require_admin_context
def aggregate_get_by_host(context, host, read_deleted='no'):
    """Return the aggregate a host belongs to, or raise."""
    aggregate_host = _aggregate_get_query(context,
                                          models.AggregateHost,
                                          models.AggregateHost.host,
                                          host,
                                          read_deleted='no').first()
    if not aggregate_host:
        raise exception.AggregateHostNotFound(host=host)
    return aggregate_get(context, aggregate_host.aggregate_id, read_deleted)


@require_admin_context
def aggregate_update(context, aggregate_id, values):
    """Update an aggregate; 'metadata' in values is upserted separately."""
    session = get_session()
    aggregate = _aggregate_get_query(context,
                                     models.Aggregate,
                                     models.Aggregate.id,
                                     aggregate_id,
                                     session=session,
                                     read_deleted='no').first()
    if aggregate:
        metadata = values.get('metadata')
        if metadata is not None:
            aggregate_metadata_add(context,
                                   aggregate_id,
                                   values.pop('metadata'),
                                   set_delete=True)
        with session.begin():
            aggregate.update(values)
            aggregate.save(session=session)
        # Restore the popped metadata so the caller sees it echoed back.
        values['metadata'] = metadata
        return aggregate
    else:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)


@require_admin_context
def aggregate_delete(context, aggregate_id):
    """Soft-delete an aggregate and mark it DISMISSED."""
    query = _aggregate_get_query(context,
                                 models.Aggregate,
                                 models.Aggregate.id,
                                 aggregate_id,
                                 read_deleted='no')
    if query.first():
        query.update({'deleted': True,
                      'deleted_at': utils.utcnow(),
                      'operational_state': aggregate_states.DISMISSED,
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)


@require_admin_context
def aggregate_get_all(context, read_deleted='yes'):
    """Return all aggregates (including soft-deleted by default)."""
    return model_query(context,
                       models.Aggregate,
                       read_deleted=read_deleted).all()


@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id, read_deleted='no'):
    """Return an aggregate's metadata as a {key: value} dict."""
    rows = model_query(context,
                       models.AggregateMetadata,
                       read_deleted=read_deleted).\
                           filter_by(aggregate_id=aggregate_id).all()
    return dict([(r['key'], r['value']) for r in rows])


@require_admin_context
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
    """Soft-delete one metadata key of an aggregate, or raise."""
    query = _aggregate_get_query(context,
                                 models.AggregateMetadata,
                                 models.AggregateMetadata.aggregate_id,
                                 aggregate_id,
                                 read_deleted='no').\
                                     filter_by(key=key)
    if query.first():
        query.update({'deleted': True,
                      'deleted_at': utils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
                                                  metadata_key=key)


@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get_item(context, aggregate_id, key,
                                session=None, read_deleted='yes'):
    """Fetch one metadata row of an aggregate, or raise."""
    result = _aggregate_get_query(context,
                                  models.AggregateMetadata,
                                  models.AggregateMetadata.aggregate_id,
                                  aggregate_id,
                                  session=session,
                                  read_deleted=read_deleted).\
                                      filter_by(key=key).first()
    if not result:
        raise exception.AggregateMetadataNotFound(metadata_key=key,
                                                  aggregate_id=aggregate_id)
    return result


@require_admin_context
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
    """Upsert aggregate metadata.

    With set_delete=True, keys present in the DB but absent from `metadata`
    are soft-deleted first.  Existing (possibly soft-deleted) rows are
    updated and revived; missing keys are inserted.  Returns `metadata`.
    """
    session = get_session()
    if set_delete:
        original_metadata = aggregate_metadata_get(context, aggregate_id)
        for meta_key, meta_value in original_metadata.iteritems():
            if meta_key not in metadata:
                meta_ref = aggregate_metadata_get_item(context, aggregate_id,
                                                       meta_key, session)
                meta_ref.update({'deleted': True})
                meta_ref.save(session=session)

    meta_ref = None

    for meta_key, meta_value in metadata.iteritems():
        item = {"value": meta_value}
        try:
            meta_ref = aggregate_metadata_get_item(context, aggregate_id,
                                                   meta_key, session)
            if meta_ref.deleted:
                item.update({'deleted': False, 'deleted_at': None})
        except exception.AggregateMetadataNotFound:
            meta_ref = models.AggregateMetadata()
            item.update({"key": meta_key, "aggregate_id": aggregate_id})

        meta_ref.update(item)
        meta_ref.save(session=session)

    return metadata


@require_admin_context
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id, read_deleted='yes'):
    """Return the list of host names belonging to an aggregate."""
    rows = model_query(context,
                       models.AggregateHost,
                       read_deleted=read_deleted).\
                           filter_by(aggregate_id=aggregate_id).all()
    return [r.host for r in rows]


@require_admin_context
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
    """Soft-delete one host membership of an aggregate, or raise."""
    query = _aggregate_get_query(context,
                                 models.AggregateHost,
                                 models.AggregateHost.aggregate_id,
                                 aggregate_id,
                                 read_deleted='no').filter_by(host=host)
    if query.first():
        query.update({'deleted': True,
                      'deleted_at': utils.utcnow(),
                      'updated_at': literal_column('updated_at')})
    else:
        raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
                                              host=host)


@require_admin_context
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
    """Add (or revive) a host membership on an aggregate.

    Raises AggregateHostExists for a live duplicate and
    AggregateHostConflict if the insert violates a DB constraint.
    """
    session = get_session()
    host_ref = _aggregate_get_query(context,
                                    models.AggregateHost,
                                    models.AggregateHost.aggregate_id,
                                    aggregate_id,
                                    session=session,
                                    read_deleted='yes').\
                                        filter_by(host=host).first()
    if not host_ref:
        try:
            host_ref = models.AggregateHost()
            values = {"host": host, "aggregate_id": aggregate_id, }
            host_ref.update(values)
            host_ref.save(session=session)
        except exception.DBError:
            raise exception.AggregateHostConflict(host=host)
    elif host_ref.deleted:
        host_ref.update({'deleted': False, 'deleted_at': None})
        host_ref.save(session=session)
    else:
        raise exception.AggregateHostExists(host=host,
                                            aggregate_id=aggregate_id)
    return host_ref


################


def instance_fault_create(context, values):
    """Create a new InstanceFault."""
    fault_ref = models.InstanceFault()
    fault_ref.update(values)
    fault_ref.save()
    return dict(fault_ref.iteritems())


def instance_fault_get_by_instance_uuids(context, instance_uuids):
    """Get all instance faults for the provided instance_uuids."""
    rows = model_query(context, models.InstanceFault, read_deleted='no').\
               filter(models.InstanceFault.instance_uuid.in_(
                   instance_uuids)).\
               order_by(desc("created_at")).\
               all()

    # Pre-seed so every requested uuid appears in the result, even with
    # no faults; newest-first ordering comes from the query above.
    output = {}
    for instance_uuid in instance_uuids:
        output[instance_uuid] = []

    for row in rows:
        data = dict(row.iteritems())
        output[row['instance_uuid']].append(data)

    return output
./CrossVul/dataset_final_sorted/CWE-264/py/good_3634_4
crossvul-python_data_bad_3694_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import routes

from keystone import catalog
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import token
from keystone.common import logging
from keystone.common import utils
from keystone.common import wsgi


class AdminRouter(wsgi.ComposingRouter):
    """Routes for the admin API: token CRUD plus extension discovery."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token',
                       conditions=dict(method=['GET']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token_head',
                       conditions=dict(method=['HEAD']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='delete_token',
                       conditions=dict(method=['DELETE']))
        mapper.connect('/tokens/{token_id}/endpoints',
                       controller=auth_controller,
                       action='endpoints',
                       conditions=dict(method=['GET']))

        # Miscellaneous Operations
        extensions_controller = AdminExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.AdminRouter()
        routers = [identity_router]
        super(AdminRouter, self).__init__(mapper, routers)


class PublicRouter(wsgi.ComposingRouter):
    """Routes for the public API: authentication plus extension discovery."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))

        # Miscellaneous
        extensions_controller = PublicExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.PublicRouter()
        routers = [identity_router]
        super(PublicRouter, self).__init__(mapper, routers)


class PublicVersionRouter(wsgi.ComposingRouter):
    """Single route exposing the public version document."""

    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(PublicVersionRouter, self).__init__(mapper, routers)


class AdminVersionRouter(wsgi.ComposingRouter):
    """Single route exposing the admin version document."""

    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(AdminVersionRouter, self).__init__(mapper, routers)


class VersionController(wsgi.Application):
    """Serves the API version discovery documents."""

    def __init__(self, version_type):
        self.catalog_api = catalog.Manager()
        # 'publicURL' or 'adminURL', depending on which API we serve.
        self.url_key = "%sURL" % version_type
        super(VersionController, self).__init__()

    def _get_identity_url(self, context):
        # Scan the service catalog for the identity endpoint matching our
        # url_key; NotImplemented if no identity service is configured.
        catalog_ref = self.catalog_api.get_catalog(context=context,
                                                   user_id=None,
                                                   tenant_id=None)
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                if service == 'identity':
                    return service_ref[self.url_key]

        raise exception.NotImplemented()

    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        identity_url = self._get_identity_url(context)
        if not identity_url.endswith('/'):
            identity_url = identity_url + '/'

        versions = {}
        versions['v2.0'] = {
            "id": "v2.0",
            "status": "beta",
            "updated": "2011-11-19T00:00:00Z",
            "links": [
                {
                    "rel": "self",
                    "href": identity_url,
                }, {
                    "rel": "describedby",
                    "type": "text/html",
                    "href": "http://docs.openstack.org/api/openstack-"
                            "identity-service/2.0/content/"
                }, {
                    "rel": "describedby",
                    "type": "application/pdf",
                    "href": "http://docs.openstack.org/api/openstack-"
                            "identity-service/2.0/identity-dev-guide-"
                            "2.0.pdf"
                }
            ],
            "media-types": [
                {
                    "base": "application/json",
                    "type": "application/vnd.openstack.identity-v2.0"
                            "+json"
                }, {
                    "base": "application/xml",
                    "type": "application/vnd.openstack.identity-v2.0"
                            "+xml"
                }
            ]
        }

        return versions

    def get_versions(self, context):
        versions = self._get_versions_list(context)
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            "versions": {
                "values": versions.values()
            }
        })

    def get_version(self, context):
        versions = self._get_versions_list(context)
        return wsgi.render_response(body={
            "version": versions['v2.0']
        })


class NoopController(wsgi.Application):
    """A controller whose single action does nothing and returns {}."""

    def __init__(self):
        super(NoopController, self).__init__()

    def noop(self, context):
        return {}


class TokenController(wsgi.Application):
    """Token issuance, validation and revocation."""

    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()

    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"test_user",
                        "password":"mypass"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
        """
        token_id = uuid.uuid4().hex
        if 'passwordCredentials' in auth:
            username = auth['passwordCredentials'].get('username', '')
            password = auth['passwordCredentials'].get('password', '')
            tenant_name = auth.get('tenantName', None)
            user_id = auth['passwordCredentials'].get('userId', None)
            if username:
                user_ref = self.identity_api.get_user_by_name(
                        context=context, user_name=username)
                if user_ref:
                    user_id = user_ref['id']

            # more compat
            tenant_id = auth.get('tenantId', None)
            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                        context=context, tenant_name=tenant_name)
                if tenant_ref:
                    tenant_id = tenant_ref['id']

            try:
                auth_info = self.identity_api.authenticate(
                        context=context,
                        user_id=user_id,
                        password=password,
                        tenant_id=tenant_id)
                (user_ref, tenant_ref, metadata_ref) = auth_info

                # If the user is disabled don't allow them to authenticate
                if not user_ref.get('enabled', True):
                    raise exception.Forbidden(
                            message='User has been disabled')
            except AssertionError as e:
                raise exception.Unauthorized(e.message)

            token_ref = self.token_api.create_token(
                    context, token_id, dict(id=token_id,
                                            user=user_ref,
                                            tenant=tenant_ref,
                                            metadata=metadata_ref))
            if tenant_ref:
                catalog_ref = self.catalog_api.get_catalog(
                        context=context,
                        user_id=user_ref['id'],
                        tenant_id=tenant_ref['id'],
                        metadata=metadata_ref)
            else:
                catalog_ref = {}

        elif 'token' in auth:
            # NOTE: local renamed from `token` to avoid shadowing the
            # imported keystone.token module.
            old_token_id = auth['token'].get('id', None)

            tenant_name = auth.get('tenantName')

            # more compat
            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                        context=context, tenant_name=tenant_name)
                tenant_id = tenant_ref['id']
            else:
                tenant_id = auth.get('tenantId', None)

            try:
                old_token_ref = self.token_api.get_token(
                        context=context, token_id=old_token_id)
            except exception.NotFound:
                raise exception.Unauthorized()

            user_ref = old_token_ref['user']

            tenants = self.identity_api.get_tenants_for_user(
                    context, user_ref['id'])
            if tenant_id:
                # SECURITY: was `assert tenant_id in tenants`.  Assertions
                # are stripped under `python -O`, which would disable this
                # scoping check entirely, and a failed assert surfaced as a
                # 500 instead of a 401.  Raise Unauthorized explicitly.
                if tenant_id not in tenants:
                    raise exception.Unauthorized()

            tenant_ref = self.identity_api.get_tenant(context=context,
                                                      tenant_id=tenant_id)
            if tenant_ref:
                metadata_ref = self.identity_api.get_metadata(
                        context=context,
                        user_id=user_ref['id'],
                        tenant_id=tenant_ref['id'])
                catalog_ref = self.catalog_api.get_catalog(
                        context=context,
                        user_id=user_ref['id'],
                        tenant_id=tenant_ref['id'],
                        metadata=metadata_ref)
            else:
                metadata_ref = {}
                catalog_ref = {}

            token_ref = self.token_api.create_token(
                    context, token_id, dict(id=token_id,
                                            user=user_ref,
                                            tenant=tenant_ref,
                                            metadata=metadata_ref))

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))
        logging.debug('TOKEN_REF %s', token_ref)
        return self._format_authenticate(token_ref, roles_ref, catalog_ref)

    def _get_token_ref(self, context, token_id, belongs_to=None):
        """Returns a token if a valid one exists.

        Optionally, limited to a token owned by a specific tenant.

        """
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        token_ref = self.token_api.get_token(context=context,
                                             token_id=token_id)
        if belongs_to:
            # SECURITY: was `assert token_ref['tenant']['id'] == belongs_to`.
            # Under `python -O` the assert disappears and the tenant-scope
            # restriction is silently bypassed; raise Unauthorized instead.
            if token_ref['tenant']['id'] != belongs_to:
                raise exception.Unauthorized()
        return token_ref

    # admin only
    def validate_token_head(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Identical to ``validate_token``, except does not return a response.

        """
        belongs_to = context['query_string'].get("belongsTo")
        # _get_token_ref raises on an invalid or mis-scoped token; a bare
        # `assert` on its result would vanish under `python -O`.
        token_ref = self._get_token_ref(context, token_id, belongs_to)
        if not token_ref:
            raise exception.Unauthorized()

    # admin only
    def validate_token(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Returns metadata about the token along any associated roles.

        """
        belongs_to = context['query_string'].get("belongsTo")
        token_ref = self._get_token_ref(context, token_id, belongs_to)

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        metadata_ref = token_ref['metadata']
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # Get a service catalog if belongs_to is not none
        # This is needed for on-behalf-of requests
        catalog_ref = None
        if belongs_to is not None:
            catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=token_ref['user']['id'],
                    tenant_id=token_ref['tenant']['id'],
                    metadata=metadata_ref)
        return self._format_token(token_ref, roles_ref, catalog_ref)

    def delete_token(self, context, token_id):
        """Delete a token, effectively invalidating it for authz."""
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        self.token_api.delete_token(context=context, token_id=token_id)

    def endpoints(self, context, token_id):
        """Return a list of endpoints available to the token."""
        raise exception.NotImplemented()

    def _format_authenticate(self, token_ref, roles_ref, catalog_ref):
        o = self._format_token(token_ref, roles_ref)
        o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o

    def _format_token(self, token_ref, roles_ref, catalog_ref=None):
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        expires = token_ref['expires']
        if expires is not None:
            expires = utils.isotime(expires)
        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get(
                                     'roles_links', [])
                                 }
                        }
             }
        if 'tenant' in token_ref and token_ref['tenant']:
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o

    def _format_catalog(self, catalog_ref):
        """Munge catalogs from internal to output format
        Internal catalogs look like:

        {$REGION: {
            {$SERVICE: {
                $key1: $value1,
                ...
                }
            }
        }

        The legacy api wants them to look like

        [{'name': $SERVICE[name],
          'type': $SERVICE,
          'endpoints': [{
              'tenantId': $tenant_id,
              ...
              'region': $REGION,
              }],
          'endpoints_links': [],
         }]

        """
        if not catalog_ref:
            return {}

        services = {}
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                new_service_ref = services.get(service, {})
                new_service_ref['name'] = service_ref.pop('name')
                new_service_ref['type'] = service
                new_service_ref['endpoints_links'] = []
                service_ref['region'] = region

                endpoints_ref = new_service_ref.get('endpoints', [])
                endpoints_ref.append(service_ref)

                new_service_ref['endpoints'] = endpoints_ref
                services[service] = new_service_ref

        return services.values()


class ExtensionsController(wsgi.Application):
    """Base extensions controller to be extended by public and admin API's."""

    def __init__(self, extensions=None):
        super(ExtensionsController, self).__init__()
        self.extensions = extensions or {}

    def get_extensions_info(self, context):
        return {'extensions': {'values': self.extensions.values()}}

    def get_extension_info(self, context, extension_alias):
        try:
            return {'extension': self.extensions[extension_alias]}
        except KeyError:
            raise exception.NotFound(target=extension_alias)


class PublicExtensionsController(ExtensionsController):
    pass


class AdminExtensionsController(ExtensionsController):
    def __init__(self, *args, **kwargs):
        super(AdminExtensionsController, self).__init__(*args, **kwargs)
        # TODO(dolph): Extensions should obviously provide this information
        #               themselves, but hardcoding it here allows us to match
        #               the API spec in the short term with minimal complexity.
        self.extensions['OS-KSADM'] = {
            'name': 'Openstack Keystone Admin',
            'namespace': 'http://docs.openstack.org/identity/api/ext/'
                         'OS-KSADM/v1.0',
            'alias': 'OS-KSADM',
            'updated': '2011-08-19T13:25:27-06:00',
            'description': 'Openstack extensions to Keystone v2.0 API '
                           'enabling Admin Operations.',
            'links': [
                {
                    'rel': 'describedby',
                    # TODO(dolph): link needs to be revised after
                    #              bug 928059 merges
                    'type': 'text/html',
                    'href': ('https://github.com/openstack/'
                             'identity-api'),
                }
            ]
        }


@logging.fail_gracefully
def public_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return PublicRouter()


@logging.fail_gracefully
def admin_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return AdminRouter()


@logging.fail_gracefully
def public_version_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return PublicVersionRouter()


@logging.fail_gracefully
def admin_version_app_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)
    return AdminVersionRouter()
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3694_1
crossvul-python_data_bad_2014_1
# -*- coding: utf-8 -*-
"""
    jinja2.bccache
    ~~~~~~~~~~~~~~

    This module implements the bytecode cache system Jinja is optionally
    using.  This is useful if you have very complex template situations and
    the compilation of all those templates slow down your application too
    much.

    Situations where this is useful are often forking web applications that
    are initialized on the first request.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD.
"""
from os import path, listdir
import sys
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type


# marshal works better on 3.x, one hack less required
if not PY2:
    marshal_dump = marshal.dump
    marshal_load = marshal.load
else:
    # On Python 2, marshal.dump/load only accept real `file` objects, so
    # round-trip through (dumps/loads + read/write) for file-like objects.
    def marshal_dump(code, f):
        if isinstance(f, file):
            marshal.dump(code, f)
        else:
            f.write(marshal.dumps(code))

    def marshal_load(f):
        if isinstance(f, file):
            return marshal.load(f)
        return marshal.loads(f.read())


# Bumped whenever the on-disk cache format changes.
bc_version = 2

# magic version used to only change with new jinja versions.  With 2.6
# we change this to also take Python version changes into account.  The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
    pickle.dumps(bc_version, 2) + \
    pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])


class Bucket(object):
    """Buckets are used to store the bytecode for one template.  It's created
    and initialized by the bytecode cache and passed to the loading functions.

    The buckets get an internal checksum from the cache assigned and use this
    to automatically reject outdated cache material.  Individual bytecode
    cache subclasses don't have to care about cache invalidation.
    """

    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self):
        """Resets the bucket (unloads the bytecode)."""
        self.code = None

    def load_bytecode(self, f):
        """Loads bytecode from a file or file like object."""
        # make sure the magic header is correct
        magic = f.read(len(bc_magic))
        if magic != bc_magic:
            self.reset()
            return
        # the source code of the file changed, we need to reload
        checksum = pickle.load(f)
        if self.checksum != checksum:
            self.reset()
            return
        self.code = marshal_load(f)

    def write_bytecode(self, f):
        """Dump the bytecode into the file or file like object passed."""
        if self.code is None:
            raise TypeError('can\'t write empty bucket')
        # On-disk layout: magic header, pickled source checksum, then the
        # marshalled code object.  load_bytecode reads in the same order.
        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)

    def bytecode_from_string(self, string):
        """Load bytecode from a string."""
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self):
        """Return the bytecode as string."""
        out = BytesIO()
        self.write_bytecode(out)
        return out.getvalue()


class BytecodeCache(object):
    """To implement your own bytecode cache you have to subclass this class
    and override :meth:`load_bytecode` and :meth:`dump_bytecode`.  Both of
    these methods are passed a :class:`~jinja2.bccache.Bucket`.

    A very basic bytecode cache that saves the bytecode on the file system::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced version of a filesystem based bytecode cache is part of
    Jinja2.
    """

    def load_bytecode(self, bucket):
        """Subclasses have to override this method to load bytecode into a
        bucket.  If they are not able to find code in the cache for the
        bucket, it must not do anything.
        """
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Subclasses have to override this method to write the bytecode
        from a bucket back to the cache.  If it unable to do so it must not
        fail silently but raise an exception.
        """
        raise NotImplementedError()

    def clear(self):
        """Clears the cache.  This method is not used by Jinja2 but should be
        implemented to allow applications to clear the bytecode cache used
        by a particular environment.
        """

    def get_cache_key(self, name, filename=None):
        """Returns the unique hash key for this template name."""
        hash = sha1(name.encode('utf-8'))
        if filename is not None:
            filename = '|' + filename
            if isinstance(filename, text_type):
                filename = filename.encode('utf-8')
            hash.update(filename)
        return hash.hexdigest()

    def get_source_checksum(self, source):
        """Returns a checksum for the source."""
        return sha1(source.encode('utf-8')).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template.  All arguments are
        mandatory but filename may be `None`.
        """
        key = self.get_cache_key(name, filename)
        checksum = self.get_source_checksum(source)
        bucket = Bucket(environment, key, checksum)
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)


class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem.  It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified the system temporary items folder is used.

    The pattern can be used to have multiple separate caches operate on the
    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear method.
    """

    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
        if directory is None:
            # NOTE(review): defaulting to the shared system temp directory
            # means cache files have predictable names in a world-writable
            # location; on multi-user hosts consider passing a private,
            # per-user directory instead — TODO confirm threat model.
            directory = tempfile.gettempdir()
        self.directory = directory
        self.pattern = pattern

    def _get_cache_filename(self, bucket):
        return path.join(self.directory, self.pattern % bucket.key)

    def load_bytecode(self, bucket):
        # open_if_exists returns None when the cache file is absent; the
        # bucket then simply stays empty.
        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
        if f is not None:
            try:
                bucket.load_bytecode(f)
            finally:
                f.close()

    def dump_bytecode(self, bucket):
        f = open(self._get_cache_filename(bucket), 'wb')
        try:
            bucket.write_bytecode(f)
        finally:
            f.close()

    def clear(self):
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove
        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
        for filename in files:
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                pass


class MemcachedBytecodeCache(BytecodeCache):
    """This class implements a bytecode cache that uses a memcache cache for
    storing the information.  It does not enforce a specific memcache library
    (tummy's memcache or cmemcache) but will accept any class that provides
    the minimal interface required.

    Libraries compatible with this class:

    -   `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
    -   `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
    -   `cmemcache <http://gijsbert.org/cmemcache/>`_

    (Unfortunately the django cache interface is not compatible because it
    does not support storing binary data, only unicode.  You can however pass
    the underlying cache client to the bytecode cache which is available as
    `django.core.cache.cache._client`.)

    The minimal interface for the client passed to the constructor is this:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache.  `value` is a string and
            `timeout` the timeout of the key.  If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key.  If the item does not
            exist in the cache the return value must be `None`.

    The other arguments to the constructor are the prefix for all keys that
    is added before the actual cache key and the timeout for the bytecode in
    the cache system.  We recommend a high (or no) timeout.

    This bytecode cache does not support clearing of used items in the cache.
    The clear method is a no-operation function.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            # Treat any client failure as a cache miss when configured to
            # ignore memcache errors.
            if not self.ignore_memcache_errors:
                raise
            code = None
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
        if self.timeout is not None:
            args += (self.timeout,)
        try:
            self.client.set(*args)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
./CrossVul/dataset_final_sorted/CWE-264/py/bad_2014_1
crossvul-python_data_good_3693_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime

from keystone.common import kvs
from keystone import exception
from keystone import token


class Token(kvs.Base, token.Driver):
    """In-memory key-value backend for tokens.

    Tokens are stored under ``'token-<token_id>'`` keys in ``self.db``
    (provided by :class:`kvs.Base`).  Records are deep-copied on the way in
    and out so callers can never mutate the backend's stored state.
    """

    # Public interface
    def get_token(self, token_id):
        """Return the token data for ``token_id``.

        A record is only returned while it is live: either ``expires`` is
        None (never expires) or it is still in the future.  Expired or
        missing tokens raise TokenNotFound.

        :returns: the stored token dict
        :raises: keystone.exception.TokenNotFound
        """
        # NOTE: named token_ref (not "token") to avoid shadowing the
        # module-level `keystone.token` import.
        token_ref = self.db.get('token-%s' % token_id)
        if (token_ref and (token_ref['expires'] is None or
                           token_ref['expires'] > datetime.datetime.utcnow())):
            return token_ref
        raise exception.TokenNotFound(token_id=token_id)

    def create_token(self, token_id, data):
        """Store ``data`` under ``token_id`` and return a copy of it.

        If the caller did not supply an ``expires`` timestamp the driver's
        default expiry is filled in on the stored copy.
        """
        data_copy = copy.deepcopy(data)
        if 'expires' not in data:
            # _get_default_expire_time comes from token.Driver
            data_copy['expires'] = self._get_default_expire_time()
        self.db.set('token-%s' % token_id, data_copy)
        # return another copy so the stored record cannot be mutated
        return copy.deepcopy(data_copy)

    def delete_token(self, token_id):
        """Delete the token, raising TokenNotFound if it does not exist."""
        try:
            return self.db.delete('token-%s' % token_id)
        except KeyError:
            raise exception.TokenNotFound(token_id=token_id)

    def list_tokens(self, user_id):
        """Return the ids of all unexpired tokens belonging to ``user_id``."""
        tokens = []
        now = datetime.datetime.utcnow()
        # self.db holds heterogeneous keys; only 'token-' entries are tokens.
        for key, token_ref in self.db.items():
            if not key.startswith('token-'):
                continue
            if 'user' not in token_ref:
                continue
            if token_ref['user'].get('id') != user_id:
                continue
            # skip tokens that have already expired
            if token_ref.get('expires') and token_ref.get('expires') < now:
                continue
            # strip the 'token-' prefix to recover the bare token id
            tokens.append(key.split('-', 1)[1])
        return tokens
./CrossVul/dataset_final_sorted/CWE-264/py/good_3693_1
crossvul-python_data_bad_3725_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the Identity service."""

import urllib
import urlparse
import uuid

from keystone.common import logging
from keystone.common import manager
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone import policy
from keystone import token


CONF = config.CONF
LOG = logging.getLogger(__name__)


class Manager(manager.Manager):
    """Default pivot point for the Identity backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.
    """

    def __init__(self):
        # the concrete backend class is selected by configuration
        super(Manager, self).__init__(CONF.identity.driver)


class Driver(object):
    """Interface description for an Identity driver.

    Every method raises NotImplemented; concrete backends (KVS, SQL,
    LDAP, ...) override them.
    """

    def authenticate(self, user_id=None, tenant_id=None, password=None):
        """Authenticate a given user, tenant and password.

        :returns: (user_ref, tenant_ref, metadata_ref)
        :raises: AssertionError
        """
        raise exception.NotImplemented()

    def get_tenant(self, tenant_id):
        """Get a tenant by id.

        :returns: tenant_ref
        :raises: keystone.exception.TenantNotFound
        """
        raise exception.NotImplemented()

    def get_tenant_by_name(self, tenant_name):
        """Get a tenant by name.

        :returns: tenant_ref
        :raises: keystone.exception.TenantNotFound
        """
        raise exception.NotImplemented()

    def get_user(self, user_id):
        """Get a user by id.

        :returns: user_ref
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def get_user_by_name(self, user_name):
        """Get a user by name.

        :returns: user_ref
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def get_role(self, role_id):
        """Get a role by id.

        :returns: role_ref
        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()

    def list_users(self):
        """List all users in the system.

        NOTE(termie): I'd prefer if this listed only the users for a given
                      tenant.

        :returns: a list of user_refs or an empty list
        """
        raise exception.NotImplemented()

    def list_roles(self):
        """List all roles in the system.

        :returns: a list of role_refs or an empty list.
        """
        raise exception.NotImplemented()

    # NOTE(termie): seven calls below should probably be exposed by the api
    #               more clearly when the api redesign happens
    def add_user_to_tenant(self, tenant_id, user_id):
        """Add user to a tenant without an explicit role relationship.

        :raises: keystone.exception.TenantNotFound,
                 keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def remove_user_from_tenant(self, tenant_id, user_id):
        """Remove user from a tenant without an explicit role relationship.

        :raises: keystone.exception.TenantNotFound,
                 keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def get_all_tenants(self):
        """FIXME(dolph): Lists all tenants in the system? I'm not sure how this
                         is different from get_tenants, why get_tenants isn't
                         documented as part of the driver, or why it's called
                         get_tenants instead of list_tenants (i.e. list_roles
                         and list_users)...

        :returns: a list of ... FIXME(dolph): tenant_refs or tenant_id's?
        """
        raise exception.NotImplemented()

    def get_tenant_users(self, tenant_id):
        """FIXME(dolph): Lists all users with a relationship to the specified
                         tenant?

        :returns: a list of ... FIXME(dolph): user_refs or user_id's?
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def get_tenants_for_user(self, user_id):
        """Get the tenants associated with a given user.

        :returns: a list of tenant_id's.
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def get_roles_for_user_and_tenant(self, user_id, tenant_id):
        """Get the roles associated with a user within given tenant.

        :returns: a list of role ids.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.TenantNotFound
        """
        raise exception.NotImplemented()

    def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id):
        """Add a role to a user within given tenant.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.TenantNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()

    def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id):
        """Remove a role from a user within given tenant.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.TenantNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()

    # user crud
    def create_user(self, user_id, user):
        """Creates a new user.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def update_user(self, user_id, user):
        """Updates an existing user.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def delete_user(self, user_id):
        """Deletes an existing user.

        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    # tenant crud
    def create_tenant(self, tenant_id, tenant):
        """Creates a new tenant.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def update_tenant(self, tenant_id, tenant):
        """Updates an existing tenant.

        :raises: keystone.exception.TenantNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def delete_tenant(self, tenant_id):
        """Deletes an existing tenant.

        :raises: keystone.exception.TenantNotFound
        """
        raise exception.NotImplemented()

    # metadata crud
    def get_metadata(self, user_id, tenant_id):
        raise exception.NotImplemented()

    def create_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    def update_metadata(self, user_id, tenant_id, metadata):
        raise exception.NotImplemented()

    def delete_metadata(self, user_id, tenant_id):
        raise exception.NotImplemented()

    # role crud
    def create_role(self, role_id, role):
        """Creates a new role.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def update_role(self, role_id, role):
        """Updates an existing role.

        :raises: keystone.exception.RoleNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def delete_role(self, role_id):
        """Deletes an existing role.

        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()


class PublicRouter(wsgi.ComposableRouter):
    # Public (unprivileged) identity routes: only tenant listing for the
    # authenticated token.
    def add_routes(self, mapper):
        tenant_controller = TenantController()
        mapper.connect('/tenants',
                       controller=tenant_controller,
                       action='get_tenants_for_token',
                       conditions=dict(method=['GET']))


class AdminRouter(wsgi.ComposableRouter):
    # Admin identity routes: tenant, user and role read endpoints.
    def add_routes(self, mapper):
        # Tenant Operations
        tenant_controller = TenantController()
        mapper.connect('/tenants',
                       controller=tenant_controller,
                       action='get_all_tenants',
                       conditions=dict(method=['GET']))
        mapper.connect('/tenants/{tenant_id}',
                       controller=tenant_controller,
                       action='get_tenant',
                       conditions=dict(method=['GET']))

        # User Operations
        user_controller = UserController()
        mapper.connect('/users/{user_id}',
                       controller=user_controller,
                       action='get_user',
                       conditions=dict(method=['GET']))

        # Role Operations
        roles_controller = RoleController()
        mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles',
                       controller=roles_controller,
                       action='get_user_roles',
                       conditions=dict(method=['GET']))
        mapper.connect('/users/{user_id}/roles',
                       controller=roles_controller,
                       action='get_user_roles',
                       conditions=dict(method=['GET']))
class TenantController(wsgi.Application):
    """Admin/public controller for tenant resources."""

    def __init__(self):
        self.identity_api = Manager()
        self.policy_api = policy.Manager()
        self.token_api = token.Manager()
        super(TenantController, self).__init__()

    def get_all_tenants(self, context, **kw):
        """Gets a list of all tenants for an admin user."""
        self.assert_admin(context)
        tenant_refs = self.identity_api.get_tenants(context)
        params = {
            'limit': context['query_string'].get('limit'),
            'marker': context['query_string'].get('marker'),
        }
        return self._format_tenant_list(tenant_refs, **params)

    def get_tenants_for_token(self, context, **kw):
        """Get valid tenants for token based on token used to authenticate.

        Pulls the token from the context, validates it and gets the valid
        tenants for the user in the token.

        Doesn't care about token scopedness.
        """
        try:
            token_ref = self.token_api.get_token(context=context,
                                                 token_id=context['token_id'])
        except exception.NotFound:
            # an unknown/expired token must not reveal anything more specific
            raise exception.Unauthorized()

        user_ref = token_ref['user']
        tenant_ids = self.identity_api.get_tenants_for_user(
            context, user_ref['id'])
        tenant_refs = []
        for tenant_id in tenant_ids:
            tenant_refs.append(self.identity_api.get_tenant(
                context=context,
                tenant_id=tenant_id))
        params = {
            'limit': context['query_string'].get('limit'),
            'marker': context['query_string'].get('marker'),
        }
        return self._format_tenant_list(tenant_refs, **params)

    def get_tenant(self, context, tenant_id):
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        return {'tenant': self.identity_api.get_tenant(context, tenant_id)}

    # CRUD Extension
    def create_tenant(self, context, tenant):
        tenant_ref = self._normalize_dict(tenant)
        # NOTE(review): the name check runs before assert_admin, so an
        # unprivileged caller can distinguish a validation failure from an
        # authorization failure -- confirm the ordering is intentional.
        if not 'name' in tenant_ref or not tenant_ref['name']:
            msg = 'Name field is required and cannot be empty'
            raise exception.ValidationError(message=msg)
        self.assert_admin(context)
        # honour a caller-supplied id, otherwise generate one
        tenant_ref['id'] = tenant_ref.get('id', uuid.uuid4().hex)
        tenant = self.identity_api.create_tenant(
            context, tenant_ref['id'], tenant_ref)
        return {'tenant': tenant}

    def update_tenant(self, context, tenant_id, tenant):
        self.assert_admin(context)
        tenant_ref = self.identity_api.update_tenant(
            context, tenant_id, tenant)
        return {'tenant': tenant_ref}

    def delete_tenant(self, context, tenant_id):
        self.assert_admin(context)
        self.identity_api.delete_tenant(context, tenant_id)

    def get_tenant_users(self, context, tenant_id, **kw):
        self.assert_admin(context)
        user_refs = self.identity_api.get_tenant_users(context, tenant_id)
        return {'users': user_refs}

    def _format_tenant_list(self, tenant_refs, **kwargs):
        # Apply marker/limit pagination to the list and wrap it in the
        # response envelope.
        marker = kwargs.get('marker')
        first_index = 0
        if marker is not None:
            for (marker_index, tenant) in enumerate(tenant_refs):
                if tenant['id'] == marker:
                    # we start pagination after the marker
                    first_index = marker_index + 1
                    break
            else:
                # for/else: the marker id was not present in the list
                msg = 'Marker could not be found'
                raise exception.ValidationError(message=msg)

        limit = kwargs.get('limit')
        last_index = None
        if limit is not None:
            try:
                limit = int(limit)
                if limit < 0:
                    # negative limits are rejected via the same error path
                    raise AssertionError()
            except (ValueError, AssertionError):
                msg = 'Invalid limit value'
                raise exception.ValidationError(message=msg)
            last_index = first_index + limit

        tenant_refs = tenant_refs[first_index:last_index]

        # default 'enabled' to True for records that never set it
        for x in tenant_refs:
            if 'enabled' not in x:
                x['enabled'] = True
        o = {'tenants': tenant_refs,
             'tenants_links': []}
        return o


class UserController(wsgi.Application):
    """Admin controller for user resources."""

    def __init__(self):
        self.identity_api = Manager()
        self.policy_api = policy.Manager()
        self.token_api = token.Manager()
        super(UserController, self).__init__()

    def get_user(self, context, user_id):
        self.assert_admin(context)
        return {'user': self.identity_api.get_user(context, user_id)}

    def get_users(self, context):
        # NOTE(termie): i can't imagine that this really wants all the data
        #               about every single user in the system...
        self.assert_admin(context)
        return {'users': self.identity_api.list_users(context)}

    # CRUD extension
    def create_user(self, context, user):
        user = self._normalize_dict(user)
        self.assert_admin(context)
        if not 'name' in user or not user['name']:
            msg = 'Name field is required and cannot be empty'
            raise exception.ValidationError(message=msg)
        tenant_id = user.get('tenantId', None)
        # validate the optional default tenant before creating the user
        if (tenant_id is not None
                and self.identity_api.get_tenant(context, tenant_id) is None):
            raise exception.TenantNotFound(tenant_id=tenant_id)
        user_id = uuid.uuid4().hex
        user_ref = user.copy()
        user_ref['id'] = user_id
        new_user_ref = self.identity_api.create_user(
            context, user_id, user_ref)
        if tenant_id:
            self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return {'user': new_user_ref}

    def update_user(self, context, user_id, user):
        # NOTE(termie): this is really more of a patch than a put
        self.assert_admin(context)
        user_ref = self.identity_api.update_user(context, user_id, user)

        # If the password was changed or the user was disabled we clear tokens
        if user.get('password') or not user.get('enabled', True):
            try:
                for token_id in self.token_api.list_tokens(context, user_id):
                    self.token_api.delete_token(context, token_id)
            except exception.NotImplemented:
                # The users status has been changed but tokens remain valid for
                # backends that can't list tokens for users
                LOG.warning('User %s status has changed, but existing tokens '
                            'remain valid' % user_id)
        return {'user': user_ref}

    def delete_user(self, context, user_id):
        # NOTE(review): unlike update_user, deletion does not revoke the
        # user's outstanding tokens here -- confirm a later layer does.
        self.assert_admin(context)
        self.identity_api.delete_user(context, user_id)

    def set_user_enabled(self, context, user_id, user):
        # thin alias: PUT .../OS-KSADM/enabled is just a partial update
        return self.update_user(context, user_id, user)

    def set_user_password(self, context, user_id, user):
        # thin alias: PUT .../OS-KSADM/password is just a partial update
        return self.update_user(context, user_id, user)

    def update_user_tenant(self, context, user_id, user):
        """Update the default tenant."""
        # ensure that we're a member of that tenant
        tenant_id = user.get('tenantId')
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        return self.update_user(context, user_id, user)


class RoleController(wsgi.Application):
    """Admin controller for role resources, including the legacy
    user-tenant "role ref" compatibility calls.
    """

    def __init__(self):
        self.identity_api = Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(RoleController, self).__init__()

    # COMPAT(essex-3)
    def get_user_roles(self, context, user_id, tenant_id=None):
        """Get the roles for a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.
        """
        self.assert_admin(context)
        if tenant_id is None:
            raise exception.NotImplemented(message='User roles not supported: '
                                                   'tenant ID required')

        roles = self.identity_api.get_roles_for_user_and_tenant(
            context, user_id, tenant_id)
        return {'roles': [self.identity_api.get_role(context, x)
                          for x in roles]}

    # CRUD extension
    def get_role(self, context, role_id):
        self.assert_admin(context)
        return {'role': self.identity_api.get_role(context, role_id)}

    def create_role(self, context, role):
        role = self._normalize_dict(role)
        self.assert_admin(context)
        if not 'name' in role or not role['name']:
            msg = 'Name field is required and cannot be empty'
            raise exception.ValidationError(message=msg)
        role_id = uuid.uuid4().hex
        role['id'] = role_id
        role_ref = self.identity_api.create_role(context, role_id, role)
        return {'role': role_ref}

    def delete_role(self, context, role_id):
        self.assert_admin(context)
        self.identity_api.delete_role(context, role_id)

    def get_roles(self, context):
        self.assert_admin(context)
        return {'roles': self.identity_api.list_roles(context)}

    def add_role_to_user(self, context, user_id, role_id, tenant_id=None):
        """Add a role to a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.
        """
        self.assert_admin(context)
        if tenant_id is None:
            raise exception.NotImplemented(message='User roles not supported: '
                                                   'tenant_id required')
        # This still has the weird legacy semantics that adding a role to
        # a user also adds them to a tenant
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        self.identity_api.add_role_to_user_and_tenant(
            context, user_id, tenant_id, role_id)
        role_ref = self.identity_api.get_role(context, role_id)
        return {'role': role_ref}

    def remove_role_from_user(self, context, user_id, role_id, tenant_id=None):
        """Remove a role from a user and tenant pair.

        Since we're trying to ignore the idea of user-only roles we're
        not implementing them in hopes that the idea will die off.
        """
        self.assert_admin(context)
        if tenant_id is None:
            raise exception.NotImplemented(message='User roles not supported: '
                                                   'tenant_id required')

        # This still has the weird legacy semantics that adding a role to
        # a user also adds them to a tenant, so we must follow up on that
        self.identity_api.remove_role_from_user_and_tenant(
            context, user_id, tenant_id, role_id)
        roles = self.identity_api.get_roles_for_user_and_tenant(
            context, user_id, tenant_id)
        if not roles:
            # last role removed: also drop the implicit tenant membership
            self.identity_api.remove_user_from_tenant(
                context, tenant_id, user_id)
        return

    # COMPAT(diablo): CRUD extension
    def get_role_refs(self, context, user_id):
        """Ultimate hack to get around having to make role_refs first-class.

        This will basically iterate over the various roles the user has in
        all tenants the user is a member of and create fake role_refs where
        the id encodes the user-tenant-role information so we can look up
        the appropriate data when we need to delete them.
        """
        self.assert_admin(context)
        # Ensure user exists by getting it first.
        self.identity_api.get_user(context, user_id)
        tenant_ids = self.identity_api.get_tenants_for_user(context, user_id)
        o = []
        for tenant_id in tenant_ids:
            role_ids = self.identity_api.get_roles_for_user_and_tenant(
                context, user_id, tenant_id)
            for role_id in role_ids:
                ref = {'roleId': role_id,
                       'tenantId': tenant_id,
                       'userId': user_id}
                # the synthetic id is the urlencoded triple, decoded again
                # in delete_role_ref
                ref['id'] = urllib.urlencode(ref)
                o.append(ref)
        return {'roles': o}

    # COMPAT(diablo): CRUD extension
    def create_role_ref(self, context, user_id, role):
        """This is actually used for adding a user to a tenant.

        In the legacy data model adding a user to a tenant required setting
        a role.
        """
        self.assert_admin(context)
        # TODO(termie): for now we're ignoring the actual role
        tenant_id = role.get('tenantId')
        role_id = role.get('roleId')
        self.identity_api.add_user_to_tenant(context, tenant_id, user_id)
        self.identity_api.add_role_to_user_and_tenant(
            context, user_id, tenant_id, role_id)
        role_ref = self.identity_api.get_role(context, role_id)
        return {'role': role_ref}

    # COMPAT(diablo): CRUD extension
    def delete_role_ref(self, context, user_id, role_ref_id):
        """This is actually used for deleting a user from a tenant.

        In the legacy data model removing a user from a tenant required
        deleting a role.

        To emulate this, we encode the tenant and role in the role_ref_id,
        and if this happens to be the last role for the user-tenant pair,
        we remove the user from the tenant.
        """
        self.assert_admin(context)
        # TODO(termie): for now we're ignoring the actual role
        role_ref_ref = urlparse.parse_qs(role_ref_id)
        tenant_id = role_ref_ref.get('tenantId')[0]
        role_id = role_ref_ref.get('roleId')[0]
        self.identity_api.remove_role_from_user_and_tenant(
            context, user_id, tenant_id, role_id)
        roles = self.identity_api.get_roles_for_user_and_tenant(
            context, user_id, tenant_id)
        if not roles:
            self.identity_api.remove_user_from_tenant(
                context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3725_0
crossvul-python_data_bad_3692_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

import routes

from keystone import catalog
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import token
from keystone.common import logging
from keystone.common import utils
from keystone.common import wsgi


class AdminRouter(wsgi.ComposingRouter):
    """Routes for the admin API: version, full token CRUD, extensions."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token',
                       conditions=dict(method=['GET']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='validate_token_head',
                       conditions=dict(method=['HEAD']))
        mapper.connect('/tokens/{token_id}',
                       controller=auth_controller,
                       action='delete_token',
                       conditions=dict(method=['DELETE']))
        mapper.connect('/tokens/{token_id}/endpoints',
                       controller=auth_controller,
                       action='endpoints',
                       conditions=dict(method=['GET']))

        # Miscellaneous Operations
        extensions_controller = AdminExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.AdminRouter()
        routers = [identity_router]
        super(AdminRouter, self).__init__(mapper, routers)


class PublicRouter(wsgi.ComposingRouter):
    """Routes for the public API: version, authenticate, extensions."""

    def __init__(self):
        mapper = routes.Mapper()

        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations
        auth_controller = TokenController()
        mapper.connect('/tokens',
                       controller=auth_controller,
                       action='authenticate',
                       conditions=dict(method=['POST']))

        # Miscellaneous
        extensions_controller = PublicExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))

        identity_router = identity.PublicRouter()
        routers = [identity_router]
        super(PublicRouter, self).__init__(mapper, routers)


class PublicVersionRouter(wsgi.ComposingRouter):
    # Serves only the version listing on the public port root.
    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('public')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(PublicVersionRouter, self).__init__(mapper, routers)


class AdminVersionRouter(wsgi.ComposingRouter):
    # Serves only the version listing on the admin port root.
    def __init__(self):
        mapper = routes.Mapper()
        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_versions')
        routers = []
        super(AdminVersionRouter, self).__init__(mapper, routers)


class VersionController(wsgi.Application):
    """Reports the API versions exposed by this deployment."""

    def __init__(self, version_type):
        self.catalog_api = catalog.Manager()
        # picks 'publicURL' or 'adminURL' out of catalog entries
        self.url_key = "%sURL" % version_type
        super(VersionController, self).__init__()

    def _get_identity_url(self, context):
        # Find the identity service's own endpoint in the catalog so the
        # version document can link back to it.
        catalog_ref = self.catalog_api.get_catalog(context=context,
                                                   user_id=None,
                                                   tenant_id=None)
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                if service == 'identity':
                    return service_ref[self.url_key]

        raise exception.NotImplemented()

    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        identity_url = self._get_identity_url(context)
        if not identity_url.endswith('/'):
            identity_url = identity_url + '/'

        versions = {}
        versions['v2.0'] = {
            "id": "v2.0",
            "status": "beta",
            "updated": "2011-11-19T00:00:00Z",
            "links": [
                {
                    "rel": "self",
                    "href": identity_url,
                }, {
                    "rel": "describedby",
                    "type": "text/html",
                    "href": "http://docs.openstack.org/api/openstack-"
                            "identity-service/2.0/content/"
                }, {
                    "rel": "describedby",
                    "type": "application/pdf",
                    "href": "http://docs.openstack.org/api/openstack-"
                            "identity-service/2.0/identity-dev-guide-"
                            "2.0.pdf"
                }
            ],
            "media-types": [
                {
                    "base": "application/json",
                    "type": "application/vnd.openstack.identity-v2.0"
                            "+json"
                }, {
                    "base": "application/xml",
                    "type": "application/vnd.openstack.identity-v2.0"
                            "+xml"
                }
            ]
        }

        return versions

    def get_versions(self, context):
        versions = self._get_versions_list(context)
        # 300 Multiple Choices is the conventional status for a version list
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            "versions": {
                "values": versions.values()
            }
        })

    def get_version(self, context):
        versions = self._get_versions_list(context)
        return wsgi.render_response(body={
            "version": versions['v2.0']
        })


class NoopController(wsgi.Application):
    # Placeholder endpoint that always returns an empty document.
    def __init__(self):
        super(NoopController, self).__init__()

    def noop(self, context):
        return {}


class TokenController(wsgi.Application):
    """Issue, validate and revoke v2.0 tokens."""

    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()

    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"test_user",
                        "password":"mypass"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
        """
        token_id = uuid.uuid4().hex
        if 'passwordCredentials' in auth:
            username = auth['passwordCredentials'].get('username', '')
            password = auth['passwordCredentials'].get('password', '')
            tenant_name = auth.get('tenantName', None)
            user_id = auth['passwordCredentials'].get('userId', None)
            # a username, when given, takes precedence over userId
            if username:
                user_ref = self.identity_api.get_user_by_name(
                    context=context, user_name=username)
                if user_ref:
                    user_id = user_ref['id']

            # more compat
            tenant_id = auth.get('tenantId', None)
            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                    context=context, tenant_name=tenant_name)
                if tenant_ref:
                    tenant_id = tenant_ref['id']

            try:
                auth_info = self.identity_api.authenticate(
                    context=context,
                    user_id=user_id,
                    password=password,
                    tenant_id=tenant_id)
                (user_ref, tenant_ref, metadata_ref) = auth_info

                # If the user is disabled don't allow them to authenticate
                if not user_ref.get('enabled', True):
                    raise exception.Forbidden(message='User has been disabled')
            except AssertionError as e:
                # backend signals bad credentials via AssertionError
                raise exception.Unauthorized(e.message)

            token_ref = self.token_api.create_token(
                context, token_id, dict(id=token_id,
                                        user=user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))
            if tenant_ref:
                catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'],
                    metadata=metadata_ref)
            else:
                catalog_ref = {}

        elif 'token' in auth:
            # Token-for-token exchange (e.g. unscoped -> scoped).
            token = auth['token'].get('id', None)

            tenant_name = auth.get('tenantName')

            # more compat
            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                    context=context, tenant_name=tenant_name)
                tenant_id = tenant_ref['id']
            else:
                tenant_id = auth.get('tenantId', None)

            try:
                old_token_ref = self.token_api.get_token(context=context,
                                                         token_id=token)
            except exception.NotFound:
                raise exception.Unauthorized()

            user_ref = old_token_ref['user']

            # NOTE(review): unlike the password path above, this path never
            # re-checks user_ref.get('enabled'), so a user disabled after
            # getting a token can still exchange it for new scoped tokens --
            # confirm whether this is the known authorization gap here.
            tenants = self.identity_api.get_tenants_for_user(context,
                                                             user_ref['id'])
            if tenant_id:
                # NOTE(review): membership is enforced with a bare assert,
                # which is stripped under `python -O` -- confirm.
                assert tenant_id in tenants

            tenant_ref = self.identity_api.get_tenant(context=context,
                                                      tenant_id=tenant_id)
            if tenant_ref:
                metadata_ref = self.identity_api.get_metadata(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'])
                catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'],
                    metadata=metadata_ref)
            else:
                metadata_ref = {}
                catalog_ref = {}

            token_ref = self.token_api.create_token(
                context, token_id, dict(id=token_id,
                                        user=user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))
        logging.debug('TOKEN_REF %s', token_ref)
        return self._format_authenticate(token_ref, roles_ref, catalog_ref)

    def _get_token_ref(self, context, token_id, belongs_to=None):
        """Returns a token if a valid one exists.

        Optionally, limited to a token owned by a specific tenant.
        """
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        token_ref = self.token_api.get_token(context=context,
                                             token_id=token_id)
        if belongs_to:
            # NOTE(review): tenant-ownership check via bare assert (stripped
            # under -O) -- confirm.
            assert token_ref['tenant']['id'] == belongs_to
        return token_ref

    # admin only
    def validate_token_head(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Identical to ``validate_token``, except does not return a response.
        """
        belongs_to = context['query_string'].get("belongsTo")
        assert self._get_token_ref(context, token_id, belongs_to)

    # admin only
    def validate_token(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Returns metadata about the token along any associated roles.
        """
        belongs_to = context['query_string'].get("belongsTo")
        token_ref = self._get_token_ref(context, token_id, belongs_to)

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        metadata_ref = token_ref['metadata']
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # Get a service catalog if belongs_to is not none
        # This is needed for on-behalf-of requests
        catalog_ref = None
        if belongs_to is not None:
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=metadata_ref)
        return self._format_token(token_ref, roles_ref, catalog_ref)

    def delete_token(self, context, token_id):
        """Delete a token, effectively invalidating it for authz."""
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        self.token_api.delete_token(context=context, token_id=token_id)

    def endpoints(self, context, token_id):
        """Return a list of endpoints available to the token."""
        raise exception.NotImplemented()

    def _format_authenticate(self, token_ref, roles_ref, catalog_ref):
        # authenticate responses are a token document plus the catalog
        o = self._format_token(token_ref, roles_ref)
        o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o

    def _format_token(self, token_ref, roles_ref, catalog_ref=None):
        # Build the v2.0 'access' response envelope from a stored token.
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        expires = token_ref['expires']
        if expires is not None:
            expires = utils.isotime(expires)
        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  },
                        'user': {'id': user_ref['id'],
'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, 
**kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': ('https://github.com/openstack/' 'identity-api'), } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3692_1
crossvul-python_data_good_3565_0
import json

import webob
from xml.dom import minidom
from xml.parsers import expat

import faults
from nova import exception
from nova import log as logging
from nova import utils
from nova import wsgi

XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'

LOG = logging.getLogger('nova.api.openstack.wsgi')


class Request(webob.Request):
    """Add some Openstack API-specific logic to the base webob.Request."""

    def best_match_content_type(self, supported_content_types=None):
        """Determine the requested response content-type.

        Based on the query extension then the Accept header.

        """
        supported_content_types = supported_content_types or \
            ('application/json', 'application/xml')

        # A trailing ".json"/".xml" path extension takes priority over the
        # Accept header.
        parts = self.path.rsplit('.', 1)
        if len(parts) > 1:
            ctype = 'application/{0}'.format(parts[1])
            if ctype in supported_content_types:
                return ctype

        bm = self.accept.best_match(supported_content_types)

        # default to application/json if we don't find a preference
        return bm or 'application/json'

    def get_content_type(self):
        """Determine content type of the request body.

        Does not do any body introspection, only checks header

        :returns: the Content-Type header value, or None if absent
        :raises exception.InvalidContentType: if the header names an
                unsupported type

        """
        if "Content-Type" not in self.headers:
            return None

        allowed_types = ("application/xml", "application/json")
        content_type = self.content_type

        if content_type not in allowed_types:
            raise exception.InvalidContentType(content_type=content_type)
        return content_type


class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Find and call local method."""
        action = kwargs.pop('action', 'default')
        action_method = getattr(self, str(action), self.default)
        return action_method(*args, **kwargs)

    def default(self, data):
        # Subclasses must provide a fallback handler.
        raise NotImplementedError()


class TextDeserializer(ActionDispatcher):
    """Default request body deserialization"""

    def deserialize(self, datastring, action='default'):
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        return {}


class JSONDeserializer(TextDeserializer):
    """Deserialize a JSON request body into a dict."""

    def _from_json(self, datastring):
        try:
            return utils.loads(datastring)
        except ValueError:
            msg = _("cannot understand JSON")
            raise exception.MalformedRequestBody(reason=msg)

    def default(self, datastring):
        return {'body': self._from_json(datastring)}


class XMLDeserializer(TextDeserializer):
    """Deserialize an XML request body into a dict."""

    def __init__(self, metadata=None):
        """
        :param metadata: information needed to deserialize xml into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}

    def _from_xml(self, datastring):
        plurals = set(self.metadata.get('plurals', {}))

        try:
            node = minidom.parseString(datastring).childNodes[0]
            return {node.nodeName: self._from_xml_node(node, plurals)}
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)

    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.

        :param listnames: list of XML node names whose subnodes should
                          be considered list items.

        """
        # nodeType 3 is TEXT_NODE: a single text child means a scalar value.
        if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
            return node.childNodes[0].nodeValue
        elif node.nodeName in listnames:
            return [self._from_xml_node(n, listnames)
                    for n in node.childNodes]
        else:
            result = dict()
            for attr in node.attributes.keys():
                result[attr] = node.attributes[attr].nodeValue
            for child in node.childNodes:
                if child.nodeType != node.TEXT_NODE:
                    result[child.nodeName] = self._from_xml_node(child,
                                                                 listnames)
            return result

    def find_first_child_named(self, parent, name):
        """Search a nodes children for the first child with a given name"""
        for node in parent.childNodes:
            if node.nodeName == name:
                return node
        return None

    def find_children_named(self, parent, name):
        """Return all of a nodes children who have the given name"""
        for node in parent.childNodes:
            if node.nodeName == name:
                yield node

    def extract_text(self, node):
        """Get the text field contained by the given node"""
        if len(node.childNodes) == 1:
            child = node.childNodes[0]
            if child.nodeType == child.TEXT_NODE:
                return child.nodeValue
        return ""

    def default(self, datastring):
        return {'body': self._from_xml(datastring)}


class MetadataXMLDeserializer(XMLDeserializer):
    """XML deserializer aware of the <metadata>/<meta> convention."""

    def extract_metadata(self, metadata_node):
        """Marshal the metadata attribute of a parsed request"""
        metadata = {}
        if metadata_node is not None:
            for meta_node in self.find_children_named(metadata_node, "meta"):
                key = meta_node.getAttribute("key")
                metadata[key] = self.extract_text(meta_node)
        return metadata


class RequestHeadersDeserializer(ActionDispatcher):
    """Default request headers deserializer"""

    def deserialize(self, request, action):
        return self.dispatch(request, action=action)

    def default(self, request):
        return {}


class RequestDeserializer(object):
    """Break up a Request object into more useful pieces."""

    def __init__(self, body_deserializers=None, headers_deserializer=None,
                 supported_content_types=None):

        self.supported_content_types = supported_content_types or \
            ('application/json', 'application/xml')

        self.body_deserializers = {
            'application/xml': XMLDeserializer(),
            'application/json': JSONDeserializer(),
        }
        self.body_deserializers.update(body_deserializers or {})

        self.headers_deserializer = headers_deserializer or \
            RequestHeadersDeserializer()

    def deserialize(self, request):
        """Extract necessary pieces of the request.

        :param request: Request object
        :returns tuple of expected controller action name, dictionary of
                 keyword arguments to pass to the controller, the expected
                 content type of the response

        """
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)

        action_args.update(self.deserialize_headers(request, action))
        action_args.update(self.deserialize_body(request, action))

        accept = self.get_expected_content_type(request)

        return (action, action_args, accept)

    def deserialize_headers(self, request, action):
        return self.headers_deserializer.deserialize(request, action)

    def deserialize_body(self, request, action):
        """Deserialize the request body; an empty dict means "no body args"."""
        try:
            content_type = request.get_content_type()
        except exception.InvalidContentType:
            LOG.debug(_("Unrecognized Content-Type provided in request"))
            return {}

        if content_type is None:
            LOG.debug(_("No Content-Type provided in request"))
            return {}

        if not request.body:
            LOG.debug(_("Empty body provided in request"))
            return {}

        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            LOG.debug(_("Unable to deserialize body as provided Content-Type"))
            raise

        return deserializer.deserialize(request.body, action)

    def get_body_deserializer(self, content_type):
        try:
            return self.body_deserializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)

    def get_expected_content_type(self, request):
        return request.best_match_content_type(self.supported_content_types)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        # Deliberately best-effort: any problem reading the routing args
        # simply yields no args rather than failing the request here.
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args


class DictSerializer(ActionDispatcher):
    """Default request body serialization"""

    def serialize(self, data, action='default'):
        return self.dispatch(data, action=action)

    def default(self, data):
        return ""


class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization"""

    def default(self, data):
        return utils.dumps(data)


class XMLDictSerializer(DictSerializer):
    """Serialize a dict into XML using optional schema metadata."""

    def __init__(self, metadata=None, xmlns=None):
        """
        :param metadata: information needed to deserialize xml into
                         a dictionary.
        :param xmlns: XML namespace to include with serialized xml
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        self.xmlns = xmlns

    def default(self, data):
        # We expect data to contain a single key which is the XML root.
        root_key = data.keys()[0]
        doc = minidom.Document()
        node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])

        return self.to_xml_string(node)

    def to_xml_string(self, node, has_atom=False):
        self._add_xmlns(node, has_atom)
        return node.toprettyxml(indent='    ', encoding='UTF-8')

    #NOTE (ameade): the has_atom should be removed after all of the
    # xml serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, has_atom=False):
        if self.xmlns is not None:
            node.setAttribute('xmlns', self.xmlns)
        if has_atom:
            node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")

    def _to_xml_node(self, doc, metadata, nodename, data):
        """Recursive method to convert data members to XML nodes."""
        result = doc.createElement(nodename)

        # Set the xml namespace if one is specified
        # TODO(justinsb): We could also use prefixes on the keys
        xmlns = metadata.get('xmlns', None)
        if xmlns:
            result.setAttribute('xmlns', xmlns)

        #TODO(bcwaldon): accomplish this without a type-check
        if type(data) is list:
            collections = metadata.get('list_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for item in data:
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(item))
                    result.appendChild(node)
                return result
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                node = self._to_xml_node(doc, metadata, singular, item)
                result.appendChild(node)
        #TODO(bcwaldon): accomplish this without a type-check
        elif type(data) is dict:
            collections = metadata.get('dict_collections', {})
            if nodename in collections:
                metadata = collections[nodename]
                for k, v in data.items():
                    node = doc.createElement(metadata['item_name'])
                    node.setAttribute(metadata['item_key'], str(k))
                    text = doc.createTextNode(str(v))
                    node.appendChild(text)
                    result.appendChild(node)
                return result
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in data.items():
                if k in attrs:
                    result.setAttribute(k, str(v))
                else:
                    node = self._to_xml_node(doc, metadata, k, v)
                    result.appendChild(node)
        else:
            # Type is atom
            node = doc.createTextNode(str(data))
            result.appendChild(node)
        return result

    def _create_link_nodes(self, xml_doc, links):
        link_nodes = []
        for link in links:
            link_node = xml_doc.createElement('atom:link')
            link_node.setAttribute('rel', link['rel'])
            link_node.setAttribute('href', link['href'])
            if 'type' in link:
                link_node.setAttribute('type', link['type'])
            link_nodes.append(link_node)
        return link_nodes


class ResponseHeadersSerializer(ActionDispatcher):
    """Default response headers serialization"""

    def serialize(self, response, data, action):
        self.dispatch(response, data, action=action)

    def default(self, response, data):
        response.status_int = 200


class ResponseSerializer(object):
    """Encode the necessary pieces into a response object"""

    def __init__(self, body_serializers=None, headers_serializer=None):
        self.body_serializers = {
            'application/xml': XMLDictSerializer(),
            'application/json': JSONDictSerializer(),
        }
        self.body_serializers.update(body_serializers or {})

        self.headers_serializer = headers_serializer or \
            ResponseHeadersSerializer()

    def serialize(self, response_data, content_type, action='default'):
        """Serialize a dict into a string and wrap in a wsgi.Request object.

        :param response_data: dict produced by the Controller
        :param content_type: expected mimetype of serialized response body

        """
        response = webob.Response()
        self.serialize_headers(response, response_data, action)
        self.serialize_body(response, response_data, content_type, action)
        return response

    def serialize_headers(self, response, data, action):
        self.headers_serializer.serialize(response, data, action)

    def serialize_body(self, response, data, content_type, action):
        response.headers['Content-Type'] = content_type
        if data is not None:
            serializer = self.get_body_serializer(content_type)
            response.body = serializer.serialize(data, action)

    def get_body_serializer(self, content_type):
        try:
            return self.body_serializers[content_type]
        except (KeyError, TypeError):
            raise exception.InvalidContentType(content_type=content_type)


class Resource(wsgi.Application):
    """WSGI app that handles (de)serialization and controller dispatch.

    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon its controller.  All
    controller action methods must accept a 'req' argument, which is the
    incoming wsgi.Request.  If the operation is a PUT or POST, the controller
    method must also accept a 'body' argument (the deserialized request body).
    They may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.

    """

    def __init__(self, controller, deserializer=None, serializer=None):
        """
        :param controller: object that implement methods created by routes lib
        :param deserializer: object that can serialize the output of a
                             controller into a webob response
        :param serializer: object that can deserialize a webob request into
                           necessary pieces

        """
        self.controller = controller
        self.deserializer = deserializer or RequestDeserializer()
        self.serializer = serializer or ResponseSerializer()

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        LOG.info("%(method)s %(url)s" % {"method": request.method,
                                         "url": request.url})

        try:
            action, args, accept = self.deserializer.deserialize(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))

        # Reject requests whose URL project_id does not match the
        # authenticated context's project_id.
        project_id = args.pop("project_id", None)
        if ('nova.context' in request.environ and project_id
            and project_id != request.environ['nova.context'].project_id):
            msg = _("Malformed request url")
            return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))

        try:
            action_result = self.dispatch(request, action, args)
        except faults.Fault as ex:
            LOG.info(_("Fault thrown: %s"), unicode(ex))
            action_result = ex
        except webob.exc.HTTPException as ex:
            LOG.info(_("HTTP exception thrown: %s"), unicode(ex))
            action_result = faults.Fault(ex)

        # Only plain dicts (or None) are serialized here; anything else is
        # assumed to already be a response object.
        if type(action_result) is dict or action_result is None:
            response = self.serializer.serialize(action_result,
                                                 accept,
                                                 action=action)
        else:
            response = action_result

        try:
            msg_dict = dict(url=request.url, status=response.status_int)
            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
        except AttributeError as e:
            msg_dict = dict(url=request.url, e=e)
            msg = _("%(url)s returned a fault: %(e)s" % msg_dict)

        LOG.info(msg)

        return response

    def dispatch(self, request, action, action_args):
        """Find action-specific method on controller and call it."""

        controller_method = getattr(self.controller, action)
        try:
            return controller_method(req=request, **action_args)
        except TypeError as exc:
            LOG.exception(exc)
            return faults.Fault(webob.exc.HTTPBadRequest())
./CrossVul/dataset_final_sorted/CWE-264/py/good_3565_0
crossvul-python_data_bad_1622_2
from configparser import RawConfigParser
from attic.remote import cache_if_remote
import msgpack
import os
import sys
from binascii import hexlify
import shutil

from .key import PlaintextKey
from .helpers import Error, get_cache_dir, decode_dict, st_mtime_ns, unhexlify, UpgradableLock, int_to_bigint, \
    bigint_to_int
from .hashindex import ChunkIndex


class Cache(object):
    """Client Side cache
    """
    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last access, refusing to continue"""

    def __init__(self, repository, key, manifest, path=None, sync=True, warn_if_unencrypted=True):
        # Initialize lock to None so close()/__del__ are safe even when
        # open() fails before the lock is acquired.
        self.lock = None
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii'))
        if not os.path.exists(self.path):
            # A missing cache dir means this client has never seen this
            # repository before.  If it is unencrypted, warn the user: an
            # attacker could have replaced a known encrypted repository
            # with an unencrypted one to capture data.
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                if 'ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK' not in os.environ:
                    print("""Warning: Attempting to access a previously unknown unencrypted repository\n""",
                          file=sys.stderr)
                    answer = input('Do you want to continue? [yN] ')
                    if not (answer and answer in 'Yy'):
                        raise self.CacheInitAbortedError()
            self.create()
        self.open()
        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            # Make sure an encrypted repository has not been swapped for an
            # unencrypted repository
            if self.key_type is not None and self.key_type != str(key.TYPE):
                raise self.EncryptionMethodMismatch()
            self.sync()
            self.commit()

    def __del__(self):
        self.close()

    def create(self):
        """Create a new empty cache at `path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is an Attic cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        with open(os.path.join(self.path, 'files'), 'w') as fd:
            pass  # empty file

    def open(self):
        """Open an existing cache dir, acquire the lock and load state."""
        if not os.path.isdir(self.path):
            raise Exception('%s Does not look like an Attic cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'config'), exclusive=True)
        self.rollback()
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            raise Exception('%s Does not look like an Attic cache')
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        # key_type may be missing in caches written by older versions.
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
        self.files = None

    def close(self):
        # Guard: open() may have failed before the lock was acquired.
        if self.lock:
            self.lock.release()

    def _read_files(self):
        """Load the per-file cache, incrementing each entry's age counter."""
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
                    item[0] += 1
                    self.files[path_hash] = msgpack.packb(item)

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    item = msgpack.unpackb(item)
                    if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime:
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        # Record the repository's encryption method so a later swap to an
        # unencrypted repository can be detected in __init__.
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def sync(self):
        """Initializes cache by fetching and reading all archive indicies
        """
        def add(id, size, csize):
            try:
                count, size, csize = self.chunks[id]
                self.chunks[id] = count + 1, size, csize
            except KeyError:
                self.chunks[id] = 1, size, csize
        self.begin_txn()
        print('Initializing cache...')
        self.chunks.clear()
        unpacker = msgpack.Unpacker()
        repository = cache_if_remote(self.repository)
        for name, info in self.manifest.archives.items():
            archive_id = info[b'id']
            cdata = repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            add(archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            print('Analyzing archive:', archive[b'name'])
            for key, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = self.key.decrypt(key, chunk)
                add(key, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_id, size, csize)

    def add_chunk(self, id, data, stats):
        if not self.txn_active:
            self.begin_txn()
        if self.seen_chunk(id):
            return self.chunk_incref(id, stats)
        size = len(data)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id):
        return self.chunks.get(id, (0, 0, 0))[0]

    def chunk_incref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        """Return the cached chunk ids for path_hash if inode, size and
        mtime all still match, else None."""
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
            # reset entry age
            entry[0] = 0
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime, mtime_ns)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_1622_2
crossvul-python_data_good_3634_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import webob

from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.api.openstack import extensions
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import db
from nova import exception
from nova import quota


authorize = extensions.extension_authorizer('compute', 'quotas')


quota_resources = ['metadata_items', 'injected_file_content_bytes',
                   'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances',
                   'injected_files', 'cores', 'security_groups',
                   'security_group_rules']


class QuotaTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the XML template used to serialize a quota set."""
        quota_root = xmlutil.TemplateElement('quota_set',
                                             selector='quota_set')
        quota_root.set('id')

        # One sub-element per known quota resource.
        for resource_name in quota_resources:
            sub_elem = xmlutil.SubTemplateElement(quota_root, resource_name)
            sub_elem.text = resource_name

        return xmlutil.MasterTemplate(quota_root, 1)


class QuotaSetsController(object):

    def _format_quota_set(self, project_id, quota_set):
        """Map a quota object onto the API response dictionary."""
        formatted = {'id': str(project_id)}
        formatted.update((name, quota_set[name]) for name in quota_resources)
        return {'quota_set': formatted}

    @wsgi.serializers(xml=QuotaTemplate)
    def show(self, req, id):
        """Return the quota set for the given project."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            sqlalchemy_api.authorize_project_context(context, id)
            project_quotas = quota.get_project_quotas(context, id)
            return self._format_quota_set(id, project_quotas)
        except exception.NotAuthorized:
            raise webob.exc.HTTPForbidden()

    @wsgi.serializers(xml=QuotaTemplate)
    def update(self, req, id, body):
        """Update quota values for the given project."""
        context = req.environ['nova.context']
        authorize(context)
        project_id = id
        for resource_name, raw_value in body['quota_set'].items():
            # Silently ignore keys that are not known quota resources.
            if resource_name not in quota_resources:
                continue
            limit = int(raw_value)
            try:
                db.quota_update(context, project_id, resource_name, limit)
            except exception.ProjectQuotaNotFound:
                # First time this quota is set for the project.
                db.quota_create(context, project_id, resource_name, limit)
            except exception.AdminRequired:
                raise webob.exc.HTTPForbidden()
        return {'quota_set': quota.get_project_quotas(context, project_id)}

    @wsgi.serializers(xml=QuotaTemplate)
    def defaults(self, req, id):
        """Return the default quota values."""
        authorize(req.environ['nova.context'])
        return self._format_quota_set(id, quota._get_default_quotas())


class Quotas(extensions.ExtensionDescriptor):
    """Quotas management support"""

    name = "Quotas"
    alias = "os-quota-sets"
    namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1"
    updated = "2011-08-08T00:00:00+00:00"

    def get_resources(self):
        """Expose the os-quota-sets resource with a 'defaults' member."""
        quota_sets = extensions.ResourceExtension(
            'os-quota-sets',
            QuotaSetsController(),
            member_actions={'defaults': 'GET'})
        return [quota_sets]
./CrossVul/dataset_final_sorted/CWE-264/py/good_3634_1
crossvul-python_data_bad_3771_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ /images endpoint for Glance v1 API """ import sys import traceback import eventlet from webob.exc import (HTTPError, HTTPNotFound, HTTPConflict, HTTPBadRequest, HTTPForbidden, HTTPRequestEntityTooLarge, HTTPServiceUnavailable) from glance.api import common from glance.api import policy import glance.api.v1 from glance import context from glance.api.v1 import controller from glance.api.v1 import filters from glance.common import exception from glance.common import utils from glance.common import wsgi from glance import notifier from glance.openstack.common import cfg import glance.openstack.common.log as logging from glance import registry from glance.store import (create_stores, get_from_backend, get_size_from_backend, safe_delete_from_backend, schedule_delayed_delete_from_backend, get_store_from_location, get_store_from_scheme) LOG = logging.getLogger(__name__) SUPPORTED_PARAMS = glance.api.v1.SUPPORTED_PARAMS SUPPORTED_FILTERS = glance.api.v1.SUPPORTED_FILTERS CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf'] DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso'] # Defined at module level due to _is_opt_registered # identity check (not equality). 
default_store_opt = cfg.StrOpt('default_store', default='file') CONF = cfg.CONF CONF.register_opt(default_store_opt) def validate_image_meta(req, values): name = values.get('name') disk_format = values.get('disk_format') container_format = values.get('container_format') if 'disk_format' in values: if not disk_format in DISK_FORMATS: msg = "Invalid disk format '%s' for image." % disk_format raise HTTPBadRequest(explanation=msg, request=req) if 'container_format' in values: if not container_format in CONTAINER_FORMATS: msg = "Invalid container format '%s' for image." % container_format raise HTTPBadRequest(explanation=msg, request=req) if name and len(name) > 255: msg = _('Image name too long: %d') % len(name) raise HTTPBadRequest(explanation=msg, request=req) amazon_formats = ('aki', 'ari', 'ami') if disk_format in amazon_formats or container_format in amazon_formats: if disk_format is None: values['disk_format'] = container_format elif container_format is None: values['container_format'] = disk_format elif container_format != disk_format: msg = ("Invalid mix of disk and container formats. " "When setting a disk or container format to " "one of 'aki', 'ari', or 'ami', the container " "and disk formats must match.") raise HTTPBadRequest(explanation=msg, request=req) return values class Controller(controller.BaseController): """ WSGI controller for images resource in Glance v1 API The images resource API is a RESTful web service for image data. 
The API is as follows:: GET /images -- Returns a set of brief metadata about images GET /images/detail -- Returns a set of detailed metadata about images HEAD /images/<ID> -- Return metadata about an image with id <ID> GET /images/<ID> -- Return image data for image with id <ID> POST /images -- Store image data and return metadata about the newly-stored image PUT /images/<ID> -- Update image metadata and/or upload image data for a previously-reserved image DELETE /images/<ID> -- Delete the image with id <ID> """ def __init__(self): create_stores() self.verify_scheme_or_exit(CONF.default_store) self.notifier = notifier.Notifier() registry.configure_registry_client() self.policy = policy.Enforcer() self.pool = eventlet.GreenPool(size=1024) def _enforce(self, req, action): """Authorize an action against our policies""" try: self.policy.enforce(req.context, action, {}) except exception.Forbidden: raise HTTPForbidden() def index(self, req): """ Returns the following information for all public, available images: * id -- The opaque image identifier * name -- The name of the image * disk_format -- The disk image format * container_format -- The "container" format of the image * checksum -- MD5 checksum of the image data * size -- Size of image data in bytes :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'disk_format': <DISK_FORMAT>, 'container_format': <DISK_FORMAT>, 'checksum': <CHECKSUM> 'size': <SIZE>}, ... 
]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_list(req.context, **params) except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def detail(self, req): """ Returns detailed information for all public, available images :param req: The WSGI/Webob Request object :retval The response body is a mapping of the following form:: {'images': [ {'id': <ID>, 'name': <NAME>, 'size': <SIZE>, 'disk_format': <DISK_FORMAT>, 'container_format': <CONTAINER_FORMAT>, 'checksum': <CHECKSUM>, 'min_disk': <MIN_DISK>, 'min_ram': <MIN_RAM>, 'store': <STORE>, 'status': <STATUS>, 'created_at': <TIMESTAMP>, 'updated_at': <TIMESTAMP>, 'deleted_at': <TIMESTAMP>|<NONE>, 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ... ]} """ self._enforce(req, 'get_images') params = self._get_query_params(req) try: images = registry.get_images_detail(req.context, **params) # Strip out the Location attribute. Temporary fix for # LP Bug #755916. This information is still coming back # from the registry, since the API server still needs access # to it, however we do not return this potential security # information to the API end user... for image in images: del image['location'] except exception.Invalid, e: raise HTTPBadRequest(explanation="%s" % e) return dict(images=images) def _get_query_params(self, req): """ Extracts necessary query params from request. 
:param req: the WSGI Request object :retval dict of parameters that can be used by registry client """ params = {'filters': self._get_filters(req)} for PARAM in SUPPORTED_PARAMS: if PARAM in req.params: params[PARAM] = req.params.get(PARAM) return params def _get_filters(self, req): """ Return a dictionary of query param filters from the request :param req: the Request object coming from the wsgi layer :retval a dict of key/value filters """ query_filters = {} for param in req.params: if param in SUPPORTED_FILTERS or param.startswith('property-'): query_filters[param] = req.params.get(param) if not filters.validate(param, query_filters[param]): raise HTTPBadRequest('Bad value passed to filter %s ' 'got %s' % (param, query_filters[param])) return query_filters def meta(self, req, id): """ Returns metadata about an image in the HTTP headers of the response object :param req: The WSGI/Webob Request object :param id: The opaque image identifier :retval similar to 'show' method but without image_data :raises HTTPNotFound if image metadata is not available to user """ self._enforce(req, 'get_image') image_meta = self.get_image_meta_or_404(req, id) del image_meta['location'] return { 'image_meta': image_meta } @staticmethod def _validate_source(source, req): """ External sources (as specified via the location or copy-from headers) are supported only over non-local store types, i.e. S3, Swift, HTTP. Note the absence of file:// for security reasons, see LP bug #942118. If the above constraint is violated, we reject with 400 "Bad Request". 
""" if source: for scheme in ['s3', 'swift', 'http']: if source.lower().startswith(scheme): return source msg = _("External sourcing not supported for store %s") % source LOG.error(msg) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") @staticmethod def _copy_from(req): return req.headers.get('x-glance-api-copy-from') @staticmethod def _external_source(image_meta, req): source = image_meta.get('location', Controller._copy_from(req)) return Controller._validate_source(source, req) @staticmethod def _get_from_store(context, where): try: image_data, image_size = get_from_backend(context, where) except exception.NotFound, e: raise HTTPNotFound(explanation="%s" % e) image_size = int(image_size) if image_size else None return image_data, image_size def show(self, req, id): """ Returns an iterator that can be used to retrieve an image's data along with the image metadata. :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HTTPNotFound if image is not available to user """ self._enforce(req, 'get_image') self._enforce(req, 'download_image') image_meta = self.get_active_image_meta_or_404(req, id) if image_meta.get('size') == 0: image_iterator = iter([]) else: image_iterator, size = self._get_from_store(req.context, image_meta['location']) image_iterator = utils.cooperative_iter(image_iterator) image_meta['size'] = size or image_meta['size'] del image_meta['location'] return { 'image_iterator': image_iterator, 'image_meta': image_meta, } def _reserve(self, req, image_meta): """ Adds the image metadata to the registry and assigns an image identifier if one is not supplied in the request headers. Sets the image's status to `queued`. 
:param req: The WSGI/Webob Request object :param id: The opaque image identifier :param image_meta: The image metadata :raises HTTPConflict if image already exists :raises HTTPBadRequest if image metadata is not valid """ location = self._external_source(image_meta, req) image_meta['status'] = ('active' if image_meta.get('size') == 0 else 'queued') if location: store = get_store_from_location(location) # check the store exists before we hit the registry, but we # don't actually care what it is at this point self.get_store_or_400(req, store) # retrieve the image size from remote store (if not provided) image_meta['size'] = self._get_size(req.context, image_meta, location) else: # Ensure that the size attribute is set to zero for directly # uploadable images (if not provided). The size will be set # to a non-zero value during upload image_meta['size'] = image_meta.get('size', 0) try: image_meta = registry.add_image_metadata(req.context, image_meta) return image_meta except exception.Duplicate: msg = (_("An image with identifier %s already exists") % image_meta['id']) LOG.error(msg) raise HTTPConflict(explanation=msg, request=req, content_type="text/plain") except exception.Invalid, e: msg = (_("Failed to reserve image. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden: msg = _("Forbidden to reserve image.") LOG.error(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") def _upload(self, req, image_meta): """ Uploads the payload of the request to a backend store in Glance. If the `x-image-meta-store` header is set, Glance will attempt to use that scheme; if not, Glance will use the scheme set by the flag `default_store` to find the backing store. 
:param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :raises HTTPConflict if image already exists :retval The location where the image was stored """ copy_from = self._copy_from(req) if copy_from: try: image_data, image_size = self._get_from_store(req.context, copy_from) except Exception as e: self._safe_kill(req, image_meta['id']) msg = _("Copy from external source failed: %s") % e LOG.error(msg) return image_meta['size'] = image_size or image_meta['size'] else: try: req.get_content_type('application/octet-stream') except exception.InvalidContentType: self._safe_kill(req, image_meta['id']) msg = _("Content-Type must be application/octet-stream") LOG.error(msg) raise HTTPBadRequest(explanation=msg) image_data = req.body_file scheme = req.headers.get('x-image-meta-store', CONF.default_store) store = self.get_store_or_400(req, scheme) image_id = image_meta['id'] LOG.debug(_("Setting image %s to status 'saving'"), image_id) registry.update_image_metadata(req.context, image_id, {'status': 'saving'}) LOG.debug(_("Uploading image data for image %(image_id)s " "to %(scheme)s store"), locals()) try: location, size, checksum = store.add( image_meta['id'], utils.CooperativeReader(image_data), image_meta['size']) # Verify any supplied checksum value matches checksum # returned from store when adding image supplied_checksum = image_meta.get('checksum') if supplied_checksum and supplied_checksum != checksum: msg = _("Supplied checksum (%(supplied_checksum)s) and " "checksum generated from uploaded image " "(%(checksum)s) did not match. Setting image " "status to 'killed'.") % locals() LOG.error(msg) self._safe_kill(req, image_id) raise HTTPBadRequest(explanation=msg, content_type="text/plain", request=req) # Update the database with the checksum returned # from the backend store LOG.debug(_("Updating image %(image_id)s data. 
" "Checksum set to %(checksum)s, size set " "to %(size)d"), locals()) update_data = {'checksum': checksum, 'size': size} image_meta = registry.update_image_metadata(req.context, image_id, update_data) self.notifier.info('image.upload', image_meta) return location except exception.Duplicate, e: msg = _("Attempt to upload duplicate image: %s") % e LOG.error(msg) self._safe_kill(req, image_id) raise HTTPConflict(explanation=msg, request=req) except exception.Forbidden, e: msg = _("Forbidden upload attempt: %s") % e LOG.error(msg) self._safe_kill(req, image_id) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") except exception.StorageFull, e: msg = _("Image storage media is full: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPRequestEntityTooLarge(explanation=msg, request=req, content_type='text/plain') except exception.StorageWriteDenied, e: msg = _("Insufficient permissions on image storage media: %s") % e LOG.error(msg) self._safe_kill(req, image_id) self.notifier.error('image.upload', msg) raise HTTPServiceUnavailable(explanation=msg, request=req, content_type='text/plain') except exception.ImageSizeLimitExceeded, e: msg = _("Denying attempt to upload image larger than %d bytes.") self._safe_kill(req, image_id) raise HTTPBadRequest(explanation=msg % CONF.image_size_cap, request=req, content_type='text/plain') except HTTPError, e: self._safe_kill(req, image_id) #NOTE(bcwaldon): Ideally, we would just call 'raise' here, # but something in the above function calls is affecting the # exception context and we must explicitly re-raise the # caught exception. 
raise e except Exception, e: tb_info = traceback.format_exc() LOG.error(tb_info) self._safe_kill(req, image_id) msg = _("Error uploading image: (%(class_name)s): " "%(exc)s") % ({'class_name': e.__class__.__name__, 'exc': str(e)}) raise HTTPBadRequest(explanation=msg, request=req) def _activate(self, req, image_id, location): """ Sets the image status to `active` and the image's location attribute. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier :param location: Location of where Glance stored this image """ image_meta = {} image_meta['location'] = location image_meta['status'] = 'active' try: image_meta_data = registry.update_image_metadata(req.context, image_id, image_meta) self.notifier.info("image.update", image_meta_data) return image_meta_data except exception.Invalid, e: msg = (_("Failed to activate image. Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") def _kill(self, req, image_id): """ Marks the image status to `killed`. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ registry.update_image_metadata(req.context, image_id, {'status': 'killed'}) def _safe_kill(self, req, image_id): """ Mark image killed without raising exceptions if it fails. Since _kill is meant to be called from exceptions handlers, it should not raise itself, rather it should just log its error. :param req: The WSGI/Webob Request object :param image_id: Opaque image identifier """ try: self._kill(req, image_id) except Exception, e: LOG.error(_("Unable to kill image %(id)s: " "%(exc)s") % ({'id': image_id, 'exc': repr(e)})) def _upload_and_activate(self, req, image_meta): """ Safely uploads the image data in the request payload and activates the image in the registry after a successful upload. 
:param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :retval Mapping of updated image data """ image_id = image_meta['id'] # This is necessary because of a bug in Webob 1.0.2 - 1.0.7 # See: https://bitbucket.org/ianb/webob/ # issue/12/fix-for-issue-6-broke-chunked-transfer req.is_body_readable = True location = self._upload(req, image_meta) return self._activate(req, image_id, location) if location else None def _get_size(self, context, image_meta, location): # retrieve the image size from remote store (if not provided) return image_meta.get('size', 0) or get_size_from_backend(context, location) def _handle_source(self, req, image_id, image_meta, image_data): if image_data: image_meta = self._validate_image_for_activation(req, image_id, image_meta) image_meta = self._upload_and_activate(req, image_meta) elif self._copy_from(req): msg = _('Triggering asynchronous copy from external source') LOG.info(msg) self.pool.spawn_n(self._upload_and_activate, req, image_meta) else: location = image_meta.get('location') if location: self._validate_image_for_activation(req, image_id, image_meta) image_meta = self._activate(req, image_id, location) return image_meta def _validate_image_for_activation(self, req, id, values): """Ensures that all required image metadata values are valid.""" image = self.get_image_meta_or_404(req, id) if not 'disk_format' in values: values['disk_format'] = image['disk_format'] if not 'container_format' in values: values['container_format'] = image['container_format'] if not 'name' in values: values['name'] = image['name'] values = validate_image_meta(req, values) return values @utils.mutating def create(self, req, image_meta, image_data): """ Adds a new image to Glance. Four scenarios exist when creating an image: 1. If the image data is available directly for upload, create can be passed the image data as the request body and the metadata as the request headers. 
The image will initially be 'queued', during upload it will be in the 'saving' status, and then 'killed' or 'active' depending on whether the upload completed successfully. 2. If the image data exists somewhere else, you can upload indirectly from the external source using the x-glance-api-copy-from header. Once the image is uploaded, the external store is not subsequently consulted, i.e. the image content is served out from the configured glance image store. State transitions are as for option #1. 3. If the image data exists somewhere else, you can reference the source using the x-image-meta-location header. The image content will be served out from the external store, i.e. is never uploaded to the configured glance image store. 4. If the image data is not available yet, but you'd like reserve a spot for it, you can omit the data and a record will be created in the 'queued' state. This exists primarily to maintain backwards compatibility with OpenStack/Rackspace API semantics. The request body *must* be encoded as application/octet-stream, otherwise an HTTPBadRequest is returned. Upon a successful save of the image data and metadata, a response containing metadata about the image is returned, including its opaque identifier. :param req: The WSGI/Webob Request object :param image_meta: Mapping of metadata about image :param image_data: Actual image data that is to be stored :raises HTTPBadRequest if x-image-meta-location is missing and the request body is not application/octet-stream image data. 
""" self._enforce(req, 'add_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') image_meta = self._reserve(req, image_meta) id = image_meta['id'] image_meta = self._handle_source(req, id, image_meta, image_data) location_uri = image_meta.get('location') if location_uri: self.update_store_acls(req, id, location_uri, public=is_public) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} @utils.mutating def update(self, req, id, image_meta, image_data): """ Updates an existing image with the registry. :param request: The WSGI/Webob Request object :param id: The opaque image identifier :retval Returns the updated image information as a mapping """ self._enforce(req, 'modify_image') is_public = image_meta.get('is_public') if is_public: self._enforce(req, 'publicize_image') orig_image_meta = self.get_image_meta_or_404(req, id) orig_status = orig_image_meta['status'] # Do not allow any updates on a deleted image. # Fix for LP Bug #1060930 if orig_status == 'deleted': msg = _("Forbidden to update deleted image.") raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") # The default behaviour for a PUT /images/<IMAGE_ID> is to # override any properties that were previously set. This, however, # leads to a number of issues for the common use case where a caller # registers an image with some properties and then almost immediately # uploads an image file along with some more properties. Here, we # check for a special header value to be false in order to force # properties NOT to be purged. However we also disable purging of # properties if an image file is being uploaded... 
purge_props = req.headers.get('x-glance-registry-purge-props', True) purge_props = (utils.bool_from_string(purge_props) and image_data is None) if image_data is not None and orig_status != 'queued': raise HTTPConflict(_("Cannot upload to an unqueued image")) # Only allow the Location|Copy-From fields to be modified if the # image is in queued status, which indicates that the user called # POST /images but originally supply neither a Location|Copy-From # field NOR image data location = self._external_source(image_meta, req) reactivating = orig_status != 'queued' and location activating = orig_status == 'queued' and (location or image_data) # Make image public in the backend store (if implemented) orig_or_updated_loc = location or orig_image_meta.get('location', None) if orig_or_updated_loc: self.update_store_acls(req, id, orig_or_updated_loc, public=is_public) if reactivating: msg = _("Attempted to update Location field for an image " "not in queued status.") raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") try: if location: image_meta['size'] = self._get_size(req.context, image_meta, location) image_meta = registry.update_image_metadata(req.context, id, image_meta, purge_props) if activating: image_meta = self._handle_source(req, id, image_meta, image_data) except exception.Invalid, e: msg = (_("Failed to update image metadata. 
Got error: %(e)s") % locals()) for line in msg.split('\n'): LOG.error(line) raise HTTPBadRequest(explanation=msg, request=req, content_type="text/plain") except exception.NotFound, e: msg = ("Failed to find image to update: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to update image: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: self.notifier.info('image.update', image_meta) # Prevent client from learning the location, as it # could contain security credentials image_meta.pop('location', None) return {'image_meta': image_meta} @utils.mutating def delete(self, req, id): """ Deletes the image and all its chunks from the Glance :param req: The WSGI/Webob Request object :param id: The opaque image identifier :raises HttpBadRequest if image registry is invalid :raises HttpNotFound if image or any chunk is not available :raises HttpUnauthorized if image or any chunk is not deleteable by the requesting user """ self._enforce(req, 'delete_image') image = self.get_image_meta_or_404(req, id) if image['protected']: msg = _("Image is protected") LOG.debug(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") if image['status'] == 'deleted': msg = _("Forbidden to delete a deleted image.") LOG.debug(msg) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") status = 'deleted' try: # The image's location field may be None in the case # of a saving or queued image, therefore don't ask a backend # to delete the image if the backend doesn't yet store it. 
# See https://bugs.launchpad.net/glance/+bug/747799 if image['location']: if CONF.delayed_delete: status = 'pending_delete' schedule_delayed_delete_from_backend(image['location'], id) else: safe_delete_from_backend(image['location'], req.context, id) registry.update_image_metadata(req.context, id, {'status': status}) registry.delete_image_metadata(req.context, id) except exception.NotFound, e: msg = ("Failed to find image to delete: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") except exception.Forbidden, e: msg = ("Forbidden to delete image: %(e)s" % locals()) for line in msg.split('\n'): LOG.info(line) raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") else: self.notifier.info('image.delete', image) def get_store_or_400(self, request, scheme): """ Grabs the storage backend for the supplied store name or raises an HTTPBadRequest (400) response :param request: The WSGI/Webob Request object :param scheme: The backend store scheme :raises HTTPNotFound if store does not exist """ try: return get_store_from_scheme(request.context, scheme) except exception.UnknownScheme: msg = _("Store for scheme %s not found") LOG.error(msg % scheme) raise HTTPBadRequest(explanation=msg, request=request, content_type='text/plain') def verify_scheme_or_exit(self, scheme): """ Verifies availability of the storage backend for the given scheme or exits :param scheme: The backend store scheme """ try: get_store_from_scheme(context.RequestContext(), scheme) except exception.UnknownScheme: msg = _("Store for scheme %s not found") LOG.error(msg % scheme) # message on stderr will only be visible if started directly via # bin/glance-api, as opposed to being daemonized by glance-control sys.stderr.write(msg % scheme) sys.exit(255) class ImageDeserializer(wsgi.JSONRequestDeserializer): """Handles deserialization of specific controller method requests.""" def _deserialize(self, 
request): result = {} try: result['image_meta'] = utils.get_image_meta_from_headers(request) except exception.Invalid: image_size_str = request.headers['x-image-meta-size'] msg = _("Incoming image size of %s was not convertible to " "an integer.") % image_size_str raise HTTPBadRequest(explanation=msg, request=request) image_meta = result['image_meta'] image_meta = validate_image_meta(request, image_meta) if request.content_length: image_size = request.content_length elif 'size' in image_meta: image_size = image_meta['size'] else: image_size = None data = request.body_file if self.has_body(request) else None if image_size is None and data is not None: data = utils.LimitingReader(data, CONF.image_size_cap) #NOTE(bcwaldon): this is a hack to make sure the downstream code # gets the correct image data request.body_file = data elif image_size > CONF.image_size_cap: max_image_size = CONF.image_size_cap msg = _("Denying attempt to upload image larger than %d bytes.") LOG.warn(msg % max_image_size) raise HTTPBadRequest(explanation=msg % max_image_size, request=request) result['image_data'] = data return result def create(self, request): return self._deserialize(request) def update(self, request): return self._deserialize(request) class ImageSerializer(wsgi.JSONResponseSerializer): """Handles serialization of specific controller method responses.""" def __init__(self): self.notifier = notifier.Notifier() def _inject_location_header(self, response, image_meta): location = self._get_image_location(image_meta) response.headers['Location'] = location.encode('utf-8') def _inject_checksum_header(self, response, image_meta): if image_meta['checksum'] is not None: response.headers['ETag'] = image_meta['checksum'].encode('utf-8') def _inject_image_meta_headers(self, response, image_meta): """ Given a response and mapping of image metadata, injects the Response with a set of HTTP headers for the image metadata. 
Each main image metadata field is injected as a HTTP header with key 'x-image-meta-<FIELD>' except for the properties field, which is further broken out into a set of 'x-image-meta-property-<KEY>' headers :param response: The Webob Response object :param image_meta: Mapping of image metadata """ headers = utils.image_meta_to_http_headers(image_meta) for k, v in headers.items(): response.headers[k.encode('utf-8')] = v.encode('utf-8') def _get_image_location(self, image_meta): """Build a relative url to reach the image defined by image_meta.""" return "/v1/images/%s" % image_meta['id'] def meta(self, response, result): image_meta = result['image_meta'] self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def show(self, response, result): image_meta = result['image_meta'] image_id = image_meta['id'] image_iter = result['image_iterator'] # image_meta['size'] should be an int, but could possibly be a str expected_size = int(image_meta['size']) response.app_iter = common.size_checked_iter( response, image_meta, expected_size, image_iter, self.notifier) # Using app_iter blanks content-length, so we set it here... 
response.headers['Content-Length'] = str(image_meta['size']) response.headers['Content-Type'] = 'application/octet-stream' self._inject_image_meta_headers(response, image_meta) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def update(self, response, result): image_meta = result['image_meta'] response.body = self.to_json(dict(image=image_meta)) response.headers['Content-Type'] = 'application/json' self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create(self, response, result): image_meta = result['image_meta'] response.status = 201 response.headers['Content-Type'] = 'application/json' response.body = self.to_json(dict(image=image_meta)) self._inject_location_header(response, image_meta) self._inject_checksum_header(response, image_meta) return response def create_resource(): """Images resource factory method""" deserializer = ImageDeserializer() serializer = ImageSerializer() return wsgi.Resource(Controller(), deserializer, serializer)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3771_0
crossvul-python_data_bad_3634_3
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the nova.db namespace. Call these functions from nova.db namespace, not the nova.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :db_backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. 
:enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from nova import exception from nova import flags from nova.openstack.common import cfg from nova import utils db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for db'), cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('instance_name_template', default='instance-%08x', help='Template string to be used to generate instance names'), cfg.StrOpt('volume_name_template', default='volume-%08x', help='Template string to be used to generate instance names'), cfg.StrOpt('snapshot_name_template', default='snapshot-%08x', help='Template string to be used to generate snapshot names'), ] FLAGS = flags.FLAGS FLAGS.register_opts(db_opts) IMPL = utils.LazyPluggable('db_backend', sqlalchemy='nova.db.sqlalchemy.api') class NoMoreNetworks(exception.Error): """No more available networks.""" pass class NoMoreTargets(exception.Error): """No more available targets""" pass ################### def service_destroy(context, instance_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, instance_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def 
service_get_all_compute_by_host(context, host): """Get all compute services for a given host.""" return IMPL.service_get_all_compute_by_host(context, host) def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count. :returns: a list of (Service, instance_count) tuples. """ return IMPL.service_get_all_compute_sorted(context) def service_get_all_volume_sorted(context): """Get all volume services sorted by volume count. :returns: a list of (Service, volume_count) tuples. """ return IMPL.service_get_all_volume_sorted(context) def service_get_by_args(context, host, binary): """Get the state of an service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on an service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values) ################### def compute_node_get(context, compute_id): """Get an computeNode or raise if it does not exist.""" return IMPL.compute_node_get(context, compute_id) def compute_node_get_all(context): """Get all computeNodes.""" return IMPL.compute_node_get_all(context) def compute_node_create(context, values): """Create a computeNode from the values dictionary.""" return IMPL.compute_node_create(context, values) def compute_node_update(context, compute_id, values, auto_adjust=True): """Set the given properties on an computeNode and update it. Raises NotFound if computeNode does not exist. 
""" return IMPL.compute_node_update(context, compute_id, values, auto_adjust) def compute_node_get_by_host(context, host): return IMPL.compute_node_get_by_host(context, host) def compute_node_utilization_update(context, host, free_ram_mb_delta=0, free_disk_gb_delta=0, work_delta=0, vm_delta=0): return IMPL.compute_node_utilization_update(context, host, free_ram_mb_delta, free_disk_gb_delta, work_delta, vm_delta) def compute_node_utilization_set(context, host, free_ram_mb=None, free_disk_gb=None, work=None, vms=None): return IMPL.compute_node_utilization_set(context, host, free_ram_mb, free_disk_gb, work, vms) ################### def certificate_create(context, values): """Create a certificate from the values dictionary.""" return IMPL.certificate_create(context, values) def certificate_get_all_by_project(context, project_id): """Get all certificates for a project.""" return IMPL.certificate_get_all_by_project(context, project_id) def certificate_get_all_by_user(context, user_id): """Get all certificates for a user.""" return IMPL.certificate_get_all_by_user(context, user_id) def certificate_get_all_by_user_and_project(context, user_id, project_id): """Get all certificates for a user and project.""" return IMPL.certificate_get_all_by_user_and_project(context, user_id, project_id) ################### def floating_ip_get(context, id): return IMPL.floating_ip_get(context, id) def floating_ip_get_pools(context): """Returns a list of floating ip pools""" return IMPL.floating_ip_get_pools(context) def floating_ip_allocate_address(context, project_id, pool): """Allocate free floating ip from specified pool and return the address. Raises if one is not available. 
""" return IMPL.floating_ip_allocate_address(context, project_id, pool) def floating_ip_create(context, values): """Create a floating ip from the values dictionary.""" return IMPL.floating_ip_create(context, values) def floating_ip_count_by_project(context, project_id): """Count floating ips used by project.""" return IMPL.floating_ip_count_by_project(context, project_id) def floating_ip_deallocate(context, address): """Deallocate an floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) def floating_ip_destroy(context, address): """Destroy the floating_ip or raise if it does not exist.""" return IMPL.floating_ip_destroy(context, address) def floating_ip_disassociate(context, address): """Disassociate an floating ip from a fixed ip by address. :returns: the address of the existing fixed ip. """ return IMPL.floating_ip_disassociate(context, address) def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): """Associate an floating ip to a fixed_ip by address.""" return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host) def floating_ip_get_all(context): """Get all floating ips.""" return IMPL.floating_ip_get_all(context) def floating_ip_get_all_by_host(context, host): """Get all floating ips by host.""" return IMPL.floating_ip_get_all_by_host(context, host) def floating_ip_get_all_by_project(context, project_id): """Get all floating ips by project.""" return IMPL.floating_ip_get_all_by_project(context, project_id) def floating_ip_get_by_address(context, address): """Get a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_get_by_address(context, address) def floating_ip_get_by_fixed_address(context, fixed_address): """Get a floating ips by fixed address""" return IMPL.floating_ip_get_by_fixed_address(context, fixed_address) def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): """Get a floating ips by fixed address""" return 
IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id) def floating_ip_update(context, address, values): """Update a floating ip by address or raise if it doesn't exist.""" return IMPL.floating_ip_update(context, address, values) def floating_ip_set_auto_assigned(context, address): """Set auto_assigned flag to floating ip""" return IMPL.floating_ip_set_auto_assigned(context, address) def dnsdomain_list(context): """Get a list of all zones in our database, public and private.""" return IMPL.dnsdomain_list(context) def dnsdomain_register_for_zone(context, fqdomain, zone): """Associated a DNS domain with an availability zone""" return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone) def dnsdomain_register_for_project(context, fqdomain, project): """Associated a DNS domain with a project id""" return IMPL.dnsdomain_register_for_project(context, fqdomain, project) def dnsdomain_unregister(context, fqdomain): """Purge associations for the specified DNS zone""" return IMPL.dnsdomain_unregister(context, fqdomain) def dnsdomain_get(context, fqdomain): """Get the db record for the specified domain.""" return IMPL.dnsdomain_get(context, fqdomain) #################### def migration_update(context, id, values): """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def migration_get_by_instance_and_status(context, instance_uuid, status): """Finds a migration by the instance uuid its migrating.""" return IMPL.migration_get_by_instance_and_status(context, instance_uuid, status) def migration_get_all_unconfirmed(context, confirm_window): """Finds all unconfirmed migrations within the confirmation window.""" return IMPL.migration_get_all_unconfirmed(context, confirm_window) #################### def 
fixed_ip_associate(context, address, instance_id, network_id=None, reserved=False): """Associate fixed ip to instance. Raises if fixed ip is not available. """ return IMPL.fixed_ip_associate(context, address, instance_id, network_id, reserved) def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None): """Find free ip in network and associate it to instance or host. Raises if one is not available. """ return IMPL.fixed_ip_associate_pool(context, network_id, instance_id, host) def fixed_ip_create(context, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_create(context, values) def fixed_ip_bulk_create(context, ips): """Create a lot of fixed ips from the values dictionary.""" return IMPL.fixed_ip_bulk_create(context, ips) def fixed_ip_disassociate(context, address): """Disassociate a fixed ip from an instance by address.""" return IMPL.fixed_ip_disassociate(context, address) def fixed_ip_disassociate_all_by_timeout(context, host, time): """Disassociate old fixed ips from host.""" return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) def fixed_ip_get(context, id): """Get fixed ip by id or raise if it does not exist.""" return IMPL.fixed_ip_get(context, id) def fixed_ip_get_all(context): """Get all defined fixed ips.""" return IMPL.fixed_ip_get_all(context) def fixed_ip_get_by_address(context, address): """Get a fixed ip by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address(context, address) def fixed_ip_get_by_instance(context, instance_id): """Get fixed ips by instance or raise if none exist.""" return IMPL.fixed_ip_get_by_instance(context, instance_id) def fixed_ip_get_by_network_host(context, network_id, host): """Get fixed ip for a host in a network.""" return IMPL.fixed_ip_get_by_network_host(context, network_id, host) def fixed_ips_by_virtual_interface(context, vif_id): """Get fixed ips by virtual interface or raise if none exist.""" return 
IMPL.fixed_ips_by_virtual_interface(context, vif_id) def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return IMPL.fixed_ip_get_network(context, address) def fixed_ip_update(context, address, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_update(context, address, values) #################### def virtual_interface_create(context, values): """Create a virtual interface record in the database.""" return IMPL.virtual_interface_create(context, values) def virtual_interface_get(context, vif_id): """Gets a virtual interface from the table,""" return IMPL.virtual_interface_get(context, vif_id) def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table filtering on address.""" return IMPL.virtual_interface_get_by_address(context, address) def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table filtering on vif uuid.""" return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) def virtual_interface_get_by_instance(context, instance_id): """Gets all virtual_interfaces for instance.""" return IMPL.virtual_interface_get_by_instance(context, instance_id) def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets all virtual interfaces for instance.""" return IMPL.virtual_interface_get_by_instance_and_network(context, instance_id, network_id) def virtual_interface_delete(context, vif_id): """Delete virtual interface record from the database.""" return IMPL.virtual_interface_delete(context, vif_id) def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records associated with instance.""" return IMPL.virtual_interface_delete_by_instance(context, instance_id) def virtual_interface_get_all(context): """Gets all virtual interfaces from the table""" return IMPL.virtual_interface_get_all(context) #################### def instance_create(context, values): 
"""Create an instance from the values dictionary.""" return IMPL.instance_create(context, values) def instance_data_get_for_project(context, project_id): """Get (instance_count, total_cores, total_ram) for project.""" return IMPL.instance_data_get_for_project(context, project_id) def instance_destroy(context, instance_id): """Destroy the instance or raise if it does not exist.""" return IMPL.instance_destroy(context, instance_id) def instance_get_by_uuid(context, uuid): """Get an instance or raise if it does not exist.""" return IMPL.instance_get_by_uuid(context, uuid) def instance_get(context, instance_id): """Get an instance or raise if it does not exist.""" return IMPL.instance_get(context, instance_id) def instance_get_all(context): """Get all instances.""" return IMPL.instance_get_all(context) def instance_get_all_by_filters(context, filters, sort_key='created_at', sort_dir='desc'): """Get all instances that match all filters.""" return IMPL.instance_get_all_by_filters(context, filters, sort_key, sort_dir) def instance_get_active_by_window(context, begin, end=None, project_id=None): """Get instances active during a certain time window. Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window(context, begin, end, project_id) def instance_get_active_by_window_joined(context, begin, end=None, project_id=None): """Get instances and joins active during a certain time window. 
Specifying a project_id will filter for a certain project.""" return IMPL.instance_get_active_by_window_joined(context, begin, end, project_id) def instance_get_all_by_project(context, project_id): """Get all instance belonging to a project.""" return IMPL.instance_get_all_by_project(context, project_id) def instance_get_all_by_host(context, host): """Get all instance belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) def instance_get_all_by_reservation(context, reservation_id): """Get all instances belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) def instance_get_floating_address(context, instance_id): """Get the first floating ip address of an instance.""" return IMPL.instance_get_floating_address(context, instance_id) def instance_get_all_hung_in_rebooting(context, reboot_window): """Get all instances stuck in a rebooting state.""" return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window) def instance_test_and_set(context, instance_id, attr, ok_states, new_state): """Atomically check if an instance is in a valid state, and if it is, set the instance into a new state. """ return IMPL.instance_test_and_set( context, instance_id, attr, ok_states, new_state) def instance_update(context, instance_id, values): """Set the given properties on an instance and update it. Raises NotFound if instance does not exist. 
""" return IMPL.instance_update(context, instance_id, values) def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance.""" return IMPL.instance_add_security_group(context, instance_id, security_group_id) def instance_remove_security_group(context, instance_id, security_group_id): """Disassociate the given security group from the given instance.""" return IMPL.instance_remove_security_group(context, instance_id, security_group_id) def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) def instance_get_actions(context, instance_uuid): """Get instance actions by instance uuid.""" return IMPL.instance_get_actions(context, instance_uuid) def instance_get_id_to_uuid_mapping(context, ids): """Return a dictionary containing 'ID: UUID' given the ids""" return IMPL.instance_get_id_to_uuid_mapping(context, ids) ################### def instance_info_cache_create(context, values): """Create a new instance cache record in the table. :param context: = request context object :param values: = dict containing column values """ return IMPL.instance_info_cache_create(context, values) def instance_info_cache_get(context, instance_uuid): """Gets an instance info cache from the table. :param instance_uuid: = uuid of the info cache's instance """ return IMPL.instance_info_cache_get(context, instance_uuid) def instance_info_cache_update(context, instance_uuid, values): """Update an instance info cache record in the table. 
:param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update """ return IMPL.instance_info_cache_update(context, instance_uuid, values) def instance_info_cache_delete(context, instance_uuid): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record """ return IMPL.instance_info_cache_delete(context, instance_uuid) ################### def key_pair_create(context, values): """Create a key_pair from the values dictionary.""" return IMPL.key_pair_create(context, values) def key_pair_destroy(context, user_id, name): """Destroy the key_pair or raise if it does not exist.""" return IMPL.key_pair_destroy(context, user_id, name) def key_pair_destroy_all_by_user(context, user_id): """Destroy all key_pairs by user.""" return IMPL.key_pair_destroy_all_by_user(context, user_id) def key_pair_get(context, user_id, name): """Get a key_pair or raise if it does not exist.""" return IMPL.key_pair_get(context, user_id, name) def key_pair_get_all_by_user(context, user_id): """Get all key_pairs by user.""" return IMPL.key_pair_get_all_by_user(context, user_id) #################### def network_associate(context, project_id, force=False): """Associate a free network to a project.""" return IMPL.network_associate(context, project_id, force) def network_count(context): """Return the number of networks.""" return IMPL.network_count(context) def network_count_reserved_ips(context, network_id): """Return the number of reserved ips in the network.""" return IMPL.network_count_reserved_ips(context, network_id) def network_create_safe(context, values): """Create a network from the values dict. The network is only returned if the create succeeds. If the create violates constraints because the network already exists, no exception is raised. """ return IMPL.network_create_safe(context, values) def network_delete_safe(context, network_id): """Delete network with key network_id. 
This method assumes that the network is not associated with any project """ return IMPL.network_delete_safe(context, network_id) def network_create_fixed_ips(context, network_id, num_vpn_clients): """Create the ips for the network, reserving sepecified ips.""" return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients) def network_disassociate(context, network_id): """Disassociate the network from project or raise if it does not exist.""" return IMPL.network_disassociate(context, network_id) def network_get(context, network_id): """Get an network or raise if it does not exist.""" return IMPL.network_get(context, network_id) def network_get_all(context): """Return all defined networks.""" return IMPL.network_get_all(context) def network_get_all_by_uuids(context, network_uuids, project_id=None): """Return networks by ids.""" return IMPL.network_get_all_by_uuids(context, network_uuids, project_id) # pylint: disable=C0103 def network_get_associated_fixed_ips(context, network_id, host=None): """Get all network's ips that have been associated.""" return IMPL.network_get_associated_fixed_ips(context, network_id, host) def network_get_by_bridge(context, bridge): """Get a network by bridge or raise if it does not exist.""" return IMPL.network_get_by_bridge(context, bridge) def network_get_by_uuid(context, uuid): """Get a network by uuid or raise if it does not exist.""" return IMPL.network_get_by_uuid(context, uuid) def network_get_by_cidr(context, cidr): """Get a network by cidr or raise if it does not exist""" return IMPL.network_get_by_cidr(context, cidr) def network_get_by_instance(context, instance_id): """Get a network by instance id or raise if it does not exist.""" return IMPL.network_get_by_instance(context, instance_id) def network_get_all_by_instance(context, instance_id): """Get all networks by instance id or raise if none exist.""" return IMPL.network_get_all_by_instance(context, instance_id) def network_get_all_by_host(context, host): """All 
networks for which the given host is the network host.""" return IMPL.network_get_all_by_host(context, host) def network_get_index(context, network_id): """Get non-conflicting index for network.""" return IMPL.network_get_index(context, network_id) def network_set_cidr(context, network_id, cidr): """Set the Classless Inner Domain Routing for the network.""" return IMPL.network_set_cidr(context, network_id, cidr) def network_set_host(context, network_id, host_id): """Safely set the host for network.""" return IMPL.network_set_host(context, network_id, host_id) def network_update(context, network_id, values): """Set the given properties on an network and update it. Raises NotFound if network does not exist. """ return IMPL.network_update(context, network_id, values) ################### def queue_get_for(context, topic, physical_node_id): """Return a channel to send a message to a node with a topic.""" return IMPL.queue_get_for(context, topic, physical_node_id) ################### def iscsi_target_count_by_host(context, host): """Return count of export devices.""" return IMPL.iscsi_target_count_by_host(context, host) def iscsi_target_create_safe(context, values): """Create an iscsi_target from the values dictionary. The device is not returned. If the create violates the unique constraints because the iscsi_target and host already exist, no exception is raised. 
""" return IMPL.iscsi_target_create_safe(context, values) ############### def auth_token_destroy(context, token_id): """Destroy an auth token.""" return IMPL.auth_token_destroy(context, token_id) def auth_token_get(context, token_hash): """Retrieves a token given the hash representing it.""" return IMPL.auth_token_get(context, token_hash) def auth_token_update(context, token_hash, values): """Updates a token given the hash representing it.""" return IMPL.auth_token_update(context, token_hash, values) def auth_token_create(context, token): """Creates a new token.""" return IMPL.auth_token_create(context, token) ################### def quota_create(context, project_id, resource, limit): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit) def quota_get(context, project_id, resource): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_update(context, project_id, resource, limit): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit) def quota_destroy(context, project_id, resource): """Destroy the quota or raise if it does not exist.""" return IMPL.quota_destroy(context, project_id, resource) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) ################### def volume_allocate_iscsi_target(context, volume_id, host): """Atomically allocate a free iscsi_target from the pool.""" return IMPL.volume_allocate_iscsi_target(context, volume_id, host) def volume_attached(context, volume_id, instance_id, mountpoint): """Ensure that a volume is set as attached.""" return IMPL.volume_attached(context, 
volume_id, instance_id, mountpoint) def volume_create(context, values): """Create a volume from the values dictionary.""" return IMPL.volume_create(context, values) def volume_data_get_for_project(context, project_id): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_project(context, project_id) def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return IMPL.volume_destroy(context, volume_id) def volume_detached(context, volume_id): """Ensure that a volume is set as detached.""" return IMPL.volume_detached(context, volume_id) def volume_get(context, volume_id): """Get a volume or raise if it does not exist.""" return IMPL.volume_get(context, volume_id) def volume_get_all(context): """Get all volumes.""" return IMPL.volume_get_all(context) def volume_get_all_by_host(context, host): """Get all volumes belonging to a host.""" return IMPL.volume_get_all_by_host(context, host) def volume_get_all_by_instance(context, instance_id): """Get all volumes belonging to a instance.""" return IMPL.volume_get_all_by_instance(context, instance_id) def volume_get_all_by_project(context, project_id): """Get all volumes belonging to a project.""" return IMPL.volume_get_all_by_project(context, project_id) def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) def volume_get_instance(context, volume_id): """Get the instance that a volume is attached to.""" return IMPL.volume_get_instance(context, volume_id) def volume_get_iscsi_target_num(context, volume_id): """Get the target num (tid) allocated to the volume.""" return IMPL.volume_get_iscsi_target_num(context, volume_id) def volume_update(context, volume_id, values): """Set the given properties on an volume and update it. Raises NotFound if volume does not exist. 
""" return IMPL.volume_update(context, volume_id, values) #################### def snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): """Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context): """Get all snapshots.""" return IMPL.snapshot_get_all(context) def snapshot_get_all_by_project(context, project_id): """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id) def snapshot_get_all_for_volume(context, volume_id): """Get all snapshots for a volume.""" return IMPL.snapshot_get_all_for_volume(context, volume_id) def snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. """ return IMPL.snapshot_update(context, snapshot_id, values) #################### def block_device_mapping_create(context, values): """Create an entry of block device mapping""" return IMPL.block_device_mapping_create(context, values) def block_device_mapping_update(context, bdm_id, values): """Update an entry of block device mapping""" return IMPL.block_device_mapping_update(context, bdm_id, values) def block_device_mapping_update_or_create(context, values): """Update an entry of block device mapping. 
If not existed, create a new entry""" return IMPL.block_device_mapping_update_or_create(context, values) def block_device_mapping_get_all_by_instance(context, instance_id): """Get all block device mapping belonging to a instance""" return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) def block_device_mapping_destroy(context, bdm_id): """Destroy the block device mapping.""" return IMPL.block_device_mapping_destroy(context, bdm_id) def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, volume_id): """Destroy the block device mapping or raise if it does not exist.""" return IMPL.block_device_mapping_destroy_by_instance_and_volume( context, instance_id, volume_id) #################### def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) def security_group_get(context, security_group_id): """Get security group by its id.""" return IMPL.security_group_get(context, security_group_id) def security_group_get_by_name(context, project_id, group_name): """Returns a security group with the specified name from a project.""" return IMPL.security_group_get_by_name(context, project_id, group_name) def security_group_get_by_project(context, project_id): """Get all security groups belonging to a project.""" return IMPL.security_group_get_by_project(context, project_id) def security_group_get_by_instance(context, instance_id): """Get security groups to which the instance is assigned.""" return IMPL.security_group_get_by_instance(context, instance_id) def security_group_exists(context, project_id, group_name): """Indicates if a group name exists in a project.""" return IMPL.security_group_exists(context, project_id, group_name) def security_group_in_use(context, group_id): """Indicates if a security group is currently in use.""" return IMPL.security_group_in_use(context, group_id) def security_group_create(context, values): """Create a new security group.""" return 
IMPL.security_group_create(context, values) def security_group_destroy(context, security_group_id): """Deletes a security group.""" return IMPL.security_group_destroy(context, security_group_id) #################### def security_group_rule_create(context, values): """Create a new security group.""" return IMPL.security_group_rule_create(context, values) def security_group_rule_get_by_security_group(context, security_group_id): """Get all rules for a a given security group.""" return IMPL.security_group_rule_get_by_security_group(context, security_group_id) def security_group_rule_get_by_security_group_grantee(context, security_group_id): """Get all rules that grant access to the given security group.""" return IMPL.security_group_rule_get_by_security_group_grantee(context, security_group_id) def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) def security_group_rule_get(context, security_group_rule_id): """Gets a security group rule.""" return IMPL.security_group_rule_get(context, security_group_rule_id) ################### def provider_fw_rule_create(context, rule): """Add a firewall rule at the provider level (all hosts & instances).""" return IMPL.provider_fw_rule_create(context, rule) def provider_fw_rule_get_all(context): """Get all provider-level firewall rules.""" return IMPL.provider_fw_rule_get_all(context) def provider_fw_rule_destroy(context, rule_id): """Delete a provider firewall rule from the database.""" return IMPL.provider_fw_rule_destroy(context, rule_id) ################### def user_get(context, id): """Get user by id.""" return IMPL.user_get(context, id) def user_get_by_uid(context, uid): """Get user by uid.""" return IMPL.user_get_by_uid(context, uid) def user_get_by_access_key(context, access_key): """Get user by access key.""" return IMPL.user_get_by_access_key(context, access_key) def user_create(context, values): 
"""Create a new user.""" return IMPL.user_create(context, values) def user_delete(context, id): """Delete a user.""" return IMPL.user_delete(context, id) def user_get_all(context): """Create a new user.""" return IMPL.user_get_all(context) def user_add_role(context, user_id, role): """Add another global role for user.""" return IMPL.user_add_role(context, user_id, role) def user_remove_role(context, user_id, role): """Remove global role from user.""" return IMPL.user_remove_role(context, user_id, role) def user_get_roles(context, user_id): """Get global roles for user.""" return IMPL.user_get_roles(context, user_id) def user_add_project_role(context, user_id, project_id, role): """Add project role for user.""" return IMPL.user_add_project_role(context, user_id, project_id, role) def user_remove_project_role(context, user_id, project_id, role): """Remove project role from user.""" return IMPL.user_remove_project_role(context, user_id, project_id, role) def user_get_roles_for_project(context, user_id, project_id): """Return list of roles a user holds on project.""" return IMPL.user_get_roles_for_project(context, user_id, project_id) def user_update(context, user_id, values): """Update user.""" return IMPL.user_update(context, user_id, values) ################### def project_get(context, id): """Get project by id.""" return IMPL.project_get(context, id) def project_create(context, values): """Create a new project.""" return IMPL.project_create(context, values) def project_add_member(context, project_id, user_id): """Add user to project.""" return IMPL.project_add_member(context, project_id, user_id) def project_get_all(context): """Get all projects.""" return IMPL.project_get_all(context) def project_get_by_user(context, user_id): """Get all projects of which the given user is a member.""" return IMPL.project_get_by_user(context, user_id) def project_remove_member(context, project_id, user_id): """Remove the given user from the given project.""" return 
IMPL.project_remove_member(context, project_id, user_id) def project_update(context, project_id, values): """Update Remove the given user from the given project.""" return IMPL.project_update(context, project_id, values) def project_delete(context, project_id): """Delete project.""" return IMPL.project_delete(context, project_id) def project_get_networks(context, project_id, associate=True): """Return the network associated with the project. If associate is true, it will attempt to associate a new network if one is not found, otherwise it returns None. """ return IMPL.project_get_networks(context, project_id, associate) ################### def console_pool_create(context, values): """Create console pool.""" return IMPL.console_pool_create(context, values) def console_pool_get(context, pool_id): """Get a console pool.""" return IMPL.console_pool_get(context, pool_id) def console_pool_get_by_host_type(context, compute_host, proxy_host, console_type): """Fetch a console pool for a given proxy host, compute host, and type.""" return IMPL.console_pool_get_by_host_type(context, compute_host, proxy_host, console_type) def console_pool_get_all_by_host_type(context, host, console_type): """Fetch all pools for given proxy host and type.""" return IMPL.console_pool_get_all_by_host_type(context, host, console_type) def console_create(context, values): """Create a console.""" return IMPL.console_create(context, values) def console_delete(context, console_id): """Delete a console.""" return IMPL.console_delete(context, console_id) def console_get_by_pool_instance(context, pool_id, instance_id): """Get console entry for a given instance and pool.""" return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) def console_get_all_by_instance(context, instance_id): """Get consoles for a given instance.""" return IMPL.console_get_all_by_instance(context, instance_id) def console_get(context, console_id, instance_id=None): """Get a specific console (possibly on a given 
instance).""" return IMPL.console_get(context, console_id, instance_id) ################## def instance_type_create(context, values): """Create a new instance type.""" return IMPL.instance_type_create(context, values) def instance_type_get_all(context, inactive=False, filters=None): """Get all instance types.""" return IMPL.instance_type_get_all( context, inactive=inactive, filters=filters) def instance_type_get(context, id): """Get instance type by id.""" return IMPL.instance_type_get(context, id) def instance_type_get_by_name(context, name): """Get instance type by name.""" return IMPL.instance_type_get_by_name(context, name) def instance_type_get_by_flavor_id(context, id): """Get instance type by name.""" return IMPL.instance_type_get_by_flavor_id(context, id) def instance_type_destroy(context, name): """Delete a instance type.""" return IMPL.instance_type_destroy(context, name) #################### def cell_create(context, values): """Create a new child Cell entry.""" return IMPL.cell_create(context, values) def cell_update(context, cell_id, values): """Update a child Cell entry.""" return IMPL.cell_update(context, cell_id, values) def cell_delete(context, cell_id): """Delete a child Cell.""" return IMPL.cell_delete(context, cell_id) def cell_get(context, cell_id): """Get a specific child Cell.""" return IMPL.cell_get(context, cell_id) def cell_get_all(context): """Get all child Cells.""" return IMPL.cell_get_all(context) #################### def instance_metadata_get(context, instance_id): """Get all metadata for an instance.""" return IMPL.instance_metadata_get(context, instance_id) def instance_metadata_delete(context, instance_id, key): """Delete the given metadata item.""" IMPL.instance_metadata_delete(context, instance_id, key) def instance_metadata_update(context, instance_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.instance_metadata_update(context, instance_id, metadata, delete) #################### def 
agent_build_create(context, values): """Create a new agent build entry.""" return IMPL.agent_build_create(context, values) def agent_build_get_by_triple(context, hypervisor, os, architecture): """Get agent build by hypervisor/OS/architecture triple.""" return IMPL.agent_build_get_by_triple(context, hypervisor, os, architecture) def agent_build_get_all(context): """Get all agent builds.""" return IMPL.agent_build_get_all(context) def agent_build_destroy(context, agent_update_id): """Destroy agent build entry.""" IMPL.agent_build_destroy(context, agent_update_id) def agent_build_update(context, agent_build_id, values): """Update agent build entry.""" IMPL.agent_build_update(context, agent_build_id, values) #################### def bw_usage_get_by_macs(context, macs, start_period): """Return bw usages for an instance in a given audit period.""" return IMPL.bw_usage_get_by_macs(context, macs, start_period) def bw_usage_update(context, mac, start_period, bw_in, bw_out): """Update cached bw usage for an instance and network Creates new record if needed.""" return IMPL.bw_usage_update(context, mac, start_period, bw_in, bw_out) #################### def instance_type_extra_specs_get(context, instance_type_id): """Get all extra specs for an instance type.""" return IMPL.instance_type_extra_specs_get(context, instance_type_id) def instance_type_extra_specs_delete(context, instance_type_id, key): """Delete the given extra specs item.""" IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) def instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs): """Create or update instance type extra specs. 
This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) ################## def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) def volume_metadata_delete(context, volume_id, key): """Delete the given metadata item.""" IMPL.volume_metadata_delete(context, volume_id, key) def volume_metadata_update(context, volume_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" IMPL.volume_metadata_update(context, volume_id, metadata, delete) ################## def volume_type_create(context, values): """Create a new volume type.""" return IMPL.volume_type_create(context, values) def volume_type_get_all(context, inactive=False): """Get all volume types.""" return IMPL.volume_type_get_all(context, inactive) def volume_type_get(context, id): """Get volume type by id.""" return IMPL.volume_type_get(context, id) def volume_type_get_by_name(context, name): """Get volume type by name.""" return IMPL.volume_type_get_by_name(context, name) def volume_type_destroy(context, name): """Delete a volume type.""" return IMPL.volume_type_destroy(context, name) #################### def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) def volume_type_extra_specs_delete(context, volume_type_id, key): """Delete the given extra specs item.""" IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) def volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs): """Create or update volume type extra specs. 
This adds or modifies the key/value pairs specified in the extra specs dict argument""" IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) ################### def s3_image_get(context, image_id): """Find local s3 image represented by the provided id""" return IMPL.s3_image_get(context, image_id) def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid""" return IMPL.s3_image_get_by_uuid(context, image_uuid) def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid""" return IMPL.s3_image_create(context, image_uuid) #################### def sm_backend_conf_create(context, values): """Create a new SM Backend Config entry.""" return IMPL.sm_backend_conf_create(context, values) def sm_backend_conf_update(context, sm_backend_conf_id, values): """Update a SM Backend Config entry.""" return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values) def sm_backend_conf_delete(context, sm_backend_conf_id): """Delete a SM Backend Config.""" return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id) def sm_backend_conf_get(context, sm_backend_conf_id): """Get a specific SM Backend Config.""" return IMPL.sm_backend_conf_get(context, sm_backend_conf_id) def sm_backend_conf_get_by_sr(context, sr_uuid): """Get a specific SM Backend Config.""" return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid) def sm_backend_conf_get_all(context): """Get all SM Backend Configs.""" return IMPL.sm_backend_conf_get_all(context) #################### def sm_flavor_create(context, values): """Create a new SM Flavor entry.""" return IMPL.sm_flavor_create(context, values) def sm_flavor_update(context, sm_flavor_id, values): """Update a SM Flavor entry.""" return IMPL.sm_flavor_update(context, values) def sm_flavor_delete(context, sm_flavor_id): """Delete a SM Flavor.""" return IMPL.sm_flavor_delete(context, sm_flavor_id) def sm_flavor_get(context, sm_flavor): """Get a 
specific SM Flavor.""" return IMPL.sm_flavor_get(context, sm_flavor) def sm_flavor_get_all(context): """Get all SM Flavors.""" return IMPL.sm_flavor_get_all(context) #################### def sm_volume_create(context, values): """Create a new child Zone entry.""" return IMPL.sm_volume_create(context, values) def sm_volume_update(context, volume_id, values): """Update a child Zone entry.""" return IMPL.sm_volume_update(context, values) def sm_volume_delete(context, volume_id): """Delete a child Zone.""" return IMPL.sm_volume_delete(context, volume_id) def sm_volume_get(context, volume_id): """Get a specific child Zone.""" return IMPL.sm_volume_get(context, volume_id) def sm_volume_get_all(context): """Get all child Zones.""" return IMPL.sm_volume_get_all(context) #################### def aggregate_create(context, values, metadata=None): """Create a new aggregate with metadata.""" return IMPL.aggregate_create(context, values, metadata) def aggregate_get(context, aggregate_id, read_deleted='no'): """Get a specific aggregate by id.""" return IMPL.aggregate_get(context, aggregate_id, read_deleted) def aggregate_get_by_host(context, host, read_deleted='no'): """Get a specific aggregate by host""" return IMPL.aggregate_get_by_host(context, host, read_deleted) def aggregate_update(context, aggregate_id, values): """Update the attributes of an aggregates. If values contains a metadata key, it updates the aggregate metadata too.""" return IMPL.aggregate_update(context, aggregate_id, values) def aggregate_delete(context, aggregate_id): """Delete an aggregate.""" return IMPL.aggregate_delete(context, aggregate_id) def aggregate_get_all(context, read_deleted='yes'): """Get all aggregates.""" return IMPL.aggregate_get_all(context, read_deleted) def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): """Add/update metadata. 
If set_delete=True, it adds only.""" IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete) def aggregate_metadata_get(context, aggregate_id, read_deleted='no'): """Get metadata for the specified aggregate.""" return IMPL.aggregate_metadata_get(context, aggregate_id, read_deleted) def aggregate_metadata_delete(context, aggregate_id, key): """Delete the given metadata key.""" IMPL.aggregate_metadata_delete(context, aggregate_id, key) def aggregate_host_add(context, aggregate_id, host): """Add host to the aggregate.""" IMPL.aggregate_host_add(context, aggregate_id, host) def aggregate_host_get_all(context, aggregate_id, read_deleted='yes'): """Get hosts for the specified aggregate.""" return IMPL.aggregate_host_get_all(context, aggregate_id, read_deleted) def aggregate_host_delete(context, aggregate_id, host): """Delete the given host from the aggregate.""" IMPL.aggregate_host_delete(context, aggregate_id, host) #################### def instance_fault_create(context, values): """Create a new Instance Fault.""" return IMPL.instance_fault_create(context, values) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance faults for the provided instance_uuids.""" return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3634_3
crossvul-python_data_bad_3695_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Identity service.""" import uuid import urllib import urlparse from keystone import config from keystone import exception from keystone import policy from keystone import token from keystone.common import manager from keystone.common import wsgi CONF = config.CONF class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ def __init__(self): super(Manager, self).__init__(CONF.identity.driver) class Driver(object): """Interface description for an Identity driver.""" def authenticate(self, user_id=None, tenant_id=None, password=None): """Authenticate a given user, tenant and password. Returns: (user, tenant, metadata). """ raise exception.NotImplemented() def get_tenant(self, tenant_id): """Get a tenant by id. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_tenant_by_name(self, tenant_name): """Get a tenant by name. Returns: tenant_ref or None. """ raise exception.NotImplemented() def get_user(self, user_id): """Get a user by id. Returns: user_ref or None. """ raise exception.NotImplemented() def get_user_by_name(self, user_name): """Get a user by name. Returns: user_ref or None. """ raise exception.NotImplemented() def get_role(self, role_id): """Get a role by id. 
Returns: role_ref or None. """ raise exception.NotImplemented() def list_users(self): """List all users in the system. NOTE(termie): I'd prefer if this listed only the users for a given tenant. Returns: a list of user_refs or an empty list. """ raise exception.NotImplemented() def list_roles(self): """List all roles in the system. Returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # NOTE(termie): seven calls below should probably be exposed by the api # more clearly when the api redesign happens def add_user_to_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def remove_user_from_tenant(self, tenant_id, user_id): raise exception.NotImplemented() def get_all_tenants(self): raise exception.NotImplemented() def get_tenants_for_user(self, user_id): """Get the tenants associated with a given user. Returns: a list of tenant ids. """ raise exception.NotImplemented() def get_roles_for_user_and_tenant(self, user_id, tenant_id): """Get the roles associated with a user within given tenant. Returns: a list of role ids. 
""" raise exception.NotImplemented() def add_role_to_user_and_tenant(self, user_id, tenant_id, role_id): """Add a role to a user within given tenant.""" raise exception.NotImplemented() def remove_role_from_user_and_tenant(self, user_id, tenant_id, role_id): """Remove a role from a user within given tenant.""" raise exception.NotImplemented() # user crud def create_user(self, user_id, user): raise exception.NotImplemented() def update_user(self, user_id, user): raise exception.NotImplemented() def delete_user(self, user_id): raise exception.NotImplemented() # tenant crud def create_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def update_tenant(self, tenant_id, tenant): raise exception.NotImplemented() def delete_tenant(self, tenant_id, tenant): raise exception.NotImplemented() # metadata crud def get_metadata(self, user_id, tenant_id): raise exception.NotImplemented() def create_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def update_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() def delete_metadata(self, user_id, tenant_id, metadata): raise exception.NotImplemented() # role crud def create_role(self, role_id, role): raise exception.NotImplemented() def update_role(self, role_id, role): raise exception.NotImplemented() def delete_role(self, role_id): raise exception.NotImplemented() class PublicRouter(wsgi.ComposableRouter): def add_routes(self, mapper): tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_tenants_for_token', conditions=dict(methods=['GET'])) class AdminRouter(wsgi.ComposableRouter): def add_routes(self, mapper): # Tenant Operations tenant_controller = TenantController() mapper.connect('/tenants', controller=tenant_controller, action='get_all_tenants', conditions=dict(method=['GET'])) mapper.connect('/tenants/{tenant_id}', controller=tenant_controller, action='get_tenant', conditions=dict(method=['GET'])) # 
User Operations user_controller = UserController() mapper.connect('/users/{user_id}', controller=user_controller, action='get_user', conditions=dict(method=['GET'])) # Role Operations roles_controller = RoleController() mapper.connect('/tenants/{tenant_id}/users/{user_id}/roles', controller=roles_controller, action='get_user_roles', conditions=dict(method=['GET'])) mapper.connect('/users/{user_id}/roles', controller=user_controller, action='get_user_roles', conditions=dict(method=['GET'])) class TenantController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(TenantController, self).__init__() def get_all_tenants(self, context, **kw): """Gets a list of all tenants for an admin user.""" self.assert_admin(context) tenant_refs = self.identity_api.get_tenants(context) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenants_for_token(self, context, **kw): """Get valid tenants for token based on token used to authenticate. Pulls the token from the context, validates it and gets the valid tenants for the user in the token. Doesn't care about token scopedness. 
""" try: token_ref = self.token_api.get_token(context=context, token_id=context['token_id']) except exception.NotFound: raise exception.Unauthorized() user_ref = token_ref['user'] tenant_ids = self.identity_api.get_tenants_for_user( context, user_ref['id']) tenant_refs = [] for tenant_id in tenant_ids: tenant_refs.append(self.identity_api.get_tenant( context=context, tenant_id=tenant_id)) params = { 'limit': context['query_string'].get('limit'), 'marker': context['query_string'].get('marker'), } return self._format_tenant_list(tenant_refs, **params) def get_tenant(self, context, tenant_id): # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) return {'tenant': tenant} # CRUD Extension def create_tenant(self, context, tenant): tenant_ref = self._normalize_dict(tenant) self.assert_admin(context) tenant_id = (tenant_ref.get('id') and tenant_ref.get('id') or uuid.uuid4().hex) tenant_ref['id'] = tenant_id tenant = self.identity_api.create_tenant( context, tenant_id, tenant_ref) return {'tenant': tenant} def update_tenant(self, context, tenant_id, tenant): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) tenant_ref = self.identity_api.update_tenant( context, tenant_id, tenant) return {'tenant': tenant_ref} def delete_tenant(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) self.identity_api.delete_tenant(context, tenant_id) def get_tenant_users(self, context, tenant_id, **kw): self.assert_admin(context) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) user_refs = self.identity_api.get_tenant_users(context, tenant_id) return 
{'users': user_refs} def _format_tenant_list(self, tenant_refs, **kwargs): marker = kwargs.get('marker') page_idx = 0 if marker is not None: for (marker_idx, tenant) in enumerate(tenant_refs): if tenant['id'] == marker: # we start pagination after the marker page_idx = marker_idx + 1 break else: msg = 'Marker could not be found' raise exception.ValidationError(message=msg) limit = kwargs.get('limit') if limit is not None: try: limit = int(limit) if limit < 0: raise AssertionError() except (ValueError, AssertionError): msg = 'Invalid limit value' raise exception.ValidationError(message=msg) tenant_refs = tenant_refs[page_idx:limit] for x in tenant_refs: if 'enabled' not in x: x['enabled'] = True o = {'tenants': tenant_refs, 'tenants_links': []} return o class UserController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.policy_api = policy.Manager() self.token_api = token.Manager() super(UserController, self).__init__() def get_user(self, context, user_id): self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return {'user': user_ref} def get_users(self, context): # NOTE(termie): i can't imagine that this really wants all the data # about every single user in the system... 
self.assert_admin(context) user_refs = self.identity_api.list_users(context) return {'users': user_refs} # CRUD extension def create_user(self, context, user): user = self._normalize_dict(user) self.assert_admin(context) tenant_id = user.get('tenantId', None) if (tenant_id is not None and self.identity_api.get_tenant(context, tenant_id) is None): raise exception.TenantNotFound(tenant_id=tenant_id) user_id = uuid.uuid4().hex user_ref = user.copy() user_ref['id'] = user_id new_user_ref = self.identity_api.create_user( context, user_id, user_ref) if tenant_id: self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return {'user': new_user_ref} def update_user(self, context, user_id, user): # NOTE(termie): this is really more of a patch than a put self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) user_ref = self.identity_api.update_user(context, user_id, user) return {'user': user_ref} def delete_user(self, context, user_id): self.assert_admin(context) if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) self.identity_api.delete_user(context, user_id) def set_user_enabled(self, context, user_id, user): return self.update_user(context, user_id, user) def set_user_password(self, context, user_id, user): return self.update_user(context, user_id, user) def update_user_tenant(self, context, user_id, user): """Update the default tenant.""" # ensure that we're a member of that tenant tenant_id = user.get('tenantId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) return self.update_user(context, user_id, user) class RoleController(wsgi.Application): def __init__(self): self.identity_api = Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(RoleController, self).__init__() # COMPAT(essex-3) def get_user_roles(self, context, user_id, tenant_id=None): """Get the roles for a user and tenant 
pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. """ if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant ID required') user = self.identity_api.get_user(context, user_id) if user is None: raise exception.UserNotFound(user_id=user_id) tenant = self.identity_api.get_tenant(context, tenant_id) if tenant is None: raise exception.TenantNotFound(tenant_id=tenant_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) return {'roles': [self.identity_api.get_role(context, x) for x in roles]} # CRUD extension def get_role(self, context, role_id): self.assert_admin(context) role_ref = self.identity_api.get_role(context, role_id) if not role_ref: raise exception.RoleNotFound(role_id=role_id) return {'role': role_ref} def create_role(self, context, role): role = self._normalize_dict(role) self.assert_admin(context) role_id = uuid.uuid4().hex role['id'] = role_id role_ref = self.identity_api.create_role(context, role_id, role) return {'role': role_ref} def delete_role(self, context, role_id): self.assert_admin(context) self.get_role(context, role_id) self.identity_api.delete_role(context, role_id) def get_roles(self, context): self.assert_admin(context) roles = self.identity_api.list_roles(context) # TODO(termie): probably inefficient at some point return {'roles': roles} def add_role_to_user(self, context, user_id, role_id, tenant_id=None): """Add a role to a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} def remove_role_from_user(self, context, user_id, role_id, tenant_id=None): """Remove a role from a user and tenant pair. Since we're trying to ignore the idea of user-only roles we're not implementing them in hopes that the idea will die off. 
""" self.assert_admin(context) if tenant_id is None: raise exception.NotImplemented(message='User roles not supported: ' 'tenant_id required') if self.identity_api.get_user(context, user_id) is None: raise exception.UserNotFound(user_id=user_id) if self.identity_api.get_tenant(context, tenant_id) is None: raise exception.TenantNotFound(tenant_id=tenant_id) if self.identity_api.get_role(context, role_id) is None: raise exception.RoleNotFound(role_id=role_id) # This still has the weird legacy semantics that adding a role to # a user also adds them to a tenant, so we must follow up on that self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id) return # COMPAT(diablo): CRUD extension def get_role_refs(self, context, user_id): """Ultimate hack to get around having to make role_refs first-class. This will basically iterate over the various roles the user has in all tenants the user is a member of and create fake role_refs where the id encodes the user-tenant-role information so we can look up the appropriate data when we need to delete them. """ self.assert_admin(context) user_ref = self.identity_api.get_user(context, user_id) tenant_ids = self.identity_api.get_tenants_for_user(context, user_id) o = [] for tenant_id in tenant_ids: role_ids = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) for role_id in role_ids: ref = {'roleId': role_id, 'tenantId': tenant_id, 'userId': user_id} ref['id'] = urllib.urlencode(ref) o.append(ref) return {'roles': o} # COMPAT(diablo): CRUD extension def create_role_ref(self, context, user_id, role): """This is actually used for adding a user to a tenant. In the legacy data model adding a user to a tenant required setting a role. 
""" self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role tenant_id = role.get('tenantId') role_id = role.get('roleId') self.identity_api.add_user_to_tenant(context, tenant_id, user_id) self.identity_api.add_role_to_user_and_tenant( context, user_id, tenant_id, role_id) role_ref = self.identity_api.get_role(context, role_id) return {'role': role_ref} # COMPAT(diablo): CRUD extension def delete_role_ref(self, context, user_id, role_ref_id): """This is actually used for deleting a user from a tenant. In the legacy data model removing a user from a tenant required deleting a role. To emulate this, we encode the tenant and role in the role_ref_id, and if this happens to be the last role for the user-tenant pair, we remove the user from the tenant. """ self.assert_admin(context) # TODO(termie): for now we're ignoring the actual role role_ref_ref = urlparse.parse_qs(role_ref_id) tenant_id = role_ref_ref.get('tenantId')[0] role_id = role_ref_ref.get('roleId')[0] self.identity_api.remove_role_from_user_and_tenant( context, user_id, tenant_id, role_id) roles = self.identity_api.get_roles_for_user_and_tenant( context, user_id, tenant_id) if not roles: self.identity_api.remove_user_from_tenant( context, tenant_id, user_id)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_3695_1
crossvul-python_data_bad_2042_2
from collections import OrderedDict import sys import warnings from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured from django.core.paginator import InvalidPage from django.core.urlresolvers import reverse from django.db import models from django.db.models.fields import FieldDoesNotExist from django.utils import six from django.utils.deprecation import RenameMethodsBase, RemovedInDjango18Warning from django.utils.encoding import force_text from django.utils.translation import ugettext, ugettext_lazy from django.utils.http import urlencode from django.contrib.admin import FieldListFilter from django.contrib.admin.exceptions import DisallowedModelAdminLookup from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR, TO_FIELD_VAR from django.contrib.admin.utils import (quote, get_fields_from_path, lookup_needs_distinct, prepare_lookup_value) # Changelist settings ALL_VAR = 'all' ORDER_VAR = 'o' ORDER_TYPE_VAR = 'ot' PAGE_VAR = 'p' SEARCH_VAR = 'q' ERROR_FLAG = 'e' IGNORED_PARAMS = ( ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR) # Text to display within change-list table cells if the value is blank. EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)') def _is_changelist_popup(request): """ Returns True if the popup GET parameter is set. This function is introduced to facilitate deprecating the legacy value for IS_POPUP_VAR and should be removed at the end of the deprecation cycle. """ if IS_POPUP_VAR in request.GET: return True IS_LEGACY_POPUP_VAR = 'pop' if IS_LEGACY_POPUP_VAR in request.GET: warnings.warn( "The `%s` GET parameter has been renamed to `%s`." 
% (IS_LEGACY_POPUP_VAR, IS_POPUP_VAR), RemovedInDjango18Warning, 2) return True return False class RenameChangeListMethods(RenameMethodsBase): renamed_methods = ( ('get_query_set', 'get_queryset', RemovedInDjango18Warning), ) class ChangeList(six.with_metaclass(RenameChangeListMethods)): def __init__(self, request, model, list_display, list_display_links, list_filter, date_hierarchy, search_fields, list_select_related, list_per_page, list_max_show_all, list_editable, model_admin): self.model = model self.opts = model._meta self.lookup_opts = self.opts self.root_queryset = model_admin.get_queryset(request) self.list_display = list_display self.list_display_links = list_display_links self.list_filter = list_filter self.date_hierarchy = date_hierarchy self.search_fields = search_fields self.list_select_related = list_select_related self.list_per_page = list_per_page self.list_max_show_all = list_max_show_all self.model_admin = model_admin self.preserved_filters = model_admin.get_preserved_filters(request) # Get search parameters from the query string. 
try: self.page_num = int(request.GET.get(PAGE_VAR, 0)) except ValueError: self.page_num = 0 self.show_all = ALL_VAR in request.GET self.is_popup = _is_changelist_popup(request) self.to_field = request.GET.get(TO_FIELD_VAR) self.params = dict(request.GET.items()) if PAGE_VAR in self.params: del self.params[PAGE_VAR] if ERROR_FLAG in self.params: del self.params[ERROR_FLAG] if self.is_popup: self.list_editable = () else: self.list_editable = list_editable self.query = request.GET.get(SEARCH_VAR, '') self.queryset = self.get_queryset(request) self.get_results(request) if self.is_popup: title = ugettext('Select %s') else: title = ugettext('Select %s to change') self.title = title % force_text(self.opts.verbose_name) self.pk_attname = self.lookup_opts.pk.attname @property def root_query_set(self): warnings.warn("`ChangeList.root_query_set` is deprecated, " "use `root_queryset` instead.", RemovedInDjango18Warning, 2) return self.root_queryset @property def query_set(self): warnings.warn("`ChangeList.query_set` is deprecated, " "use `queryset` instead.", RemovedInDjango18Warning, 2) return self.queryset def get_filters_params(self, params=None): """ Returns all params except IGNORED_PARAMS """ if not params: params = self.params lookup_params = params.copy() # a dictionary of the query string # Remove all the parameters that are globally and systematically # ignored. for ignored in IGNORED_PARAMS: if ignored in lookup_params: del lookup_params[ignored] return lookup_params def get_filters(self, request): lookup_params = self.get_filters_params() use_distinct = False for key, value in lookup_params.items(): if not self.model_admin.lookup_allowed(key, value): raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key) filter_specs = [] if self.list_filter: for list_filter in self.list_filter: if callable(list_filter): # This is simply a custom list filter class. 
spec = list_filter(request, lookup_params, self.model, self.model_admin) else: field_path = None if isinstance(list_filter, (tuple, list)): # This is a custom FieldListFilter class for a given field. field, field_list_filter_class = list_filter else: # This is simply a field name, so use the default # FieldListFilter class that has been registered for # the type of the given field. field, field_list_filter_class = list_filter, FieldListFilter.create if not isinstance(field, models.Field): field_path = field field = get_fields_from_path(self.model, field_path)[-1] spec = field_list_filter_class(field, request, lookup_params, self.model, self.model_admin, field_path=field_path) # Check if we need to use distinct() use_distinct = (use_distinct or lookup_needs_distinct(self.lookup_opts, field_path)) if spec and spec.has_output(): filter_specs.append(spec) # At this point, all the parameters used by the various ListFilters # have been removed from lookup_params, which now only contains other # parameters passed via the query string. We now loop through the # remaining parameters both to ensure that all the parameters are valid # fields and to determine if at least one of them needs distinct(). If # the lookup parameters aren't real fields, then bail out. 
try: for key, value in lookup_params.items(): lookup_params[key] = prepare_lookup_value(key, value) use_distinct = (use_distinct or lookup_needs_distinct(self.lookup_opts, key)) return filter_specs, bool(filter_specs), lookup_params, use_distinct except FieldDoesNotExist as e: six.reraise(IncorrectLookupParameters, IncorrectLookupParameters(e), sys.exc_info()[2]) def get_query_string(self, new_params=None, remove=None): if new_params is None: new_params = {} if remove is None: remove = [] p = self.params.copy() for r in remove: for k in list(p): if k.startswith(r): del p[k] for k, v in new_params.items(): if v is None: if k in p: del p[k] else: p[k] = v return '?%s' % urlencode(sorted(p.items())) def get_results(self, request): paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page) # Get the number of objects, with admin filters applied. result_count = paginator.count # Get the total number of objects, with no admin filters applied. # Perform a slight optimization: # full_result_count is equal to paginator.count if no filters # were applied if self.get_filters_params() or self.params.get(SEARCH_VAR): full_result_count = self.root_queryset.count() else: full_result_count = result_count can_show_all = result_count <= self.list_max_show_all multi_page = result_count > self.list_per_page # Get the list of objects to display on this page. 
if (self.show_all and can_show_all) or not multi_page: result_list = self.queryset._clone() else: try: result_list = paginator.page(self.page_num + 1).object_list except InvalidPage: raise IncorrectLookupParameters self.result_count = result_count self.full_result_count = full_result_count self.result_list = result_list self.can_show_all = can_show_all self.multi_page = multi_page self.paginator = paginator def _get_default_ordering(self): ordering = [] if self.model_admin.ordering: ordering = self.model_admin.ordering elif self.lookup_opts.ordering: ordering = self.lookup_opts.ordering return ordering def get_ordering_field(self, field_name): """ Returns the proper model field name corresponding to the given field_name to use for ordering. field_name may either be the name of a proper model field or the name of a method (on the admin or model) or a callable with the 'admin_order_field' attribute. Returns None if no proper model field name can be matched. """ try: field = self.lookup_opts.get_field(field_name) return field.name except models.FieldDoesNotExist: # See whether field_name is a name of a non-field # that allows sorting. if callable(field_name): attr = field_name elif hasattr(self.model_admin, field_name): attr = getattr(self.model_admin, field_name) else: attr = getattr(self.model, field_name) return getattr(attr, 'admin_order_field', None) def get_ordering(self, request, queryset): """ Returns the list of ordering fields for the change list. First we check the get_ordering() method in model admin, then we check the object's default ordering. Then, any manually-specified ordering from the query string overrides anything. Finally, a deterministic order is guaranteed by ensuring the primary key is used as the last ordering field. 
""" params = self.params ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering()) if ORDER_VAR in params: # Clear ordering and used params ordering = [] order_params = params[ORDER_VAR].split('.') for p in order_params: try: none, pfx, idx = p.rpartition('-') field_name = self.list_display[int(idx)] order_field = self.get_ordering_field(field_name) if not order_field: continue # No 'admin_order_field', skip it # reverse order if order_field has already "-" as prefix if order_field.startswith('-') and pfx == "-": ordering.append(order_field[1:]) else: ordering.append(pfx + order_field) except (IndexError, ValueError): continue # Invalid ordering specified, skip it. # Add the given query's ordering fields, if any. ordering.extend(queryset.query.order_by) # Ensure that the primary key is systematically present in the list of # ordering fields so we can guarantee a deterministic order across all # database backends. pk_name = self.lookup_opts.pk.name if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])): # The two sets do not intersect, meaning the pk isn't present. So # we add it. ordering.append('-pk') return ordering def get_ordering_field_columns(self): """ Returns an OrderedDict of ordering field column numbers and asc/desc """ # We must cope with more than one column having the same underlying sort # field, so we base things on column numbers. ordering = self._get_default_ordering() ordering_fields = OrderedDict() if ORDER_VAR not in self.params: # for ordering specified on ModelAdmin or model Meta, we don't know # the right column numbers absolutely, because there might be more # than one column associated with that ordering, so we guess. 
for field in ordering: if field.startswith('-'): field = field[1:] order_type = 'desc' else: order_type = 'asc' for index, attr in enumerate(self.list_display): if self.get_ordering_field(attr) == field: ordering_fields[index] = order_type break else: for p in self.params[ORDER_VAR].split('.'): none, pfx, idx = p.rpartition('-') try: idx = int(idx) except ValueError: continue # skip it ordering_fields[idx] = 'desc' if pfx == '-' else 'asc' return ordering_fields def get_queryset(self, request): # First, we collect all the declared list filters. (self.filter_specs, self.has_filters, remaining_lookup_params, filters_use_distinct) = self.get_filters(request) # Then, we let every list filter modify the queryset to its liking. qs = self.root_queryset for filter_spec in self.filter_specs: new_qs = filter_spec.queryset(request, qs) if new_qs is not None: qs = new_qs try: # Finally, we apply the remaining lookup parameters from the query # string (i.e. those that haven't already been processed by the # filters). qs = qs.filter(**remaining_lookup_params) except (SuspiciousOperation, ImproperlyConfigured): # Allow certain types of errors to be re-raised as-is so that the # caller can treat them in a special way. raise except Exception as e: # Every other error is caught with a naked except, because we don't # have any other way of validating lookup parameters. They might be # invalid if the keyword arguments are incorrect, or if the values # are not in the correct type, so we might get FieldError, # ValueError, ValidationError, or ?. raise IncorrectLookupParameters(e) if not qs.query.select_related: qs = self.apply_select_related(qs) # Set ordering. 
ordering = self.get_ordering(request, qs) qs = qs.order_by(*ordering) # Apply search results qs, search_use_distinct = self.model_admin.get_search_results( request, qs, self.query) # Remove duplicates from results, if necessary if filters_use_distinct | search_use_distinct: return qs.distinct() else: return qs def apply_select_related(self, qs): if self.list_select_related is True: return qs.select_related() if self.list_select_related is False: if self.has_related_field_in_list_display(): return qs.select_related() if self.list_select_related: return qs.select_related(*self.list_select_related) return qs def has_related_field_in_list_display(self): for field_name in self.list_display: try: field = self.lookup_opts.get_field(field_name) except models.FieldDoesNotExist: pass else: if isinstance(field.rel, models.ManyToOneRel): return True return False def url_for_result(self, result): pk = getattr(result, self.pk_attname) return reverse('admin:%s_%s_change' % (self.opts.app_label, self.opts.model_name), args=(quote(pk),), current_app=self.model_admin.admin_site.name)
./CrossVul/dataset_final_sorted/CWE-264/py/bad_2042_2
crossvul-python_data_good_3694_1
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import routes from keystone import catalog from keystone import exception from keystone import identity from keystone import policy from keystone import token from keystone.common import logging from keystone.common import utils from keystone.common import wsgi LOG = logging.getLogger(__name__) class AdminRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token', conditions=dict(method=['GET'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='validate_token_head', conditions=dict(method=['HEAD'])) mapper.connect('/tokens/{token_id}', controller=auth_controller, action='delete_token', conditions=dict(method=['DELETE'])) mapper.connect('/tokens/{token_id}/endpoints', controller=auth_controller, action='endpoints', conditions=dict(method=['GET'])) # Miscellaneous Operations extensions_controller = AdminExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', 
conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.AdminRouter() routers = [identity_router] super(AdminRouter, self).__init__(mapper, routers) class PublicRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_version') # Token Operations auth_controller = TokenController() mapper.connect('/tokens', controller=auth_controller, action='authenticate', conditions=dict(method=['POST'])) # Miscellaneous extensions_controller = PublicExtensionsController() mapper.connect('/extensions', controller=extensions_controller, action='get_extensions_info', conditions=dict(method=['GET'])) mapper.connect('/extensions/{extension_alias}', controller=extensions_controller, action='get_extension_info', conditions=dict(method=['GET'])) identity_router = identity.PublicRouter() routers = [identity_router] super(PublicRouter, self).__init__(mapper, routers) class PublicVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('public') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(PublicVersionRouter, self).__init__(mapper, routers) class AdminVersionRouter(wsgi.ComposingRouter): def __init__(self): mapper = routes.Mapper() version_controller = VersionController('admin') mapper.connect('/', controller=version_controller, action='get_versions') routers = [] super(AdminVersionRouter, self).__init__(mapper, routers) class VersionController(wsgi.Application): def __init__(self, version_type): self.catalog_api = catalog.Manager() self.url_key = "%sURL" % version_type super(VersionController, self).__init__() def _get_identity_url(self, context): catalog_ref = self.catalog_api.get_catalog( 
context=context, user_id=None, tenant_id=None) for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): if service == 'identity': return service_ref[self.url_key] raise exception.NotImplemented() def _get_versions_list(self, context): """The list of versions is dependent on the context.""" identity_url = self._get_identity_url(context) if not identity_url.endswith('/'): identity_url = identity_url + '/' versions = {} versions['v2.0'] = { "id": "v2.0", "status": "beta", "updated": "2011-11-19T00:00:00Z", "links": [ { "rel": "self", "href": identity_url, }, { "rel": "describedby", "type": "text/html", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/content/" }, { "rel": "describedby", "type": "application/pdf", "href": "http://docs.openstack.org/api/openstack-" "identity-service/2.0/identity-dev-guide-" "2.0.pdf" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0" "+json" }, { "base": "application/xml", "type": "application/vnd.openstack.identity-v2.0" "+xml" } ] } return versions def get_versions(self, context): versions = self._get_versions_list(context) return wsgi.render_response(status=(300, 'Multiple Choices'), body={ "versions": { "values": versions.values() } }) def get_version(self, context): versions = self._get_versions_list(context) return wsgi.render_response(body={ "version": versions['v2.0'] }) class NoopController(wsgi.Application): def __init__(self): super(NoopController, self).__init__() def noop(self, context): return {} class TokenController(wsgi.Application): def __init__(self): self.catalog_api = catalog.Manager() self.identity_api = identity.Manager() self.token_api = token.Manager() self.policy_api = policy.Manager() super(TokenController, self).__init__() def authenticate(self, context, auth=None): """Authenticate credentials and return a token. 
Accept auth as a dict that looks like:: { "auth":{ "passwordCredentials":{ "username":"test_user", "password":"mypass" }, "tenantName":"customer-x" } } In this case, tenant is optional, if not provided the token will be considered "unscoped" and can later be used to get a scoped token. Alternatively, this call accepts auth with only a token and tenant that will return a token that is scoped to that tenant. """ token_id = uuid.uuid4().hex if 'passwordCredentials' in auth: username = auth['passwordCredentials'].get('username', '') password = auth['passwordCredentials'].get('password', '') tenant_name = auth.get('tenantName', None) user_id = auth['passwordCredentials'].get('userId', None) if username: user_ref = self.identity_api.get_user_by_name( context=context, user_name=username) if user_ref: user_id = user_ref['id'] # more compat tenant_id = auth.get('tenantId', None) if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) if tenant_ref: tenant_id = tenant_ref['id'] try: auth_info = self.identity_api.authenticate(context=context, user_id=user_id, password=password, tenant_id=tenant_id) (user_ref, tenant_ref, metadata_ref) = auth_info # If the user is disabled don't allow them to authenticate if not user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_id) raise exception.Unauthorized() except AssertionError as e: raise exception.Unauthorized(e.message) token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) if tenant_ref: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: catalog_ref = {} elif 'token' in auth: token = auth['token'].get('id', None) tenant_name = auth.get('tenantName') # more compat if tenant_name: tenant_ref = self.identity_api.get_tenant_by_name( context=context, tenant_name=tenant_name) tenant_id = 
tenant_ref['id'] else: tenant_id = auth.get('tenantId', None) try: old_token_ref = self.token_api.get_token(context=context, token_id=token) except exception.NotFound: raise exception.Unauthorized() user_ref = old_token_ref['user'] # If the user is disabled don't allow them to authenticate current_user_ref = self.identity_api.get_user( context=context, user_id=user_ref['id']) if not current_user_ref.get('enabled', True): LOG.warning('User %s is disabled' % user_ref['id']) raise exception.Unauthorized() tenants = self.identity_api.get_tenants_for_user(context, user_ref['id']) if tenant_id: assert tenant_id in tenants tenant_ref = self.identity_api.get_tenant(context=context, tenant_id=tenant_id) if tenant_ref: metadata_ref = self.identity_api.get_metadata( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id']) catalog_ref = self.catalog_api.get_catalog( context=context, user_id=user_ref['id'], tenant_id=tenant_ref['id'], metadata=metadata_ref) else: metadata_ref = {} catalog_ref = {} token_ref = self.token_api.create_token( context, token_id, dict(id=token_id, user=user_ref, tenant=tenant_ref, metadata=metadata_ref)) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) logging.debug('TOKEN_REF %s', token_ref) return self._format_authenticate(token_ref, roles_ref, catalog_ref) def _get_token_ref(self, context, token_id, belongs_to=None): """Returns a token if a valid one exists. Optionally, limited to a token owned by a specific tenant. 
""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) token_ref = self.token_api.get_token(context=context, token_id=token_id) if belongs_to: assert token_ref['tenant']['id'] == belongs_to return token_ref # admin only def validate_token_head(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Identical to ``validate_token``, except does not return a response. """ belongs_to = context['query_string'].get("belongsTo") assert self._get_token_ref(context, token_id, belongs_to) # admin only def validate_token(self, context, token_id): """Check that a token is valid. Optionally, also ensure that it is owned by a specific tenant. Returns metadata about the token along any associated roles. """ belongs_to = context['query_string'].get("belongsTo") token_ref = self._get_token_ref(context, token_id, belongs_to) # TODO(termie): optimize this call at some point and put it into the # the return for metadata # fill out the roles in the metadata metadata_ref = token_ref['metadata'] roles_ref = [] for role_id in metadata_ref.get('roles', []): roles_ref.append(self.identity_api.get_role(context, role_id)) # Get a service catalog if belongs_to is not none # This is needed for on-behalf-of requests catalog_ref = None if belongs_to is not None: catalog_ref = self.catalog_api.get_catalog( context=context, user_id=token_ref['user']['id'], tenant_id=token_ref['tenant']['id'], metadata=metadata_ref) return self._format_token(token_ref, roles_ref, catalog_ref) def delete_token(self, context, token_id): """Delete a token, effectively invalidating it for authz.""" # TODO(termie): this stuff should probably be moved to middleware self.assert_admin(context) self.token_api.delete_token(context=context, token_id=token_id) def endpoints(self, context, token_id): """Return a list of endpoints available to the token.""" raise exception.NotImplemented() def _format_authenticate(self, 
token_ref, roles_ref, catalog_ref): o = self._format_token(token_ref, roles_ref) o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_token(self, token_ref, roles_ref, catalog_ref=None): user_ref = token_ref['user'] metadata_ref = token_ref['metadata'] expires = token_ref['expires'] if expires is not None: expires = utils.isotime(expires) o = {'access': {'token': {'id': token_ref['id'], 'expires': expires, }, 'user': {'id': user_ref['id'], 'name': user_ref['name'], 'username': user_ref['name'], 'roles': roles_ref, 'roles_links': metadata_ref.get('roles_links', []) } } } if 'tenant' in token_ref and token_ref['tenant']: token_ref['tenant']['enabled'] = True o['access']['token']['tenant'] = token_ref['tenant'] if catalog_ref is not None: o['access']['serviceCatalog'] = self._format_catalog(catalog_ref) return o def _format_catalog(self, catalog_ref): """Munge catalogs from internal to output format Internal catalogs look like: {$REGION: { {$SERVICE: { $key1: $value1, ... } } } The legacy api wants them to look like [{'name': $SERVICE[name], 'type': $SERVICE, 'endpoints': [{ 'tenantId': $tenant_id, ... 
'region': $REGION, }], 'endpoints_links': [], }] """ if not catalog_ref: return {} services = {} for region, region_ref in catalog_ref.iteritems(): for service, service_ref in region_ref.iteritems(): new_service_ref = services.get(service, {}) new_service_ref['name'] = service_ref.pop('name') new_service_ref['type'] = service new_service_ref['endpoints_links'] = [] service_ref['region'] = region endpoints_ref = new_service_ref.get('endpoints', []) endpoints_ref.append(service_ref) new_service_ref['endpoints'] = endpoints_ref services[service] = new_service_ref return services.values() class ExtensionsController(wsgi.Application): """Base extensions controller to be extended by public and admin API's.""" def __init__(self, extensions=None): super(ExtensionsController, self).__init__() self.extensions = extensions or {} def get_extensions_info(self, context): return {'extensions': {'values': self.extensions.values()}} def get_extension_info(self, context, extension_alias): try: return {'extension': self.extensions[extension_alias]} except KeyError: raise exception.NotFound(target=extension_alias) class PublicExtensionsController(ExtensionsController): pass class AdminExtensionsController(ExtensionsController): def __init__(self, *args, **kwargs): super(AdminExtensionsController, self).__init__(*args, **kwargs) # TODO(dolph): Extensions should obviously provide this information # themselves, but hardcoding it here allows us to match # the API spec in the short term with minimal complexity. 
self.extensions['OS-KSADM'] = { 'name': 'Openstack Keystone Admin', 'namespace': 'http://docs.openstack.org/identity/api/ext/' 'OS-KSADM/v1.0', 'alias': 'OS-KSADM', 'updated': '2011-08-19T13:25:27-06:00', 'description': 'Openstack extensions to Keystone v2.0 API ' 'enabling Admin Operations.', 'links': [ { 'rel': 'describedby', # TODO(dolph): link needs to be revised after # bug 928059 merges 'type': 'text/html', 'href': ('https://github.com/openstack/' 'identity-api'), } ] } @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicRouter() @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminRouter() @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return PublicVersionRouter() @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return AdminVersionRouter()
./CrossVul/dataset_final_sorted/CWE-264/py/good_3694_1
crossvul-python_data_good_1123_0
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """HTTP Request Parser This server uses asyncore to accept connections and do initial processing but threads to do work. """ import re from io import BytesIO from waitress.buffers import OverflowableBuffer from waitress.compat import tostr, unquote_bytes_to_wsgi, urlparse from waitress.receiver import ChunkedReceiver, FixedStreamReceiver from waitress.utilities import ( BadRequest, RequestEntityTooLarge, RequestHeaderFieldsTooLarge, find_double_newline, ) class ParsingError(Exception): pass class HTTPRequestParser(object): """A structure that collects the HTTP request. Once the stream is completed, the instance is passed to a server task constructor. """ completed = False # Set once request is completed. empty = False # Set if no request was made. expect_continue = False # client sent "Expect: 100-continue" header headers_finished = False # True when headers have been read header_plus = b"" chunked = False content_length = 0 header_bytes_received = 0 body_bytes_received = 0 body_rcv = None version = "1.0" error = None connection_close = False # Other attributes: first_line, header, headers, command, uri, version, # path, query, fragment def __init__(self, adj): """ adj is an Adjustments object. """ # headers is a mapping containing keys translated to uppercase # with dashes turned into underscores. 
self.headers = {} self.adj = adj def received(self, data): """ Receives the HTTP stream for one request. Returns the number of bytes consumed. Sets the completed flag once both the header and the body have been received. """ if self.completed: return 0 # Can't consume any more. datalen = len(data) br = self.body_rcv if br is None: # In header. max_header = self.adj.max_request_header_size s = self.header_plus + data index = find_double_newline(s) consumed = 0 if index >= 0: # If the headers have ended, and we also have part of the body # message in data we still want to validate we aren't going # over our limit for received headers. self.header_bytes_received += index consumed = datalen - (len(s) - index) else: self.header_bytes_received += datalen consumed = datalen # If the first line + headers is over the max length, we return a # RequestHeaderFieldsTooLarge error rather than continuing to # attempt to parse the headers. if self.header_bytes_received >= max_header: self.parse_header(b"GET / HTTP/1.0\r\n") self.error = RequestHeaderFieldsTooLarge( "exceeds max_header of %s" % max_header ) self.completed = True return consumed if index >= 0: # Header finished. header_plus = s[:index] # Remove preceeding blank lines. 
This is suggested by # https://tools.ietf.org/html/rfc7230#section-3.5 to support # clients sending an extra CR LF after another request when # using HTTP pipelining header_plus = header_plus.lstrip() if not header_plus: self.empty = True self.completed = True else: try: self.parse_header(header_plus) except ParsingError as e: self.error = BadRequest(e.args[0]) self.completed = True else: if self.body_rcv is None: # no content-length header and not a t-e: chunked # request self.completed = True if self.content_length > 0: max_body = self.adj.max_request_body_size # we won't accept this request if the content-length # is too large if self.content_length >= max_body: self.error = RequestEntityTooLarge( "exceeds max_body of %s" % max_body ) self.completed = True self.headers_finished = True return consumed # Header not finished yet. self.header_plus = s return datalen else: # In body. consumed = br.received(data) self.body_bytes_received += consumed max_body = self.adj.max_request_body_size if self.body_bytes_received >= max_body: # this will only be raised during t-e: chunked requests self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body) self.completed = True elif br.error: # garbage in chunked encoding input probably self.error = br.error self.completed = True elif br.completed: # The request (with the body) is ready to use. self.completed = True if self.chunked: # We've converted the chunked transfer encoding request # body into a normal request body, so we know its content # length; set the header here. We already popped the # TRANSFER_ENCODING header in parse_header, so this will # appear to the client to be an entirely non-chunked HTTP # request with a valid content-length. self.headers["CONTENT_LENGTH"] = str(br.__len__()) return consumed def parse_header(self, header_plus): """ Parses the header_plus block of text (the headers plus the first line of the request). 
""" index = header_plus.find(b"\r\n") if index >= 0: first_line = header_plus[:index].rstrip() header = header_plus[index + 2 :] else: raise ParsingError("HTTP message header invalid") if b"\r" in first_line or b"\n" in first_line: raise ParsingError("Bare CR or LF found in HTTP message") self.first_line = first_line # for testing lines = get_header_lines(header) headers = self.headers for line in lines: index = line.find(b":") if index > 0: key = line[:index] if key != key.strip(): raise ParsingError("Invalid whitespace after field-name") if b"_" in key: continue value = line[index + 1 :].strip() key1 = tostr(key.upper().replace(b"-", b"_")) # If a header already exists, we append subsequent values # seperated by a comma. Applications already need to handle # the comma seperated values, as HTTP front ends might do # the concatenation for you (behavior specified in RFC2616). try: headers[key1] += tostr(b", " + value) except KeyError: headers[key1] = tostr(value) # else there's garbage in the headers? 
# command, uri, version will be bytes command, uri, version = crack_first_line(first_line) version = tostr(version) command = tostr(command) self.command = command self.version = version ( self.proxy_scheme, self.proxy_netloc, self.path, self.query, self.fragment, ) = split_uri(uri) self.url_scheme = self.adj.url_scheme connection = headers.get("CONNECTION", "") if version == "1.0": if connection.lower() != "keep-alive": self.connection_close = True if version == "1.1": # since the server buffers data from chunked transfers and clients # never need to deal with chunked requests, downstream clients # should not see the HTTP_TRANSFER_ENCODING header; we pop it # here te = headers.pop("TRANSFER_ENCODING", "") if te.lower() == "chunked": self.chunked = True buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = ChunkedReceiver(buf) expect = headers.get("EXPECT", "").lower() self.expect_continue = expect == "100-continue" if connection.lower() == "close": self.connection_close = True if not self.chunked: try: cl = int(headers.get("CONTENT_LENGTH", 0)) except ValueError: raise ParsingError("Content-Length is invalid") self.content_length = cl if cl > 0: buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = FixedStreamReceiver(cl, buf) def get_body_stream(self): body_rcv = self.body_rcv if body_rcv is not None: return body_rcv.getfile() else: return BytesIO() def close(self): body_rcv = self.body_rcv if body_rcv is not None: body_rcv.getbuf().close() def split_uri(uri): # urlsplit handles byte input by returning bytes on py3, so # scheme, netloc, path, query, and fragment are bytes scheme = netloc = path = query = fragment = b"" # urlsplit below will treat this as a scheme-less netloc, thereby losing # the original intent of the request. Here we shamelessly stole 4 lines of # code from the CPython stdlib to parse out the fragment and query but # leave the path alone. 
See # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468 # and https://github.com/Pylons/waitress/issues/260 if uri[:2] == b"//": path = uri if b"#" in path: path, fragment = path.split(b"#", 1) if b"?" in path: path, query = path.split(b"?", 1) else: try: scheme, netloc, path, query, fragment = urlparse.urlsplit(uri) except UnicodeError: raise ParsingError("Bad URI") return ( tostr(scheme), tostr(netloc), unquote_bytes_to_wsgi(path), tostr(query), tostr(fragment), ) def get_header_lines(header): """ Splits the header into lines, putting multi-line headers together. """ r = [] lines = header.split(b"\r\n") for line in lines: if b"\r" in line or b"\n" in line: raise ParsingError('Bare CR or LF found in header line "%s"' % tostr(line)) if line.startswith((b" ", b"\t")): if not r: # https://corte.si/posts/code/pathod/pythonservers/index.html raise ParsingError('Malformed header line "%s"' % tostr(line)) r[-1] += line else: r.append(line) return r first_line_re = re.compile( b"([^ ]+) " b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)" b"(( HTTP/([0-9.]+))$|$)" ) def crack_first_line(line): m = first_line_re.match(line) if m is not None and m.end() == len(line): if m.group(3): version = m.group(5) else: version = b"" method = m.group(1) # the request methods that are currently defined are all uppercase: # https://www.iana.org/assignments/http-methods/http-methods.xhtml and # the request method is case sensitive according to # https://tools.ietf.org/html/rfc7231#section-4.1 # By disallowing anything but uppercase methods we save poor # unsuspecting souls from sending lowercase HTTP methods to waitress # and having the request complete, while servers like nginx drop the # request onto the floor. if method != method.upper(): raise ParsingError('Malformed HTTP method "%s"' % tostr(method)) uri = m.group(2) return method, uri, version else: return b"", b"", b""
./CrossVul/dataset_final_sorted/CWE-444/py/good_1123_0
crossvul-python_data_good_1119_0
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """HTTP Request Parser This server uses asyncore to accept connections and do initial processing but threads to do work. """ import re from io import BytesIO from waitress.buffers import OverflowableBuffer from waitress.compat import tostr, unquote_bytes_to_wsgi, urlparse from waitress.receiver import ChunkedReceiver, FixedStreamReceiver from waitress.utilities import ( BadRequest, RequestEntityTooLarge, RequestHeaderFieldsTooLarge, find_double_newline, ) class ParsingError(Exception): pass class HTTPRequestParser(object): """A structure that collects the HTTP request. Once the stream is completed, the instance is passed to a server task constructor. """ completed = False # Set once request is completed. empty = False # Set if no request was made. expect_continue = False # client sent "Expect: 100-continue" header headers_finished = False # True when headers have been read header_plus = b"" chunked = False content_length = 0 header_bytes_received = 0 body_bytes_received = 0 body_rcv = None version = "1.0" error = None connection_close = False # Other attributes: first_line, header, headers, command, uri, version, # path, query, fragment def __init__(self, adj): """ adj is an Adjustments object. """ # headers is a mapping containing keys translated to uppercase # with dashes turned into underscores. 
self.headers = {} self.adj = adj def received(self, data): """ Receives the HTTP stream for one request. Returns the number of bytes consumed. Sets the completed flag once both the header and the body have been received. """ if self.completed: return 0 # Can't consume any more. datalen = len(data) br = self.body_rcv if br is None: # In header. s = self.header_plus + data index = find_double_newline(s) if index >= 0: # Header finished. header_plus = s[:index] consumed = len(data) - (len(s) - index) # Remove preceeding blank lines. This is suggested by # https://tools.ietf.org/html/rfc7230#section-3.5 to support # clients sending an extra CR LF after another request when # using HTTP pipelining header_plus = header_plus.lstrip() if not header_plus: self.empty = True self.completed = True else: try: self.parse_header(header_plus) except ParsingError as e: self.error = BadRequest(e.args[0]) self.completed = True else: if self.body_rcv is None: # no content-length header and not a t-e: chunked # request self.completed = True if self.content_length > 0: max_body = self.adj.max_request_body_size # we won't accept this request if the content-length # is too large if self.content_length >= max_body: self.error = RequestEntityTooLarge( "exceeds max_body of %s" % max_body ) self.completed = True self.headers_finished = True return consumed else: # Header not finished yet. self.header_bytes_received += datalen max_header = self.adj.max_request_header_size if self.header_bytes_received >= max_header: # malformed header, we need to construct some request # on our own. we disregard the incoming(?) requests HTTP # version and just use 1.0. IOW someone just sent garbage # over the wire self.parse_header(b"GET / HTTP/1.0\n") self.error = RequestHeaderFieldsTooLarge( "exceeds max_header of %s" % max_header ) self.completed = True self.header_plus = s return datalen else: # In body. 
consumed = br.received(data) self.body_bytes_received += consumed max_body = self.adj.max_request_body_size if self.body_bytes_received >= max_body: # this will only be raised during t-e: chunked requests self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body) self.completed = True elif br.error: # garbage in chunked encoding input probably self.error = br.error self.completed = True elif br.completed: # The request (with the body) is ready to use. self.completed = True if self.chunked: # We've converted the chunked transfer encoding request # body into a normal request body, so we know its content # length; set the header here. We already popped the # TRANSFER_ENCODING header in parse_header, so this will # appear to the client to be an entirely non-chunked HTTP # request with a valid content-length. self.headers["CONTENT_LENGTH"] = str(br.__len__()) return consumed def parse_header(self, header_plus): """ Parses the header_plus block of text (the headers plus the first line of the request). """ index = header_plus.find(b"\r\n") if index >= 0: first_line = header_plus[:index].rstrip() header = header_plus[index + 2 :] else: raise ParsingError("HTTP message header invalid") if b"\r" in first_line or b"\n" in first_line: raise ParsingError("Bare CR or LF found in HTTP message") self.first_line = first_line # for testing lines = get_header_lines(header) headers = self.headers for line in lines: index = line.find(b":") if index > 0: key = line[:index] if b"_" in key: continue value = line[index + 1 :].strip() key1 = tostr(key.upper().replace(b"-", b"_")) # If a header already exists, we append subsequent values # seperated by a comma. Applications already need to handle # the comma seperated values, as HTTP front ends might do # the concatenation for you (behavior specified in RFC2616). try: headers[key1] += tostr(b", " + value) except KeyError: headers[key1] = tostr(value) # else there's garbage in the headers? 
# command, uri, version will be bytes command, uri, version = crack_first_line(first_line) version = tostr(version) command = tostr(command) self.command = command self.version = version ( self.proxy_scheme, self.proxy_netloc, self.path, self.query, self.fragment, ) = split_uri(uri) self.url_scheme = self.adj.url_scheme connection = headers.get("CONNECTION", "") if version == "1.0": if connection.lower() != "keep-alive": self.connection_close = True if version == "1.1": # since the server buffers data from chunked transfers and clients # never need to deal with chunked requests, downstream clients # should not see the HTTP_TRANSFER_ENCODING header; we pop it # here te = headers.pop("TRANSFER_ENCODING", "") if te.lower() == "chunked": self.chunked = True buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = ChunkedReceiver(buf) expect = headers.get("EXPECT", "").lower() self.expect_continue = expect == "100-continue" if connection.lower() == "close": self.connection_close = True if not self.chunked: try: cl = int(headers.get("CONTENT_LENGTH", 0)) except ValueError: cl = 0 self.content_length = cl if cl > 0: buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = FixedStreamReceiver(cl, buf) def get_body_stream(self): body_rcv = self.body_rcv if body_rcv is not None: return body_rcv.getfile() else: return BytesIO() def close(self): body_rcv = self.body_rcv if body_rcv is not None: body_rcv.getbuf().close() def split_uri(uri): # urlsplit handles byte input by returning bytes on py3, so # scheme, netloc, path, query, and fragment are bytes scheme = netloc = path = query = fragment = b"" # urlsplit below will treat this as a scheme-less netloc, thereby losing # the original intent of the request. Here we shamelessly stole 4 lines of # code from the CPython stdlib to parse out the fragment and query but # leave the path alone. 
See # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468 # and https://github.com/Pylons/waitress/issues/260 if uri[:2] == b"//": path = uri if b"#" in path: path, fragment = path.split(b"#", 1) if b"?" in path: path, query = path.split(b"?", 1) else: try: scheme, netloc, path, query, fragment = urlparse.urlsplit(uri) except UnicodeError: raise ParsingError("Bad URI") return ( tostr(scheme), tostr(netloc), unquote_bytes_to_wsgi(path), tostr(query), tostr(fragment), ) def get_header_lines(header): """ Splits the header into lines, putting multi-line headers together. """ r = [] lines = header.split(b"\r\n") for line in lines: if b"\r" in line or b"\n" in line: raise ParsingError('Bare CR or LF found in header line "%s"' % tostr(line)) if line.startswith((b" ", b"\t")): if not r: # https://corte.si/posts/code/pathod/pythonservers/index.html raise ParsingError('Malformed header line "%s"' % tostr(line)) r[-1] += line else: r.append(line) return r first_line_re = re.compile( b"([^ ]+) " b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)" b"(( HTTP/([0-9.]+))$|$)" ) def crack_first_line(line): m = first_line_re.match(line) if m is not None and m.end() == len(line): if m.group(3): version = m.group(5) else: version = b"" method = m.group(1) # the request methods that are currently defined are all uppercase: # https://www.iana.org/assignments/http-methods/http-methods.xhtml and # the request method is case sensitive according to # https://tools.ietf.org/html/rfc7231#section-4.1 # By disallowing anything but uppercase methods we save poor # unsuspecting souls from sending lowercase HTTP methods to waitress # and having the request complete, while servers like nginx drop the # request onto the floor. if method != method.upper(): raise ParsingError('Malformed HTTP method "%s"' % tostr(method)) uri = m.group(2) return method, uri, version else: return b"", b"", b""
./CrossVul/dataset_final_sorted/CWE-444/py/good_1119_0
crossvul-python_data_bad_1120_2
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) try: README = open(os.path.join(here, "README.rst")).read() CHANGES = open(os.path.join(here, "CHANGES.txt")).read() except IOError: README = CHANGES = "" docs_extras = [ "Sphinx>=1.8.1", "docutils", "pylons-sphinx-themes>=1.0.9", ] testing_extras = [ "nose", "coverage>=5.0", ] setup( name="waitress", version="1.3.1", author="Zope Foundation and Contributors", author_email="zope-dev@zope.org", maintainer="Pylons Project", maintainer_email="pylons-discuss@googlegroups.com", description="Waitress WSGI server", long_description=README + "\n\n" + CHANGES, license="ZPL 2.1", keywords="waitress wsgi server http", classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: Zope Public License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Natural Language :: 
English", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI", ], url="https://github.com/Pylons/waitress", packages=find_packages(), extras_require={"testing": testing_extras, "docs": docs_extras,}, include_package_data=True, test_suite="waitress", zip_safe=False, entry_points=""" [paste.server_runner] main = waitress:serve_paste [console_scripts] waitress-serve = waitress.runner:run """, )
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1120_2
crossvul-python_data_bad_1119_1
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Data Chunk Receiver """ from waitress.utilities import find_double_newline from waitress.utilities import BadRequest class FixedStreamReceiver(object): # See IStreamConsumer completed = False error = None def __init__(self, cl, buf): self.remain = cl self.buf = buf def __len__(self): return self.buf.__len__() def received(self, data): "See IStreamConsumer" rm = self.remain if rm < 1: self.completed = True # Avoid any chance of spinning return 0 datalen = len(data) if rm <= datalen: self.buf.append(data[:rm]) self.remain = 0 self.completed = True return rm else: self.buf.append(data) self.remain -= datalen return datalen def getfile(self): return self.buf.getfile() def getbuf(self): return self.buf class ChunkedReceiver(object): chunk_remainder = 0 control_line = b"" all_chunks_received = False trailer = b"" completed = False error = None # max_control_line = 1024 # max_trailer = 65536 def __init__(self, buf): self.buf = buf def __len__(self): return self.buf.__len__() def received(self, s): # Returns the number of bytes consumed. if self.completed: return 0 orig_size = len(s) while s: rm = self.chunk_remainder if rm > 0: # Receive the remainder of a chunk. to_write = s[:rm] self.buf.append(to_write) written = len(to_write) s = s[written:] self.chunk_remainder -= written elif not self.all_chunks_received: # Receive a control line. 
s = self.control_line + s pos = s.find(b"\n") if pos < 0: # Control line not finished. self.control_line = s s = "" else: # Control line finished. line = s[:pos] s = s[pos + 1 :] self.control_line = b"" line = line.strip() if line: # Begin a new chunk. semi = line.find(b";") if semi >= 0: # discard extension info. line = line[:semi] try: sz = int(line.strip(), 16) # hexadecimal except ValueError: # garbage in input self.error = BadRequest("garbage in chunked encoding input") sz = 0 if sz > 0: # Start a new chunk. self.chunk_remainder = sz else: # Finished chunks. self.all_chunks_received = True # else expect a control line. else: # Receive the trailer. trailer = self.trailer + s if trailer.startswith(b"\r\n"): # No trailer. self.completed = True return orig_size - (len(trailer) - 2) elif trailer.startswith(b"\n"): # No trailer. self.completed = True return orig_size - (len(trailer) - 1) pos = find_double_newline(trailer) if pos < 0: # Trailer not finished. self.trailer = trailer s = b"" else: # Finished the trailer. self.completed = True self.trailer = trailer[:pos] return orig_size - (len(trailer) - pos) return orig_size def getfile(self): return self.buf.getfile() def getbuf(self): return self.buf
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1119_1
crossvul-python_data_bad_1121_2
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """HTTP Request Parser This server uses asyncore to accept connections and do initial processing but threads to do work. """ import re from io import BytesIO from waitress.buffers import OverflowableBuffer from waitress.compat import tostr, unquote_bytes_to_wsgi, urlparse from waitress.receiver import ChunkedReceiver, FixedStreamReceiver from waitress.utilities import ( BadRequest, RequestEntityTooLarge, RequestHeaderFieldsTooLarge, ServerNotImplemented, find_double_newline, ) class ParsingError(Exception): pass class TransferEncodingNotImplemented(Exception): pass class HTTPRequestParser(object): """A structure that collects the HTTP request. Once the stream is completed, the instance is passed to a server task constructor. """ completed = False # Set once request is completed. empty = False # Set if no request was made. expect_continue = False # client sent "Expect: 100-continue" header headers_finished = False # True when headers have been read header_plus = b"" chunked = False content_length = 0 header_bytes_received = 0 body_bytes_received = 0 body_rcv = None version = "1.0" error = None connection_close = False # Other attributes: first_line, header, headers, command, uri, version, # path, query, fragment def __init__(self, adj): """ adj is an Adjustments object. 
""" # headers is a mapping containing keys translated to uppercase # with dashes turned into underscores. self.headers = {} self.adj = adj def received(self, data): """ Receives the HTTP stream for one request. Returns the number of bytes consumed. Sets the completed flag once both the header and the body have been received. """ if self.completed: return 0 # Can't consume any more. datalen = len(data) br = self.body_rcv if br is None: # In header. max_header = self.adj.max_request_header_size s = self.header_plus + data index = find_double_newline(s) consumed = 0 if index >= 0: # If the headers have ended, and we also have part of the body # message in data we still want to validate we aren't going # over our limit for received headers. self.header_bytes_received += index consumed = datalen - (len(s) - index) else: self.header_bytes_received += datalen consumed = datalen # If the first line + headers is over the max length, we return a # RequestHeaderFieldsTooLarge error rather than continuing to # attempt to parse the headers. if self.header_bytes_received >= max_header: self.parse_header(b"GET / HTTP/1.0\r\n") self.error = RequestHeaderFieldsTooLarge( "exceeds max_header of %s" % max_header ) self.completed = True return consumed if index >= 0: # Header finished. header_plus = s[:index] # Remove preceeding blank lines. 
This is suggested by # https://tools.ietf.org/html/rfc7230#section-3.5 to support # clients sending an extra CR LF after another request when # using HTTP pipelining header_plus = header_plus.lstrip() if not header_plus: self.empty = True self.completed = True else: try: self.parse_header(header_plus) except ParsingError as e: self.error = BadRequest(e.args[0]) self.completed = True except TransferEncodingNotImplemented as e: self.error = ServerNotImplemented(e.args[0]) self.completed = True else: if self.body_rcv is None: # no content-length header and not a t-e: chunked # request self.completed = True if self.content_length > 0: max_body = self.adj.max_request_body_size # we won't accept this request if the content-length # is too large if self.content_length >= max_body: self.error = RequestEntityTooLarge( "exceeds max_body of %s" % max_body ) self.completed = True self.headers_finished = True return consumed # Header not finished yet. self.header_plus = s return datalen else: # In body. consumed = br.received(data) self.body_bytes_received += consumed max_body = self.adj.max_request_body_size if self.body_bytes_received >= max_body: # this will only be raised during t-e: chunked requests self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body) self.completed = True elif br.error: # garbage in chunked encoding input probably self.error = br.error self.completed = True elif br.completed: # The request (with the body) is ready to use. self.completed = True if self.chunked: # We've converted the chunked transfer encoding request # body into a normal request body, so we know its content # length; set the header here. We already popped the # TRANSFER_ENCODING header in parse_header, so this will # appear to the client to be an entirely non-chunked HTTP # request with a valid content-length. 
self.headers["CONTENT_LENGTH"] = str(br.__len__()) return consumed def parse_header(self, header_plus): """ Parses the header_plus block of text (the headers plus the first line of the request). """ index = header_plus.find(b"\r\n") if index >= 0: first_line = header_plus[:index].rstrip() header = header_plus[index + 2 :] else: raise ParsingError("HTTP message header invalid") if b"\r" in first_line or b"\n" in first_line: raise ParsingError("Bare CR or LF found in HTTP message") self.first_line = first_line # for testing lines = get_header_lines(header) headers = self.headers for line in lines: index = line.find(b":") if index > 0: key = line[:index] if key != key.strip(): raise ParsingError("Invalid whitespace after field-name") if b"_" in key: continue value = line[index + 1 :].strip() key1 = tostr(key.upper().replace(b"-", b"_")) # If a header already exists, we append subsequent values # seperated by a comma. Applications already need to handle # the comma seperated values, as HTTP front ends might do # the concatenation for you (behavior specified in RFC2616). try: headers[key1] += tostr(b", " + value) except KeyError: headers[key1] = tostr(value) # else there's garbage in the headers? 
# command, uri, version will be bytes command, uri, version = crack_first_line(first_line) version = tostr(version) command = tostr(command) self.command = command self.version = version ( self.proxy_scheme, self.proxy_netloc, self.path, self.query, self.fragment, ) = split_uri(uri) self.url_scheme = self.adj.url_scheme connection = headers.get("CONNECTION", "") if version == "1.0": if connection.lower() != "keep-alive": self.connection_close = True if version == "1.1": # since the server buffers data from chunked transfers and clients # never need to deal with chunked requests, downstream clients # should not see the HTTP_TRANSFER_ENCODING header; we pop it # here te = headers.pop("TRANSFER_ENCODING", "") encodings = [encoding.strip().lower() for encoding in te.split(",") if encoding] for encoding in encodings: # Out of the transfer-codings listed in # https://tools.ietf.org/html/rfc7230#section-4 we only support # chunked at this time. # Note: the identity transfer-coding was removed in RFC7230: # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus # not supported if encoding not in {"chunked"}: raise TransferEncodingNotImplemented( "Transfer-Encoding requested is not supported." ) if encodings and encodings[-1] == "chunked": self.chunked = True buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = ChunkedReceiver(buf) elif encodings: # pragma: nocover raise TransferEncodingNotImplemented( "Transfer-Encoding requested is not supported." 
) expect = headers.get("EXPECT", "").lower() self.expect_continue = expect == "100-continue" if connection.lower() == "close": self.connection_close = True if not self.chunked: try: cl = int(headers.get("CONTENT_LENGTH", 0)) except ValueError: raise ParsingError("Content-Length is invalid") self.content_length = cl if cl > 0: buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = FixedStreamReceiver(cl, buf) def get_body_stream(self): body_rcv = self.body_rcv if body_rcv is not None: return body_rcv.getfile() else: return BytesIO() def close(self): body_rcv = self.body_rcv if body_rcv is not None: body_rcv.getbuf().close() def split_uri(uri): # urlsplit handles byte input by returning bytes on py3, so # scheme, netloc, path, query, and fragment are bytes scheme = netloc = path = query = fragment = b"" # urlsplit below will treat this as a scheme-less netloc, thereby losing # the original intent of the request. Here we shamelessly stole 4 lines of # code from the CPython stdlib to parse out the fragment and query but # leave the path alone. See # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468 # and https://github.com/Pylons/waitress/issues/260 if uri[:2] == b"//": path = uri if b"#" in path: path, fragment = path.split(b"#", 1) if b"?" in path: path, query = path.split(b"?", 1) else: try: scheme, netloc, path, query, fragment = urlparse.urlsplit(uri) except UnicodeError: raise ParsingError("Bad URI") return ( tostr(scheme), tostr(netloc), unquote_bytes_to_wsgi(path), tostr(query), tostr(fragment), ) def get_header_lines(header): """ Splits the header into lines, putting multi-line headers together. 
""" r = [] lines = header.split(b"\r\n") for line in lines: if b"\r" in line or b"\n" in line: raise ParsingError('Bare CR or LF found in header line "%s"' % tostr(line)) if line.startswith((b" ", b"\t")): if not r: # https://corte.si/posts/code/pathod/pythonservers/index.html raise ParsingError('Malformed header line "%s"' % tostr(line)) r[-1] += line else: r.append(line) return r first_line_re = re.compile( b"([^ ]+) " b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)" b"(( HTTP/([0-9.]+))$|$)" ) def crack_first_line(line): m = first_line_re.match(line) if m is not None and m.end() == len(line): if m.group(3): version = m.group(5) else: version = b"" method = m.group(1) # the request methods that are currently defined are all uppercase: # https://www.iana.org/assignments/http-methods/http-methods.xhtml and # the request method is case sensitive according to # https://tools.ietf.org/html/rfc7231#section-4.1 # By disallowing anything but uppercase methods we save poor # unsuspecting souls from sending lowercase HTTP methods to waitress # and having the request complete, while servers like nginx drop the # request onto the floor. if method != method.upper(): raise ParsingError('Malformed HTTP method "%s"' % tostr(method)) uri = m.group(2) return method, uri, version else: return b"", b"", b""
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1121_2
crossvul-python_data_good_1119_1
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Data Chunk Receiver """ from waitress.utilities import BadRequest, find_double_newline class FixedStreamReceiver(object): # See IStreamConsumer completed = False error = None def __init__(self, cl, buf): self.remain = cl self.buf = buf def __len__(self): return self.buf.__len__() def received(self, data): "See IStreamConsumer" rm = self.remain if rm < 1: self.completed = True # Avoid any chance of spinning return 0 datalen = len(data) if rm <= datalen: self.buf.append(data[:rm]) self.remain = 0 self.completed = True return rm else: self.buf.append(data) self.remain -= datalen return datalen def getfile(self): return self.buf.getfile() def getbuf(self): return self.buf class ChunkedReceiver(object): chunk_remainder = 0 validate_chunk_end = False control_line = b"" all_chunks_received = False trailer = b"" completed = False error = None # max_control_line = 1024 # max_trailer = 65536 def __init__(self, buf): self.buf = buf def __len__(self): return self.buf.__len__() def received(self, s): # Returns the number of bytes consumed. if self.completed: return 0 orig_size = len(s) while s: rm = self.chunk_remainder if rm > 0: # Receive the remainder of a chunk. 
to_write = s[:rm] self.buf.append(to_write) written = len(to_write) s = s[written:] self.chunk_remainder -= written if self.chunk_remainder == 0: self.validate_chunk_end = True elif self.validate_chunk_end: pos = s.find(b"\r\n") if pos == 0: # Chop off the terminating CR LF from the chunk s = s[2:] else: self.error = BadRequest("Chunk not properly terminated") self.all_chunks_received = True # Always exit this loop self.validate_chunk_end = False elif not self.all_chunks_received: # Receive a control line. s = self.control_line + s pos = s.find(b"\r\n") if pos < 0: # Control line not finished. self.control_line = s s = "" else: # Control line finished. line = s[:pos] s = s[pos + 2 :] self.control_line = b"" line = line.strip() if line: # Begin a new chunk. semi = line.find(b";") if semi >= 0: # discard extension info. line = line[:semi] try: sz = int(line.strip(), 16) # hexadecimal except ValueError: # garbage in input self.error = BadRequest("garbage in chunked encoding input") sz = 0 if sz > 0: # Start a new chunk. self.chunk_remainder = sz else: # Finished chunks. self.all_chunks_received = True # else expect a control line. else: # Receive the trailer. trailer = self.trailer + s if trailer.startswith(b"\r\n"): # No trailer. self.completed = True return orig_size - (len(trailer) - 2) pos = find_double_newline(trailer) if pos < 0: # Trailer not finished. self.trailer = trailer s = b"" else: # Finished the trailer. self.completed = True self.trailer = trailer[:pos] return orig_size - (len(trailer) - pos) return orig_size def getfile(self): return self.buf.getfile() def getbuf(self): return self.buf
./CrossVul/dataset_final_sorted/CWE-444/py/good_1119_1
crossvul-python_data_bad_1121_3
404: Not Found
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1121_3
crossvul-python_data_good_1121_3
""" This contains a bunch of RFC7230 definitions and regular expressions that are needed to properly parse HTTP messages. """ import re from .compat import tobytes WS = "[ \t]" OWS = WS + "{0,}?" RWS = WS + "{1,}?" BWS = OWS # RFC 7230 Section 3.2.6 "Field Value Components": # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" # / DIGIT / ALPHA # obs-text = %x80-FF TCHAR = r"[!#$%&'*+\-.^_`|~0-9A-Za-z]" OBS_TEXT = r"\x80-\xff" TOKEN = TCHAR + "{1,}" # RFC 5234 Appendix B.1 "Core Rules": # VCHAR = %x21-7E # ; visible (printing) characters VCHAR = r"\x21-\x7e" # header-field = field-name ":" OWS field-value OWS # field-name = token # field-value = *( field-content / obs-fold ) # field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] # field-vchar = VCHAR / obs-text FIELD_VCHAR = "[" + VCHAR + OBS_TEXT + "]" FIELD_CONTENT = FIELD_VCHAR + "(" + RWS + FIELD_VCHAR + "){0,}" FIELD_VALUE = "(" + FIELD_CONTENT + "){0,}" HEADER_FIELD = re.compile( tobytes( "^(?P<name>" + TOKEN + "):" + OWS + "(?P<value>" + FIELD_VALUE + ")" + OWS + "$" ) )
./CrossVul/dataset_final_sorted/CWE-444/py/good_1121_3
crossvul-python_data_good_1121_5
############################################################################## # # Copyright (c) 2004 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Utility functions """ import calendar import errno import logging import os import re import stat import time from .rfc7230 import OBS_TEXT, VCHAR logger = logging.getLogger("waitress") queue_logger = logging.getLogger("waitress.queue") def find_double_newline(s): """Returns the position just after a double newline in the given string.""" pos = s.find(b"\r\n\r\n") if pos >= 0: pos += 4 return pos def concat(*args): return "".join(args) def join(seq, field=" "): return field.join(seq) def group(s): return "(" + s + ")" short_days = ["sun", "mon", "tue", "wed", "thu", "fri", "sat"] long_days = [ "sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", ] short_day_reg = group(join(short_days, "|")) long_day_reg = group(join(long_days, "|")) daymap = {} for i in range(7): daymap[short_days[i]] = i daymap[long_days[i]] = i hms_reg = join(3 * [group("[0-9][0-9]")], ":") months = [ "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec", ] monmap = {} for i in range(12): monmap[months[i]] = i + 1 months_reg = group(join(months, "|")) # From draft-ietf-http-v11-spec-07.txt/3.3.1 # Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 # Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 # Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format # rfc822 format rfc822_date = join( [ 
concat(short_day_reg, ","), # day group("[0-9][0-9]?"), # date months_reg, # month group("[0-9]+"), # year hms_reg, # hour minute second "gmt", ], " ", ) rfc822_reg = re.compile(rfc822_date) def unpack_rfc822(m): g = m.group return ( int(g(4)), # year monmap[g(3)], # month int(g(2)), # day int(g(5)), # hour int(g(6)), # minute int(g(7)), # second 0, 0, 0, ) # rfc850 format rfc850_date = join( [ concat(long_day_reg, ","), join([group("[0-9][0-9]?"), months_reg, group("[0-9]+")], "-"), hms_reg, "gmt", ], " ", ) rfc850_reg = re.compile(rfc850_date) # they actually unpack the same way def unpack_rfc850(m): g = m.group yr = g(4) if len(yr) == 2: yr = "19" + yr return ( int(yr), # year monmap[g(3)], # month int(g(2)), # day int(g(5)), # hour int(g(6)), # minute int(g(7)), # second 0, 0, 0, ) # parsdate.parsedate - ~700/sec. # parse_http_date - ~1333/sec. weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] monthname = [ None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ] def build_http_date(when): year, month, day, hh, mm, ss, wd, y, z = time.gmtime(when) return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( weekdayname[wd], day, monthname[month], year, hh, mm, ss, ) def parse_http_date(d): d = d.lower() m = rfc850_reg.match(d) if m and m.end() == len(d): retval = int(calendar.timegm(unpack_rfc850(m))) else: m = rfc822_reg.match(d) if m and m.end() == len(d): retval = int(calendar.timegm(unpack_rfc822(m))) else: return 0 return retval # RFC 5234 Appendix B.1 "Core Rules": # VCHAR = %x21-7E # ; visible (printing) characters vchar_re = VCHAR # RFC 7230 Section 3.2.6 "Field Value Components": # quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE # qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text # obs-text = %x80-FF # quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) obs_text_re = OBS_TEXT # The '\\' between \x5b and \x5d is needed to escape \x5d (']') qdtext_re = "[\t \x21\x23-\x5b\\\x5d-\x7e" + obs_text_re + "]" 
quoted_pair_re = r"\\" + "([\t " + vchar_re + obs_text_re + "])" quoted_string_re = '"(?:(?:' + qdtext_re + ")|(?:" + quoted_pair_re + '))*"' quoted_string = re.compile(quoted_string_re) quoted_pair = re.compile(quoted_pair_re) def undquote(value): if value.startswith('"') and value.endswith('"'): # So it claims to be DQUOTE'ed, let's validate that matches = quoted_string.match(value) if matches and matches.end() == len(value): # Remove the DQUOTE's from the value value = value[1:-1] # Remove all backslashes that are followed by a valid vchar or # obs-text value = quoted_pair.sub(r"\1", value) return value elif not value.startswith('"') and not value.endswith('"'): return value raise ValueError("Invalid quoting in value") def cleanup_unix_socket(path): try: st = os.stat(path) except OSError as exc: if exc.errno != errno.ENOENT: raise # pragma: no cover else: if stat.S_ISSOCK(st.st_mode): try: os.remove(path) except OSError: # pragma: no cover # avoid race condition error during tests pass class Error(object): code = 500 reason = "Internal Server Error" def __init__(self, body): self.body = body def to_response(self): status = "%s %s" % (self.code, self.reason) body = "%s\r\n\r\n%s" % (self.reason, self.body) tag = "\r\n\r\n(generated by waitress)" body = body + tag headers = [("Content-Type", "text/plain")] return status, headers, body def wsgi_response(self, environ, start_response): status, headers, body = self.to_response() start_response(status, headers) yield body class BadRequest(Error): code = 400 reason = "Bad Request" class RequestHeaderFieldsTooLarge(BadRequest): code = 431 reason = "Request Header Fields Too Large" class RequestEntityTooLarge(BadRequest): code = 413 reason = "Request Entity Too Large" class InternalServerError(Error): code = 500 reason = "Internal Server Error" class ServerNotImplemented(Error): code = 501 reason = "Not Implemented"
./CrossVul/dataset_final_sorted/CWE-444/py/good_1121_5
crossvul-python_data_bad_1121_1
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) try: README = open(os.path.join(here, "README.rst")).read() CHANGES = open(os.path.join(here, "CHANGES.txt")).read() except IOError: README = CHANGES = "" docs_extras = [ "Sphinx>=1.8.1", "docutils", "pylons-sphinx-themes>=1.0.9", ] testing_extras = [ "nose", "coverage>=5.0", ] setup( name="waitress", version="1.4.0", author="Zope Foundation and Contributors", author_email="zope-dev@zope.org", maintainer="Pylons Project", maintainer_email="pylons-discuss@googlegroups.com", description="Waitress WSGI server", long_description=README + "\n\n" + CHANGES, license="ZPL 2.1", keywords="waitress wsgi server http", classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: Zope Public License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Natural Language :: 
English", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI", ], url="https://github.com/Pylons/waitress", packages=find_packages(), extras_require={"testing": testing_extras, "docs": docs_extras,}, include_package_data=True, test_suite="waitress", zip_safe=False, entry_points=""" [paste.server_runner] main = waitress:serve_paste [console_scripts] waitress-serve = waitress.runner:run """, )
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1121_1
crossvul-python_data_good_1120_2
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) try: README = open(os.path.join(here, "README.rst")).read() CHANGES = open(os.path.join(here, "CHANGES.txt")).read() except IOError: README = CHANGES = "" docs_extras = [ "Sphinx>=1.8.1", "docutils", "pylons-sphinx-themes>=1.0.9", ] testing_extras = [ "nose", "coverage>=5.0", ] setup( name="waitress", version="1.4.0", author="Zope Foundation and Contributors", author_email="zope-dev@zope.org", maintainer="Pylons Project", maintainer_email="pylons-discuss@googlegroups.com", description="Waitress WSGI server", long_description=README + "\n\n" + CHANGES, license="ZPL 2.1", keywords="waitress wsgi server http", classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: Zope Public License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Natural Language :: 
English", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI", ], url="https://github.com/Pylons/waitress", packages=find_packages(), extras_require={"testing": testing_extras, "docs": docs_extras,}, include_package_data=True, test_suite="waitress", zip_safe=False, entry_points=""" [paste.server_runner] main = waitress:serve_paste [console_scripts] waitress-serve = waitress.runner:run """, )
./CrossVul/dataset_final_sorted/CWE-444/py/good_1120_2
crossvul-python_data_bad_1119_0
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """HTTP Request Parser This server uses asyncore to accept connections and do initial processing but threads to do work. """ import re from io import BytesIO from waitress.compat import ( tostr, urlparse, unquote_bytes_to_wsgi, ) from waitress.buffers import OverflowableBuffer from waitress.receiver import ( FixedStreamReceiver, ChunkedReceiver, ) from waitress.utilities import ( find_double_newline, RequestEntityTooLarge, RequestHeaderFieldsTooLarge, BadRequest, ) class ParsingError(Exception): pass class HTTPRequestParser(object): """A structure that collects the HTTP request. Once the stream is completed, the instance is passed to a server task constructor. """ completed = False # Set once request is completed. empty = False # Set if no request was made. expect_continue = False # client sent "Expect: 100-continue" header headers_finished = False # True when headers have been read header_plus = b"" chunked = False content_length = 0 header_bytes_received = 0 body_bytes_received = 0 body_rcv = None version = "1.0" error = None connection_close = False # Other attributes: first_line, header, headers, command, uri, version, # path, query, fragment def __init__(self, adj): """ adj is an Adjustments object. """ # headers is a mapping containing keys translated to uppercase # with dashes turned into underscores. 
self.headers = {} self.adj = adj def received(self, data): """ Receives the HTTP stream for one request. Returns the number of bytes consumed. Sets the completed flag once both the header and the body have been received. """ if self.completed: return 0 # Can't consume any more. datalen = len(data) br = self.body_rcv if br is None: # In header. s = self.header_plus + data index = find_double_newline(s) if index >= 0: # Header finished. header_plus = s[:index] consumed = len(data) - (len(s) - index) # Remove preceeding blank lines. header_plus = header_plus.lstrip() if not header_plus: self.empty = True self.completed = True else: try: self.parse_header(header_plus) except ParsingError as e: self.error = BadRequest(e.args[0]) self.completed = True else: if self.body_rcv is None: # no content-length header and not a t-e: chunked # request self.completed = True if self.content_length > 0: max_body = self.adj.max_request_body_size # we won't accept this request if the content-length # is too large if self.content_length >= max_body: self.error = RequestEntityTooLarge( "exceeds max_body of %s" % max_body ) self.completed = True self.headers_finished = True return consumed else: # Header not finished yet. self.header_bytes_received += datalen max_header = self.adj.max_request_header_size if self.header_bytes_received >= max_header: # malformed header, we need to construct some request # on our own. we disregard the incoming(?) requests HTTP # version and just use 1.0. IOW someone just sent garbage # over the wire self.parse_header(b"GET / HTTP/1.0\n") self.error = RequestHeaderFieldsTooLarge( "exceeds max_header of %s" % max_header ) self.completed = True self.header_plus = s return datalen else: # In body. 
consumed = br.received(data) self.body_bytes_received += consumed max_body = self.adj.max_request_body_size if self.body_bytes_received >= max_body: # this will only be raised during t-e: chunked requests self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body) self.completed = True elif br.error: # garbage in chunked encoding input probably self.error = br.error self.completed = True elif br.completed: # The request (with the body) is ready to use. self.completed = True if self.chunked: # We've converted the chunked transfer encoding request # body into a normal request body, so we know its content # length; set the header here. We already popped the # TRANSFER_ENCODING header in parse_header, so this will # appear to the client to be an entirely non-chunked HTTP # request with a valid content-length. self.headers["CONTENT_LENGTH"] = str(br.__len__()) return consumed def parse_header(self, header_plus): """ Parses the header_plus block of text (the headers plus the first line of the request). """ index = header_plus.find(b"\n") if index >= 0: first_line = header_plus[:index].rstrip() header = header_plus[index + 1 :] else: first_line = header_plus.rstrip() header = b"" self.first_line = first_line # for testing lines = get_header_lines(header) headers = self.headers for line in lines: index = line.find(b":") if index > 0: key = line[:index] if b"_" in key: continue value = line[index + 1 :].strip() key1 = tostr(key.upper().replace(b"-", b"_")) # If a header already exists, we append subsequent values # seperated by a comma. Applications already need to handle # the comma seperated values, as HTTP front ends might do # the concatenation for you (behavior specified in RFC2616). try: headers[key1] += tostr(b", " + value) except KeyError: headers[key1] = tostr(value) # else there's garbage in the headers? 
# command, uri, version will be bytes command, uri, version = crack_first_line(first_line) version = tostr(version) command = tostr(command) self.command = command self.version = version ( self.proxy_scheme, self.proxy_netloc, self.path, self.query, self.fragment, ) = split_uri(uri) self.url_scheme = self.adj.url_scheme connection = headers.get("CONNECTION", "") if version == "1.0": if connection.lower() != "keep-alive": self.connection_close = True if version == "1.1": # since the server buffers data from chunked transfers and clients # never need to deal with chunked requests, downstream clients # should not see the HTTP_TRANSFER_ENCODING header; we pop it # here te = headers.pop("TRANSFER_ENCODING", "") if te.lower() == "chunked": self.chunked = True buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = ChunkedReceiver(buf) expect = headers.get("EXPECT", "").lower() self.expect_continue = expect == "100-continue" if connection.lower() == "close": self.connection_close = True if not self.chunked: try: cl = int(headers.get("CONTENT_LENGTH", 0)) except ValueError: cl = 0 self.content_length = cl if cl > 0: buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = FixedStreamReceiver(cl, buf) def get_body_stream(self): body_rcv = self.body_rcv if body_rcv is not None: return body_rcv.getfile() else: return BytesIO() def close(self): body_rcv = self.body_rcv if body_rcv is not None: body_rcv.getbuf().close() def split_uri(uri): # urlsplit handles byte input by returning bytes on py3, so # scheme, netloc, path, query, and fragment are bytes scheme = netloc = path = query = fragment = b"" # urlsplit below will treat this as a scheme-less netloc, thereby losing # the original intent of the request. Here we shamelessly stole 4 lines of # code from the CPython stdlib to parse out the fragment and query but # leave the path alone. 
See # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468 # and https://github.com/Pylons/waitress/issues/260 if uri[:2] == b"//": path = uri if b"#" in path: path, fragment = path.split(b"#", 1) if b"?" in path: path, query = path.split(b"?", 1) else: try: scheme, netloc, path, query, fragment = urlparse.urlsplit(uri) except UnicodeError: raise ParsingError("Bad URI") return ( tostr(scheme), tostr(netloc), unquote_bytes_to_wsgi(path), tostr(query), tostr(fragment), ) def get_header_lines(header): """ Splits the header into lines, putting multi-line headers together. """ r = [] lines = header.split(b"\n") for line in lines: if line.startswith((b" ", b"\t")): if not r: # https://corte.si/posts/code/pathod/pythonservers/index.html raise ParsingError('Malformed header line "%s"' % tostr(line)) r[-1] += line else: r.append(line) return r first_line_re = re.compile( b"([^ ]+) " b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)" b"(( HTTP/([0-9.]+))$|$)" ) def crack_first_line(line): m = first_line_re.match(line) if m is not None and m.end() == len(line): if m.group(3): version = m.group(5) else: version = b"" method = m.group(1) # the request methods that are currently defined are all uppercase: # https://www.iana.org/assignments/http-methods/http-methods.xhtml and # the request method is case sensitive according to # https://tools.ietf.org/html/rfc7231#section-4.1 # By disallowing anything but uppercase methods we save poor # unsuspecting souls from sending lowercase HTTP methods to waitress # and having the request complete, while servers like nginx drop the # request onto the floor. if method != method.upper(): raise ParsingError('Malformed HTTP method "%s"' % tostr(method)) uri = m.group(2) return method, uri, version else: return b"", b"", b""
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1119_0
crossvul-python_data_good_1121_1
############################################################################## # # Copyright (c) 2006 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) try: README = open(os.path.join(here, "README.rst")).read() CHANGES = open(os.path.join(here, "CHANGES.txt")).read() except IOError: README = CHANGES = "" docs_extras = [ "Sphinx>=1.8.1", "docutils", "pylons-sphinx-themes>=1.0.9", ] testing_extras = [ "nose", "coverage>=5.0", ] setup( name="waitress", version="1.4.1", author="Zope Foundation and Contributors", author_email="zope-dev@zope.org", maintainer="Pylons Project", maintainer_email="pylons-discuss@googlegroups.com", description="Waitress WSGI server", long_description=README + "\n\n" + CHANGES, license="ZPL 2.1", keywords="waitress wsgi server http", classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: Zope Public License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Natural Language :: 
English", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI", ], url="https://github.com/Pylons/waitress", packages=find_packages(), extras_require={"testing": testing_extras, "docs": docs_extras,}, include_package_data=True, test_suite="waitress", zip_safe=False, entry_points=""" [paste.server_runner] main = waitress:serve_paste [console_scripts] waitress-serve = waitress.runner:run """, )
./CrossVul/dataset_final_sorted/CWE-444/py/good_1121_1
crossvul-python_data_good_1121_2
############################################################################## # # Copyright (c) 2001, 2002 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """HTTP Request Parser This server uses asyncore to accept connections and do initial processing but threads to do work. """ import re from io import BytesIO from waitress.buffers import OverflowableBuffer from waitress.compat import tostr, unquote_bytes_to_wsgi, urlparse from waitress.receiver import ChunkedReceiver, FixedStreamReceiver from waitress.utilities import ( BadRequest, RequestEntityTooLarge, RequestHeaderFieldsTooLarge, ServerNotImplemented, find_double_newline, ) from .rfc7230 import HEADER_FIELD class ParsingError(Exception): pass class TransferEncodingNotImplemented(Exception): pass class HTTPRequestParser(object): """A structure that collects the HTTP request. Once the stream is completed, the instance is passed to a server task constructor. """ completed = False # Set once request is completed. empty = False # Set if no request was made. expect_continue = False # client sent "Expect: 100-continue" header headers_finished = False # True when headers have been read header_plus = b"" chunked = False content_length = 0 header_bytes_received = 0 body_bytes_received = 0 body_rcv = None version = "1.0" error = None connection_close = False # Other attributes: first_line, header, headers, command, uri, version, # path, query, fragment def __init__(self, adj): """ adj is an Adjustments object. 
""" # headers is a mapping containing keys translated to uppercase # with dashes turned into underscores. self.headers = {} self.adj = adj def received(self, data): """ Receives the HTTP stream for one request. Returns the number of bytes consumed. Sets the completed flag once both the header and the body have been received. """ if self.completed: return 0 # Can't consume any more. datalen = len(data) br = self.body_rcv if br is None: # In header. max_header = self.adj.max_request_header_size s = self.header_plus + data index = find_double_newline(s) consumed = 0 if index >= 0: # If the headers have ended, and we also have part of the body # message in data we still want to validate we aren't going # over our limit for received headers. self.header_bytes_received += index consumed = datalen - (len(s) - index) else: self.header_bytes_received += datalen consumed = datalen # If the first line + headers is over the max length, we return a # RequestHeaderFieldsTooLarge error rather than continuing to # attempt to parse the headers. if self.header_bytes_received >= max_header: self.parse_header(b"GET / HTTP/1.0\r\n") self.error = RequestHeaderFieldsTooLarge( "exceeds max_header of %s" % max_header ) self.completed = True return consumed if index >= 0: # Header finished. header_plus = s[:index] # Remove preceeding blank lines. 
This is suggested by # https://tools.ietf.org/html/rfc7230#section-3.5 to support # clients sending an extra CR LF after another request when # using HTTP pipelining header_plus = header_plus.lstrip() if not header_plus: self.empty = True self.completed = True else: try: self.parse_header(header_plus) except ParsingError as e: self.error = BadRequest(e.args[0]) self.completed = True except TransferEncodingNotImplemented as e: self.error = ServerNotImplemented(e.args[0]) self.completed = True else: if self.body_rcv is None: # no content-length header and not a t-e: chunked # request self.completed = True if self.content_length > 0: max_body = self.adj.max_request_body_size # we won't accept this request if the content-length # is too large if self.content_length >= max_body: self.error = RequestEntityTooLarge( "exceeds max_body of %s" % max_body ) self.completed = True self.headers_finished = True return consumed # Header not finished yet. self.header_plus = s return datalen else: # In body. consumed = br.received(data) self.body_bytes_received += consumed max_body = self.adj.max_request_body_size if self.body_bytes_received >= max_body: # this will only be raised during t-e: chunked requests self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body) self.completed = True elif br.error: # garbage in chunked encoding input probably self.error = br.error self.completed = True elif br.completed: # The request (with the body) is ready to use. self.completed = True if self.chunked: # We've converted the chunked transfer encoding request # body into a normal request body, so we know its content # length; set the header here. We already popped the # TRANSFER_ENCODING header in parse_header, so this will # appear to the client to be an entirely non-chunked HTTP # request with a valid content-length. 
self.headers["CONTENT_LENGTH"] = str(br.__len__()) return consumed def parse_header(self, header_plus): """ Parses the header_plus block of text (the headers plus the first line of the request). """ index = header_plus.find(b"\r\n") if index >= 0: first_line = header_plus[:index].rstrip() header = header_plus[index + 2 :] else: raise ParsingError("HTTP message header invalid") if b"\r" in first_line or b"\n" in first_line: raise ParsingError("Bare CR or LF found in HTTP message") self.first_line = first_line # for testing lines = get_header_lines(header) headers = self.headers for line in lines: header = HEADER_FIELD.match(line) if not header: raise ParsingError("Invalid header") key, value = header.group('name', 'value') if b"_" in key: # TODO(xistence): Should we drop this request instead? continue value = value.strip() key1 = tostr(key.upper().replace(b"-", b"_")) # If a header already exists, we append subsequent values # seperated by a comma. Applications already need to handle # the comma seperated values, as HTTP front ends might do # the concatenation for you (behavior specified in RFC2616). 
try: headers[key1] += tostr(b", " + value) except KeyError: headers[key1] = tostr(value) # command, uri, version will be bytes command, uri, version = crack_first_line(first_line) version = tostr(version) command = tostr(command) self.command = command self.version = version ( self.proxy_scheme, self.proxy_netloc, self.path, self.query, self.fragment, ) = split_uri(uri) self.url_scheme = self.adj.url_scheme connection = headers.get("CONNECTION", "") if version == "1.0": if connection.lower() != "keep-alive": self.connection_close = True if version == "1.1": # since the server buffers data from chunked transfers and clients # never need to deal with chunked requests, downstream clients # should not see the HTTP_TRANSFER_ENCODING header; we pop it # here te = headers.pop("TRANSFER_ENCODING", "") encodings = [encoding.strip().lower() for encoding in te.split(",") if encoding] for encoding in encodings: # Out of the transfer-codings listed in # https://tools.ietf.org/html/rfc7230#section-4 we only support # chunked at this time. # Note: the identity transfer-coding was removed in RFC7230: # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is thus # not supported if encoding not in {"chunked"}: raise TransferEncodingNotImplemented( "Transfer-Encoding requested is not supported." ) if encodings and encodings[-1] == "chunked": self.chunked = True buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = ChunkedReceiver(buf) elif encodings: # pragma: nocover raise TransferEncodingNotImplemented( "Transfer-Encoding requested is not supported." 
) expect = headers.get("EXPECT", "").lower() self.expect_continue = expect == "100-continue" if connection.lower() == "close": self.connection_close = True if not self.chunked: try: cl = int(headers.get("CONTENT_LENGTH", 0)) except ValueError: raise ParsingError("Content-Length is invalid") self.content_length = cl if cl > 0: buf = OverflowableBuffer(self.adj.inbuf_overflow) self.body_rcv = FixedStreamReceiver(cl, buf) def get_body_stream(self): body_rcv = self.body_rcv if body_rcv is not None: return body_rcv.getfile() else: return BytesIO() def close(self): body_rcv = self.body_rcv if body_rcv is not None: body_rcv.getbuf().close() def split_uri(uri): # urlsplit handles byte input by returning bytes on py3, so # scheme, netloc, path, query, and fragment are bytes scheme = netloc = path = query = fragment = b"" # urlsplit below will treat this as a scheme-less netloc, thereby losing # the original intent of the request. Here we shamelessly stole 4 lines of # code from the CPython stdlib to parse out the fragment and query but # leave the path alone. See # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468 # and https://github.com/Pylons/waitress/issues/260 if uri[:2] == b"//": path = uri if b"#" in path: path, fragment = path.split(b"#", 1) if b"?" in path: path, query = path.split(b"?", 1) else: try: scheme, netloc, path, query, fragment = urlparse.urlsplit(uri) except UnicodeError: raise ParsingError("Bad URI") return ( tostr(scheme), tostr(netloc), unquote_bytes_to_wsgi(path), tostr(query), tostr(fragment), ) def get_header_lines(header): """ Splits the header into lines, putting multi-line headers together. 
""" r = [] lines = header.split(b"\r\n") for line in lines: if not line: continue if b"\r" in line or b"\n" in line: raise ParsingError('Bare CR or LF found in header line "%s"' % tostr(line)) if line.startswith((b" ", b"\t")): if not r: # https://corte.si/posts/code/pathod/pythonservers/index.html raise ParsingError('Malformed header line "%s"' % tostr(line)) r[-1] += line else: r.append(line) return r first_line_re = re.compile( b"([^ ]+) " b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)" b"(( HTTP/([0-9.]+))$|$)" ) def crack_first_line(line): m = first_line_re.match(line) if m is not None and m.end() == len(line): if m.group(3): version = m.group(5) else: version = b"" method = m.group(1) # the request methods that are currently defined are all uppercase: # https://www.iana.org/assignments/http-methods/http-methods.xhtml and # the request method is case sensitive according to # https://tools.ietf.org/html/rfc7231#section-4.1 # By disallowing anything but uppercase methods we save poor # unsuspecting souls from sending lowercase HTTP methods to waitress # and having the request complete, while servers like nginx drop the # request onto the floor. if method != method.upper(): raise ParsingError('Malformed HTTP method "%s"' % tostr(method)) uri = m.group(2) return method, uri, version else: return b"", b"", b""
./CrossVul/dataset_final_sorted/CWE-444/py/good_1121_2
crossvul-python_data_bad_1121_5
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility functions shared by the waitress server internals."""

import calendar
import errno
import logging
import os
import re
import stat
import time

logger = logging.getLogger("waitress")
queue_logger = logging.getLogger("waitress.queue")


def find_double_newline(s):
    """Return the index just past the first CRLF CRLF in *s*, or -1."""
    pos = s.find(b"\r\n\r\n")
    return pos + 4 if pos >= 0 else pos


def concat(*args):
    """Concatenate all string arguments."""
    return "".join(args)


def join(seq, field=" "):
    """Join *seq* with *field* (a single space by default)."""
    return field.join(seq)


def group(s):
    """Wrap *s* in a regular-expression group."""
    return "(" + s + ")"


short_days = ["sun", "mon", "tue", "wed", "thu", "fri", "sat"]
long_days = [
    "sunday",
    "monday",
    "tuesday",
    "wednesday",
    "thursday",
    "friday",
    "saturday",
]

short_day_reg = group(join(short_days, "|"))
long_day_reg = group(join(long_days, "|"))

# Both the short and the long spelling of each weekday map to its index.
daymap = {}
for day_index, day_names in enumerate(zip(short_days, long_days)):
    for day_name in day_names:
        daymap[day_name] = day_index

hms_reg = join(3 * [group("[0-9][0-9]")], ":")

months = [
    "jan",
    "feb",
    "mar",
    "apr",
    "may",
    "jun",
    "jul",
    "aug",
    "sep",
    "oct",
    "nov",
    "dec",
]

# Month abbreviation -> 1-based month number.
monmap = {month_name: number for number, month_name in enumerate(months, start=1)}

months_reg = group(join(months, "|"))

# From draft-ietf-http-v11-spec-07.txt/3.3.1
#       Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
#       Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
#       Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

# RFC 822 date layout.
rfc822_date = join(
    [
        concat(short_day_reg, ","),  # day name
        group("[0-9][0-9]?"),  # day of month
        months_reg,  # month
        group("[0-9]+"),  # year
        hms_reg,  # hh:mm:ss
        "gmt",
    ],
    " ",
)

rfc822_reg = re.compile(rfc822_date)


def unpack_rfc822(m):
    """Turn an rfc822_reg match into a timegm-compatible 9-tuple."""
    groups = m.group
    return (
        int(groups(4)),  # year
        monmap[groups(3)],  # month
        int(groups(2)),  # day
        int(groups(5)),  # hour
        int(groups(6)),  # minute
        int(groups(7)),  # second
        0,
        0,
        0,
    )


# RFC 850 date layout.
rfc850_date = join(
    [
        concat(long_day_reg, ","),
        join([group("[0-9][0-9]?"), months_reg, group("[0-9]+")], "-"),
        hms_reg,
        "gmt",
    ],
    " ",
)

rfc850_reg = re.compile(rfc850_date)


def unpack_rfc850(m):
    """Turn an rfc850_reg match into a 9-tuple; 2-digit years become 19xx."""
    groups = m.group
    year_text = groups(4)
    if len(year_text) == 2:
        year_text = "19" + year_text
    return (
        int(year_text),  # year
        monmap[groups(3)],  # month
        int(groups(2)),  # day
        int(groups(5)),  # hour
        int(groups(6)),  # minute
        int(groups(7)),  # second
        0,
        0,
        0,
    )


# parsdate.parsedate    - ~700/sec.
# parse_http_date       - ~1333/sec.

weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
monthname = [
    None,
    "Jan",
    "Feb",
    "Mar",
    "Apr",
    "May",
    "Jun",
    "Jul",
    "Aug",
    "Sep",
    "Oct",
    "Nov",
    "Dec",
]


def build_http_date(when):
    """Format epoch timestamp *when* as an RFC 1123 (IMF-fixdate) string."""
    year, month, day, hh, mm, ss, wd, _yday, _dst = time.gmtime(when)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        weekdayname[wd],
        day,
        monthname[month],
        year,
        hh,
        mm,
        ss,
    )


def parse_http_date(d):
    """Parse an RFC 850 or RFC 822 HTTP date string into an epoch int.

    Returns 0 when *d* matches neither format completely.
    """
    d = d.lower()

    m = rfc850_reg.match(d)
    if m and m.end() == len(d):
        return int(calendar.timegm(unpack_rfc850(m)))

    m = rfc822_reg.match(d)
    if m and m.end() == len(d):
        return int(calendar.timegm(unpack_rfc822(m)))

    return 0


# RFC 5234 Appendix B.1 "Core Rules":
# VCHAR          =  %x21-7E
#                   ; visible (printing) characters
vchar_re = "\x21-\x7e"

# RFC 7230 Section 3.2.6 "Field Value Components":
# quoted-string  = DQUOTE *( qdtext / quoted-pair ) DQUOTE
# qdtext         = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text
# obs-text       = %x80-FF
# quoted-pair    = "\" ( HTAB / SP / VCHAR / obs-text )
obs_text_re = "\x80-\xff"

# The '\\' between \x5b and \x5d is needed to escape \x5d (']')
qdtext_re = "[\t \x21\x23-\x5b\\\x5d-\x7e" + obs_text_re + "]"

quoted_pair_re = r"\\" + "([\t " + vchar_re + obs_text_re + "])"
quoted_string_re = '"(?:(?:' + qdtext_re + ")|(?:" + quoted_pair_re + '))*"'

quoted_string = re.compile(quoted_string_re)
quoted_pair = re.compile(quoted_pair_re)


def undquote(value):
    """Strip RFC 7230 double-quoting (and backslash escapes) from *value*.

    A value without any surrounding quotes is returned unchanged; anything
    inconsistently or invalidly quoted raises ValueError.
    """
    has_open_quote = value.startswith('"')
    has_close_quote = value.endswith('"')

    if has_open_quote and has_close_quote:
        # Claims to be DQUOTE'ed; validate it against the grammar.
        matched = quoted_string.match(value)

        if matched and matched.end() == len(value):
            # Drop the surrounding DQUOTEs, then resolve quoted-pairs by
            # removing each backslash that precedes a valid vchar/obs-text.
            inner = value[1:-1]
            return quoted_pair.sub(r"\1", inner)
    elif not has_open_quote and not has_close_quote:
        return value

    raise ValueError("Invalid quoting in value")


def cleanup_unix_socket(path):
    """Remove the file at *path* if it exists and is a Unix socket."""
    try:
        st = os.stat(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise  # pragma: no cover
        return

    if stat.S_ISSOCK(st.st_mode):
        try:
            os.remove(path)
        except OSError:  # pragma: no cover
            # avoid race condition error during tests
            pass


class Error(object):
    """Base class for HTTP errors the server can answer with directly."""

    code = 500
    reason = "Internal Server Error"

    def __init__(self, body):
        self.body = body

    def to_response(self):
        """Return a (status, headers, body) triple for this error."""
        status = "%s %s" % (self.code, self.reason)
        body = "%s\r\n\r\n%s\r\n\r\n(generated by waitress)" % (self.reason, self.body)
        headers = [("Content-Type", "text/plain")]
        return status, headers, body

    def wsgi_response(self, environ, start_response):
        """Minimal WSGI application that serves this error."""
        status, headers, body = self.to_response()
        start_response(status, headers)
        yield body


class BadRequest(Error):
    """400 Bad Request."""

    code = 400
    reason = "Bad Request"


class RequestHeaderFieldsTooLarge(BadRequest):
    """431 Request Header Fields Too Large."""

    code = 431
    reason = "Request Header Fields Too Large"


class RequestEntityTooLarge(BadRequest):
    """413 Request Entity Too Large."""

    code = 413
    reason = "Request Entity Too Large"


class InternalServerError(Error):
    """500 Internal Server Error."""

    code = 500
    reason = "Internal Server Error"


class ServerNotImplemented(Error):
    """501 Not Implemented."""

    code = 501
    reason = "Not Implemented"
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1121_5
crossvul-python_data_bad_1123_0
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""HTTP Request Parser

This server uses asyncore to accept connections and do initial
processing but threads to do work.
"""
import re

from io import BytesIO

from waitress.buffers import OverflowableBuffer
from waitress.compat import tostr, unquote_bytes_to_wsgi, urlparse
from waitress.receiver import ChunkedReceiver, FixedStreamReceiver
from waitress.utilities import (
    BadRequest,
    RequestEntityTooLarge,
    RequestHeaderFieldsTooLarge,
    ServerNotImplemented,
    find_double_newline,
)


class ParsingError(Exception):
    """Raised when the request line or a header field is malformed."""


class TransferEncodingNotImplemented(Exception):
    """Raised when a Transfer-Encoding other than "chunked" is requested."""


# RFC 7230 section 3.2:  header-field = field-name ":" OWS field-value OWS
# where field-name is a token (no whitespace, no separators).  Parsing
# header lines with a strict pattern -- and *rejecting* anything that does
# not match -- prevents request smuggling: a permissive parser that skips
# "garbage" lines can disagree with an upstream proxy about where a request
# ends.
HEADER_FIELD = re.compile(
    br"(?P<name>[!#$%&'*+\-.^_`|~0-9A-Za-z]+)"
    br":"
    br"[ \t]*(?P<value>[^\r\n]*)[ \t]*"
)


class HTTPRequestParser(object):
    """A structure that collects the HTTP request.

    Once the stream is completed, the instance is passed to
    a server task constructor.
    """

    completed = False  # Set once request is completed.
    empty = False  # Set if no request was made.
    expect_continue = False  # client sent "Expect: 100-continue" header
    headers_finished = False  # True when headers have been read
    header_plus = b""
    chunked = False
    content_length = 0
    header_bytes_received = 0
    body_bytes_received = 0
    body_rcv = None
    version = "1.0"
    error = None
    connection_close = False

    # Other attributes: first_line, header, headers, command, uri, version,
    # path, query, fragment

    def __init__(self, adj):
        """
        adj is an Adjustments object.
        """
        # headers is a mapping containing keys translated to uppercase
        # with dashes turned into underscores.
        self.headers = {}
        self.adj = adj

    def received(self, data):
        """
        Receives the HTTP stream for one request.  Returns the number of
        bytes consumed.  Sets the completed flag once both the header and
        the body have been received.
        """
        if self.completed:
            return 0  # Can't consume any more.

        datalen = len(data)
        br = self.body_rcv

        if br is None:
            # In header.
            max_header = self.adj.max_request_header_size
            s = self.header_plus + data
            index = find_double_newline(s)
            consumed = 0

            if index >= 0:
                # If the headers have ended, and we also have part of the body
                # message in data we still want to validate we aren't going
                # over our limit for received headers.
                self.header_bytes_received += index
                consumed = datalen - (len(s) - index)
            else:
                self.header_bytes_received += datalen
                consumed = datalen

            # If the first line + headers is over the max length, we return a
            # RequestHeaderFieldsTooLarge error rather than continuing to
            # attempt to parse the headers.
            if self.header_bytes_received >= max_header:
                # Substitute a tiny valid request so downstream state is
                # coherent before reporting the 431 error.
                self.parse_header(b"GET / HTTP/1.0\r\n")
                self.error = RequestHeaderFieldsTooLarge(
                    "exceeds max_header of %s" % max_header
                )
                self.completed = True
                return consumed

            if index >= 0:
                # Header finished.
                header_plus = s[:index]

                # Remove preceding blank lines.  This is suggested by
                # https://tools.ietf.org/html/rfc7230#section-3.5 to support
                # clients sending an extra CR LF after another request when
                # using HTTP pipelining.
                header_plus = header_plus.lstrip()

                if not header_plus:
                    self.empty = True
                    self.completed = True
                else:
                    try:
                        self.parse_header(header_plus)
                    except ParsingError as e:
                        self.error = BadRequest(e.args[0])
                        self.completed = True
                    except TransferEncodingNotImplemented as e:
                        # We understood the request but refuse the
                        # transfer-coding: 501 (RFC 7230 section 3.3.1).
                        self.error = ServerNotImplemented(e.args[0])
                        self.completed = True
                    else:
                        if self.body_rcv is None:
                            # no content-length header and not a t-e: chunked
                            # request
                            self.completed = True

                        if self.content_length > 0:
                            max_body = self.adj.max_request_body_size
                            # we won't accept this request if the content-length
                            # is too large
                            if self.content_length >= max_body:
                                self.error = RequestEntityTooLarge(
                                    "exceeds max_body of %s" % max_body
                                )
                                self.completed = True
                self.headers_finished = True
                return consumed

            # Header not finished yet.
            self.header_plus = s
            return datalen
        else:
            # In body.
            consumed = br.received(data)
            self.body_bytes_received += consumed
            max_body = self.adj.max_request_body_size

            if self.body_bytes_received >= max_body:
                # this will only be raised during t-e: chunked requests
                self.error = RequestEntityTooLarge("exceeds max_body of %s" % max_body)
                self.completed = True
            elif br.error:
                # garbage in chunked encoding input probably
                self.error = br.error
                self.completed = True
            elif br.completed:
                # The request (with the body) is ready to use.
                self.completed = True

                if self.chunked:
                    # We've converted the chunked transfer encoding request
                    # body into a normal request body, so we know its content
                    # length; set the header here.  We already popped the
                    # TRANSFER_ENCODING header in parse_header, so this will
                    # appear to the client to be an entirely non-chunked HTTP
                    # request with a valid content-length.
                    self.headers["CONTENT_LENGTH"] = str(br.__len__())

        return consumed

    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).

        Raises ParsingError for any malformed request line or header
        field, and TransferEncodingNotImplemented for transfer-codings
        other than "chunked".
        """
        index = header_plus.find(b"\r\n")

        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 2 :]
        else:
            raise ParsingError("HTTP message header invalid")

        if b"\r" in first_line or b"\n" in first_line:
            raise ParsingError("Bare CR or LF found in HTTP message")

        self.first_line = first_line  # for testing

        lines = get_header_lines(header)

        headers = self.headers

        for line in lines:
            header = HEADER_FIELD.match(line)

            if not header:
                # FIX: previously any line without a well-placed colon was
                # silently skipped, letting waitress and an upstream proxy
                # interpret the same bytes differently (request smuggling).
                raise ParsingError("Invalid header")

            key, value = header.group("name", "value")

            if b"_" in key:
                # Underscores would collide with CGI-style environ keys,
                # allowing one header to spoof another; ignore such fields.
                continue

            value = value.strip()
            key1 = tostr(key.upper().replace(b"-", b"_"))

            # If a header already exists, we append subsequent values
            # separated by a comma.  Applications already need to handle
            # the comma-separated values, as HTTP front ends might do
            # the concatenation for you (behavior specified in RFC2616).
            try:
                headers[key1] += tostr(b", " + value)
            except KeyError:
                headers[key1] = tostr(value)

        # command, uri, version will be bytes
        command, uri, version = crack_first_line(first_line)
        version = tostr(version)
        command = tostr(command)
        self.command = command
        self.version = version
        (
            self.proxy_scheme,
            self.proxy_netloc,
            self.path,
            self.query,
            self.fragment,
        ) = split_uri(uri)
        self.url_scheme = self.adj.url_scheme
        connection = headers.get("CONNECTION", "")

        if version == "1.0":
            if connection.lower() != "keep-alive":
                self.connection_close = True

        if version == "1.1":
            # since the server buffers data from chunked transfers and clients
            # never need to deal with chunked requests, downstream clients
            # should not see the HTTP_TRANSFER_ENCODING header; we pop it
            # here
            te = headers.pop("TRANSFER_ENCODING", "")

            # FIX: Transfer-Encoding is a comma-separated list of codings;
            # the previous exact-string comparison against "chunked"
            # silently ignored values such as "chunked, identity".
            encodings = [
                encoding.strip().lower() for encoding in te.split(",") if encoding
            ]

            for encoding in encodings:
                # Out of the transfer-codings listed in
                # https://tools.ietf.org/html/rfc7230#section-4 we only
                # support chunked at this time.
                # Note: the identity transfer-coding was removed in RFC7230:
                # https://tools.ietf.org/html/rfc7230#appendix-A.2 and is
                # thus not supported either.
                if encoding not in {"chunked"}:
                    raise TransferEncodingNotImplemented(
                        "Transfer-Encoding requested is not supported."
                    )

            if encodings and encodings[-1] == "chunked":
                self.chunked = True
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
            elif encodings:  # pragma: nocover
                raise TransferEncodingNotImplemented(
                    "Transfer-Encoding requested is not supported."
                )

        expect = headers.get("EXPECT", "").lower()
        self.expect_continue = expect == "100-continue"

        if connection.lower() == "close":
            self.connection_close = True

        if not self.chunked:
            try:
                cl = int(headers.get("CONTENT_LENGTH", 0))
            except ValueError:
                # FIX: previously an unparsable Content-Length was coerced
                # to 0, so waitress and an upstream proxy could read
                # different body lengths (request smuggling).
                raise ParsingError("Content-Length is invalid")

            self.content_length = cl

            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)

    def get_body_stream(self):
        """Return a file-like object for the request body (empty if none)."""
        body_rcv = self.body_rcv

        if body_rcv is not None:
            return body_rcv.getfile()
        else:
            return BytesIO()

    def close(self):
        """Release the buffer backing the body receiver, if any."""
        body_rcv = self.body_rcv

        if body_rcv is not None:
            body_rcv.getbuf().close()


def split_uri(uri):
    """Split a request-target (bytes) into scheme/netloc/path/query/fragment,
    returning str values with the path WSGI-unquoted."""
    # urlsplit handles byte input by returning bytes on py3, so
    # scheme, netloc, path, query, and fragment are bytes

    scheme = netloc = path = query = fragment = b""

    # urlsplit below will treat this as a scheme-less netloc, thereby losing
    # the original intent of the request. Here we shamelessly stole 4 lines of
    # code from the CPython stdlib to parse out the fragment and query but
    # leave the path alone. See
    # https://github.com/python/cpython/blob/8c9e9b0cd5b24dfbf1424d1f253d02de80e8f5ef/Lib/urllib/parse.py#L465-L468
    # and https://github.com/Pylons/waitress/issues/260

    if uri[:2] == b"//":
        path = uri

        if b"#" in path:
            path, fragment = path.split(b"#", 1)

        if b"?" in path:
            path, query = path.split(b"?", 1)
    else:
        try:
            scheme, netloc, path, query, fragment = urlparse.urlsplit(uri)
        except UnicodeError:
            raise ParsingError("Bad URI")

    return (
        tostr(scheme),
        tostr(netloc),
        unquote_bytes_to_wsgi(path),
        tostr(query),
        tostr(fragment),
    )


def get_header_lines(header):
    """
    Splits the header into lines, putting multi-line headers together.
    """
    r = []
    lines = header.split(b"\r\n")

    for line in lines:
        if not line:
            # FIX: skip the empty strings left by splitting on the trailing
            # CRLF CRLF; previously they were appended as bogus header lines.
            continue

        if b"\r" in line or b"\n" in line:
            raise ParsingError('Bare CR or LF found in header line "%s"' % tostr(line))

        if line.startswith((b" ", b"\t")):
            if not r:
                # https://corte.si/posts/code/pathod/pythonservers/index.html
                raise ParsingError('Malformed header line "%s"' % tostr(line))

            # Obsolete line folding: continuation joins the previous field.
            r[-1] += line
        else:
            r.append(line)

    return r


# Request line: method SP request-target SP HTTP-version (version optional
# for HTTP/0.9-style requests).
first_line_re = re.compile(
    b"([^ ]+) "
    b"((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)"
    b"(( HTTP/([0-9.]+))$|$)"
)


def crack_first_line(line):
    """Split the request line into (method, uri, version) bytes tuples;
    returns (b"", b"", b"") when the line does not parse."""
    m = first_line_re.match(line)

    if m is not None and m.end() == len(line):
        if m.group(3):
            version = m.group(5)
        else:
            version = b""

        method = m.group(1)

        # the request methods that are currently defined are all uppercase:
        # https://www.iana.org/assignments/http-methods/http-methods.xhtml and
        # the request method is case sensitive according to
        # https://tools.ietf.org/html/rfc7231#section-4.1

        # By disallowing anything but uppercase methods we save poor
        # unsuspecting souls from sending lowercase HTTP methods to waitress
        # and having the request complete, while servers like nginx drop the
        # request onto the floor.
        if method != method.upper():
            raise ParsingError('Malformed HTTP method "%s"' % tostr(method))

        uri = m.group(2)
        return method, uri, version
    else:
        return b"", b"", b""
./CrossVul/dataset_final_sorted/CWE-444/py/bad_1123_0
crossvul-python_data_bad_5077_2
# Authors: John Dennis <jdennis@redhat.com>
#          Thomas Liu <tliu@redhat.com
# Copyright (C) 2007-2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#

# NOTE(review): this module predates Python 3 -- it uses cmp(), StringType,
# str.encode("hex")/decode("hex"), and str buffers; it will not run unchanged
# on Python 3.  Comments below flag the spots.

# NOTE(review): __all__ exports 'AVC', which is not defined in this portion
# of the file -- presumably defined further down; confirm.
__all__ = ['derive_record_format',
           'parse_audit_record_text',
           'AvcContext',
           'AVC',
           'AuditEventID',
           'AuditEvent',
           'AuditRecord',
           'AuditRecordReader',
          ]

import audit
import struct
import os,errno
import re
import selinux
import base64
from types import *
import selinux.audit2why as audit2why
from setroubleshoot.util import *
from setroubleshoot.html_util import *
from setroubleshoot.xml_serialize import *
from sepolicy import *

# open(2) access-mode mask (O_RDONLY/O_WRONLY/O_RDWR bits).
O_ACCMODE = 0o0000003

#-----------------------------------------------------------------------------

# get_standard_directories() comes from one of the star imports above
# (presumably setroubleshoot.util or sepolicy) -- computed once at import.
standard_directories = get_standard_directories()

#-----------------------------------------------------------------------------

def audit_record_from_text(text):
    # Parse one audit log line into an AuditRecord (record_type/event_id may
    # be None when the line does not parse).
    parse_succeeded, record_type, event_id, body_text = parse_audit_record_text(text)
    audit_record = AuditRecord(record_type, event_id, body_text)
    return audit_record

#-----------------------------------------------------------------------------

def derive_record_format(socket_path):
    # Guess the wire format from the audit socket's filename.
    if re.search('/audispd_events$', socket_path):
        return AuditRecordReader.TEXT_FORMAT
    if re.search('/audit_events$', socket_path):
        return AuditRecordReader.BINARY_FORMAT
    return AuditRecordReader.TEXT_FORMAT # assume new format

# regular expression to find message like this:
# msg=audit(1152828325.857:123085): avc: denied { append } for pid=14205 ...
# Note, messages arriving directly from the audit system omit
# 'msg=', but messages in log files prepend 'msg='

# group 1 is the optional "node=XXX "
# group 2 is the node if node=XXX is present
# group 3 is the optional "type=XXX "
# group 4 is the type if type=XXX is present
# group 5 is the optional 'msg='
# group 6 is the complete event id
# group 7 is the seconds component of the timestamp
# group 8 is the millisconds component of the timestamp
# group 9 is the timestamp unique number
# group 10 is the body of the message appearing after the event id
audit_input_re = re.compile('(node=(\S+)\s+)?(type=(\S+)\s+)?(msg=)?audit\(((\d+)\.(\d+):(\d+))\):\s*(.*)')

def parse_audit_record_text(input):
    """Parse a textual audit line.

    Returns (parse_succeeded, record_type, event_id, body_text); the last
    three are None when the line does not match audit_input_re.
    """
    parse_succeeded = False
    host = None
    record_type = None
    event_id = None
    body_text = None

    match = audit_input_re.search(input)
    if match is not None:
        parse_succeeded = True
        if match.group(2):
            host = match.group(2)
        if match.group(4):
            record_type = match.group(4)
        if match.group(6):
            seconds = int(match.group(7))
            milli = int(match.group(8))
            serial = int(match.group(9))
            event_id = AuditEventID(seconds, milli, serial, host)
        body_text = match.group(10)

    return (parse_succeeded, record_type, event_id, body_text)

# Binary stream payloads carry only the "audit(sec.milli:serial): body" part.
audit_binary_input_re = re.compile('audit\(((\d+)\.(\d+):(\d+))\):\s*(.*)')

def parse_audit_binary_text(input):
    """Parse the text payload of a binary-framed audit record.

    Returns (parse_succeeded, event_id, body_text).
    """
    parse_succeeded = False
    event_id = None
    body_text = None

    match = audit_binary_input_re.search(input)
    if match is not None:
        parse_succeeded = True
        if match.group(1):
            seconds = int(match.group(2))
            milli = int(match.group(3))
            serial = int(match.group(4))
            event_id = AuditEventID(seconds, milli, serial)
        body_text = match.group(5)

    return (parse_succeeded, event_id, body_text)

#------------------------------------------------------------------------

import string
def printable(s):
    # NOTE(review): the comprehension yields a list, so filtered_path == s
    # (a string) is always False for non-empty input -- this function appears
    # to always return False; probably ''.join(...) was intended.  Confirm.
    if s:
        filtered_path = [x for x in s if x in string.printable]
        if filtered_path == s:
            return True
    return False

class AvcContext(XmlSerialize):
    """An SELinux security context (user:role:type:mls), XML-serializable."""
    _xml_info = {
    'user'            : {'XMLForm' : 'attribute' },
    'role'            : {'XMLForm' : 'attribute' },
    'type'            : {'XMLForm' : 'attribute' },
    'mls'             : {'XMLForm' : 'attribute' },
    }
    def __init__(self, data):
        super(AvcContext, self).__init__()
        # NOTE(review): StringType is the Python 2 str type from `types`.
        if type(data) is StringType:
            fields = data.split(':')
            if len(fields) >= 3:
                self.user = fields[0]
                self.role = fields[1]
                self.type = fields[2]
                if len(fields) > 3:
                    # MLS range may itself contain ':' -- rejoin the rest.
                    self.mls = ':'.join(fields[3:])
                else:
                    # Default sensitivity level when none is present.
                    self.mls = 's0'

    def __str__(self):
        return '%s:%s:%s:%s' % (self.user, self.role, self.type, self.mls)

    def format(self):
        # FIXME, what does selinux_raw_to_trans_context() do and why do we need it?
        (rc, trans) = selinux.selinux_raw_to_trans_context(str(self))
        return trans

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        # Field-wise comparison over the serialized attributes.
        for name in list(self._xml_info.keys()):
            if getattr(self, name) != getattr(other, name):
                return False
        return True

#-----------------------------------------------------------------------------

class AuditEventID(XmlSerialize):
    """The audit(seconds.milli:serial) identifier shared by records of one event."""
    _xml_info = {
    'seconds'      : {'XMLForm':'attribute', 'import_typecast':int },
    'milli'        : {'XMLForm':'attribute', 'import_typecast':int },
    'serial'       : {'XMLForm':'attribute', 'import_typecast':int },
    'host'         : {'XMLForm':'attribute' },
    }

    def __init__(self, seconds, milli, serial, host=None):
        super(AuditEventID, self).__init__()
        self.seconds = seconds
        self.milli = milli
        self.serial = serial
        if host is not None:
            self.host = host

    def __eq__(self, other):
        if self.host != other.host:
            return False
        if self.seconds != other.seconds:
            return False
        if self.milli != other.milli:
            return False
        if self.serial != other.serial:
            return False
        return True

    def __cmp__(self, other):
        # NOTE(review): __cmp__/cmp() are Python 2 only.
        if self.host != other.host:
            raise ValueError("cannot compare two %s objects whose host values differ (%s!=%s)" \
                             % (self.__class__.__name__, self.host, other.host))
        result = cmp(self.seconds, other.seconds)
        if result != 0:
            return result
        result = cmp(self.milli, other.milli)
        if result != 0:
            return result
        result = cmp(self.serial, other.serial)
        if result != 0:
            return result
        return 0

    def copy(self):
        import copy
        return copy.copy(self)

    # NOTE(review): reads self.sec but the attribute is named self.seconds --
    # accessing .time would raise AttributeError; looks like a latent bug.
    time = property(lambda self: float(self.sec) + self.milli / 1000.0)

    def __str__(self):
        return "audit(%d.%d:%d)" % (self.seconds, self.milli, self.serial)

    def is_valid(self):
        # All three numeric components must be present.
        if self.seconds is None:
            return False
        if self.milli is None:
            return False
        if self.serial is None:
            return False
        return True

#-----------------------------------------------------------------------------

class AuditRecord(XmlSerialize):
    """One audit record: type, event id, raw body text and parsed fields."""
    _xml_info = {
    'record_type'  : {'XMLForm':'attribute', },
    'event_id'     : {'XMLForm':'element', 'import_typecast':AuditEventID },
    'body_text'    : {'XMLForm':'element' },
    'line_number'  : {'XMLForm':'attribute', 'import_typecast':int },
    }

    # Binary wire framing: (version, header_size, record_type, msg_length).
    binary_version = 0
    binary_header_format="iiii"
    binary_header_size = struct.calcsize(binary_header_format)
    key_value_pair_re = re.compile("([^ \t]+)\s*=\s*([^ \t]+)")
    avc_re = re.compile("avc:\s+([^\s]+)\s+{([^}]+)}\s+for\s+")
    exec_arg_re = re.compile(r'^a\d+$')

    def __init__(self, record_type, event_id, body_text, fields=None, line_number=None):
        super(AuditRecord, self).__init__()
        # Header
        self.record_type = record_type
        self.event_id = event_id
        self.body_text = body_text
        self.fields = fields
        self.line_number = line_number
        self._init_postprocess()

    def _init_postprocess(self):
        # Parse fields lazily from the body when not supplied, and for AVC
        # records lift the denial verdict/permissions into fields.
        if getattr(self, 'fields', None) is None:
            self.set_fields_from_text(self.body_text)

        if self.record_type in ['AVC', 'USER_AVC', "1400", "1107"]:
            if 'seresult' not in self.fields:
                match = AuditRecord.avc_re.search(self.body_text)
                if match:
                    seresult = match.group(1)
                    self.fields['seresult'] = seresult
                    seperms = match.group(2)
                    self.fields['seperms'] = seperms.split()

    def __str__(self):
        return self.to_host_text()

    def audispd_rectify(self):
        # audispd records carry no line number; fill in our hostname when
        # the record arrived without a node= prefix.
        self.line_number = None
        if self.event_id.host is None:
            self.event_id.host = get_hostname()

    def is_valid(self):
        # NOTE(review): self.message is never assigned in this chunk --
        # this check likely raises AttributeError; confirm against full file.
        if not self.event_id.is_valid():
            return False
        if self.record_type is None:
            return False
        if self.message is None:
            return False
        return True

    def decode_fields(self):
        # Fields the kernel may hex-encode; decode them in place.
        # NOTE(review): 'ocomm' 'old' is implicit string concatenation
        # ('ocommold') -- almost certainly a missing comma.  Also 'saddr'
        # is special-cased below but is not in this list; confirm.
        encoded_fields = ['acct', 'cmd', 'comm', 'cwd', 'data', 'dir', 'exe', 'file',
                          'host', 'key', 'msg', 'name', 'new', 'ocomm' 'old',
                          'path', 'watch']
        for field in encoded_fields:
            if field in self.fields:
                if self.record_type == 'AVC' and field == 'saddr':
                    continue
                value = self.fields[field]
                decoded_value = audit_msg_decode(value)
                self.fields[field] = decoded_value

        if self.record_type == 'EXECVE':
            # argv entries are named a0, a1, ... and may also be hex-encoded.
            for field, value in list(self.fields.items()):
                if self.exec_arg_re.search(field):
                    value = self.fields[field]
                    decoded_value = audit_msg_decode(value)
                    self.fields[field] = decoded_value

    def translate_path(self, path):
        # Decode a hex-encoded path; a leading NUL marks an abstract unix
        # socket (rendered '@'), and the string is truncated at the first
        # embedded NUL.  Falls back to the raw value on any decode failure.
        # NOTE(review): str.decode("hex")/encode("hex") are Python 2 only.
        try:
            t = path.decode("hex")
            if t[0].encode("hex") == "00":
                tpath = "@"
            else:
                tpath = t[0]
            for i in range(len(t))[1:]:
                if t[i].encode("hex") != "00":
                    tpath = tpath + t[i]
                else:
                    break
        except:
            return path
        return tpath

    def set_fields_from_text(self, body_text):
        # Tokenize "key=value" pairs, normalizing a few well-known keys to
        # symbolic names (arch, exit code, syscall number).
        self.fields_ord = []
        self.fields = {}
        for match in AuditRecord.key_value_pair_re.finditer(body_text):
            key = match.group(1)
            value = match.group(2)
            value = value.strip('"')
            try:
                if key == "arch":
                    i = audit.audit_elf_to_machine(int(value,16))
                    value = audit.audit_machine_to_name(i)
                if key == "path":
                    value = '"%s"' % self.translate_path(value)
                if key == "exit":
                    try:
                        value = errno.errorcode[abs(int(value))]
                    except:
                        pass
                if key == "syscall":
                    syscall_name = audit.audit_syscall_to_name(int(value),audit.audit_detect_machine())
                    if syscall_name:
                        value = syscall_name
            except ValueError:
                pass
            self.fields[key] = value
            self.fields_ord.append(key)

    def get_field(self, name):
        # None when the field is absent.
        return self.fields.get(name)

    def get_binary_header(self, msg):
        # Pack the fixed-size frame header for the binary wire format.
        msg_length = len(msg)
        return struct.pack(AuditRecord.binary_header_format,
                           AuditRecord.binary_version,
                           AuditRecord.binary_header_size,
                           self.record_type, msg_length)

    def fields_to_text(self):
        # Re-render the parsed fields as an audit-log style line.
        # NOTE(review): self.access is not assigned anywhere in this chunk;
        # presumably set by a subclass or later code -- confirm.
        if self.fields is None:
            return ''
        if self.record_type == 'AVC':
            buf = "type=%s msg=%s: avc: denied { %s } " % (self.record_type, self.event_id, ' '.join(self.access))
        else:
            buf = "type=%s msg=%s: " % (self.record_type, self.event_id)
        buf += ' '.join(["%s=%s" % (k, self.fields[k]) for k in self.fields_ord]) + "\n"
        return buf

    def to_text(self):
        return "type=%s msg=%s: %s\n" % (self.record_type, self.event_id, self.body_text)

    def to_host_text(self):
        # Like to_text() but with the node= prefix when a host is known.
        if self.event_id.host is not None:
            return "node=%s type=%s msg=%s: %s\n" % \
                (self.event_id.host, self.record_type, self.event_id, self.body_text)
        else:
            return self.to_text()

    def to_binary(self):
        record = "%s: %s" % (self.event_id, self.body_text)
        return self.get_binary_header(record) + record

#-----------------------------------------------------------------------------

class AuditRecordReader:
    """Incremental reader that turns a byte stream into parsed audit records.

    feed() is bound to feed_text or feed_binary at construction time and
    yields (record_type, event_id, body_text, fields, line_number) tuples.
    """

    BINARY_FORMAT = 1
    TEXT_FORMAT = 2

    def __init__(self, record_format):
        self.record_format = record_format
        self._input_buffer = ''
        self.line_number = 0
        if self.record_format == self.TEXT_FORMAT:
            self.feed = self.feed_text
        elif self.record_format == self.BINARY_FORMAT:
            self.feed = self.feed_binary
        else:
            raise ValueError("unknown record format (%s) in %s" % (record_format, self.__class__.__name__))

    def feed_binary(self, new_data):
        if len(new_data) <= 0:
            return
        self._input_buffer += new_data

        # Now process as much of the buffer as we can, iterating over complete
        # messages.
        while True:
            # To read a complete message there must be a complete header and
            # all the data the header specified via the header.length
            if len(self._input_buffer) < AuditRecord.binary_header_size:
                return
            binary_version, binary_header_size, record_type, msg_length = \
                struct.unpack(AuditRecord.binary_header_format,
                              self._input_buffer[0:AuditRecord.binary_header_size])
            total_len = AuditRecord.binary_header_size + msg_length
            if len(self._input_buffer) < total_len:
                return
            text = self._input_buffer[AuditRecord.binary_header_size:total_len]
            parse_succeeded, event_id, body_text = parse_audit_binary_text(text)
            # Consume the frame whether or not it parsed.
            self._input_buffer = self._input_buffer[total_len:]
            if parse_succeeded:
                yield (audit.audit_msg_type_to_name(record_type), event_id, body_text, None, 0)
        return

    def feed_text(self, new_data):
        if len(new_data) <= 0:
            return
        self._input_buffer += new_data

        # Now process as much of the buffer as we can, iterating over complete
        # messages.
        # To read a complete message we must see a line ending
        start = 0
        end = self._input_buffer.find('\n', start)
        while end >= 0:
            self.line_number += 1
            end += 1            # include newline
            line = self._input_buffer[start:end]
            parse_succeeded, record_type, event_id, body_text = parse_audit_record_text(line)
            if parse_succeeded:
                yield (record_type, event_id, body_text, None, self.line_number)
            start = end
            end = self._input_buffer.find('\n', start)
        # Keep any trailing partial line for the next feed.
        self._input_buffer = self._input_buffer[start:]
        return

#-----------------------------------------------------------------------------

class AuditEvent(XmlSerialize):
    """A group of AuditRecords sharing one AuditEventID."""
    _xml_info = {
    'records'     : {'XMLForm':'element', 'list':'audit_record', 'import_typecast':AuditRecord, },
    'event_id'    : {'XMLForm':'element', 'import_typecast':AuditEventID },
    }
    def __init__(self):
        super(AuditEvent, self).__init__()
        self.event_id = None
        self.records = []
        self.record_types = {}
        self.timestamp = None

    def _init_postprocess(self):
        # Rebuild the record_types index after XML deserialization.
        if getattr(self, 'record_types', None) is None:
            self.record_types = {}
            for record in self.records:
                self.process_record(record)

    def __str__(self):
        # NOTE(review): is_avc()/is_granted() are not defined in this chunk;
        # presumably defined below the cut -- confirm.
        line_numbers = self.line_numbers
        line_numbers.sort()
        return "%s: is_avc=%s, is_granted=%s: line_numbers=[%s]\n%s" % \
            (self.event_id, self.is_avc(), self.is_granted(),
             ",".join([str(x) for x in line_numbers]),
             "\n".join([" %s" % record for record in self.records]))

    def format(self, separator='\n'):
        return separator.join([str(record) for record in self.records])

    def num_records(self):
        return len(self.records)

    # Line numbers of all records that have one.
    line_numbers = property(lambda self: [record.line_number for record in self.records if record.line_number])

    def add_record(self, record):
        self.records.append(record)
        self.process_record(record)

    def process_record(self, record):
        # First record fixes the event id/timestamp; later records must match.
        if self.event_id is None:
            self.event_id = record.event_id.copy()
            self.timestamp = float(self.event_id.seconds) + (self.event_id.milli / 1000.0)
        else:
            if not self.event_id == record.event_id:
                raise ValueError("cannot add audit record to audit event, event_id mismatch %s != %s" % \
                                     (self.event_id, record.event_id))
        record_list = self.record_types.setdefault(record.record_type, [])
        record_list.append(record)

    def get_field(self, name, record_type=None):
        '''Return list of (value, record_type) tuples. In other words
        return the value matching name for every record_type. If
        record_type is not specified then all records are searched.
Note: it is possible to have more than one record of a given type thus it is always possible to have multiple values returned.''' items = [] if record_type is None: records = self.records else: records = self.get_records_of_type(record_type) for record in records: value = record.fields.get(name) if value is None: continue items.append((value, record.type)) return items def get_record_of_type(self, type): record = None records = self.record_types.get(type) if records: record = records[0] return record def get_records_of_type(self, type): return self.record_types.get(type, []) def get_avc_record(self): for record_type in ['AVC', 'USER_AVC', "1400", "1107"]: record = self.get_record_of_type(record_type) if (record): return record def is_avc(self): return self.get_avc_record() is not None def is_granted(self): avc_record = self.get_avc_record() if avc_record is None: return False seresult = avc_record.fields['seresult'] if seresult == 'denied': return False if seresult == 'granted': return True log.avc.warn("unknown value for seresult ('%s')", seresult) return False #------------------------------------------------------------------------------ class AVC: # These are the perm sets from the reference policy for file, dirs, and filesystems. # They are here to be used below in the access matching functions. 
stat_file_perms = ['getattr'] x_file_perms = ['getattr', 'execute'] r_file_perms = ['open', 'read', 'getattr', 'lock', 'ioctl'] rx_file_perms = ['open', 'read', 'getattr', 'lock', 'execute', 'ioctl'] ra_file_perms = ['open', 'ioctl', 'read', 'getattr', 'lock', 'append'] link_file_perms = ['getattr', 'link', 'unlink', 'rename'] create_lnk_perms = ['create', 'read', 'getattr', 'setattr', 'link', 'unlink', 'rename'] create_file_perms = ['open', 'create', 'ioctl', 'read', 'getattr', 'lock', 'write', 'setattr', 'append', 'link', 'unlink', 'rename'] r_dir_perms = ['open', 'read', 'getattr', 'lock', 'search', 'ioctl'] rw_dir_perms = ['open', 'read', 'getattr', 'lock', 'search', 'ioctl', 'add_name', 'remove_name', 'write'] ra_dir_perms = ['open', 'read', 'getattr', 'lock', 'search', 'ioctl', 'add_name', 'write'] create_dir_perms = ['open', 'create', 'read', 'getattr', 'lock', 'setattr', 'ioctl', 'link', 'unlink', 'rename', 'search', 'add_name', 'remove_name', 'reparent', 'write', 'rmdir'] mount_fs_perms = ['mount', 'remount', 'unmount', 'getattr'] search_dir_perms = ['getattr', 'search'] getattr_dir_perms = ['getattr'] setattr_dir_perms = ['setattr'] list_dir_perms = ['open', 'getattr', 'search', 'read', 'lock', 'ioctl'] add_entry_dir_perms = ['open', 'getattr', 'search', 'lock', 'ioctl', 'write', 'add_name'] del_entry_dir_perms = ['open', 'getattr', 'search', 'lock', 'ioctl', 'write', 'remove_name'] manage_dir_perms = ['open', 'create', 'getattr', 'setattr', 'read', 'write', 'link', 'unlink', 'rename', 'search', 'add_name', 'remove_name', 'reparent', 'rmdir', 'lock', 'ioctl'] getattr_file_perms = ['getattr'] setattr_file_perms = ['setattr'] read_file_perms = ['open', 'getattr', 'read', 'lock', 'ioctl'] append_file_perms = ['open', 'getattr', 'append', 'lock', 'ioctl'] write_file_perms = ['open', 'getattr', 'write', 'append', 'lock', 'ioctl'] rw_file_perms = ['open', 'getattr', 'read', 'write', 'append', 'ioctl', 'lock'] delete_file_perms = ['getattr', 'unlink'] 
manage_file_perms = ['open', 'create', 'getattr', 'setattr', 'read', 'write', 'append', 'rename', 'link', 'unlink', 'ioctl', 'lock'] pipe_instance_path_re = re.compile(r'^(\w+):\[([^\]]*)\]') proc_pid_instance_re = re.compile(r'^(/proc/)(\d+)(.*)') def __init__(self, audit_event, query_environment=True): self.audit_event = audit_event self.query_environment = query_environment # if audit_event.timestamp is None: # self.audit_event.timestamp = TimeStamp() self.template_substitutions = {} self.tpath = None self.spath = None self.source = None self.source_pkg = None self.access = None self.scontext = None self.tcontext = None self.tclass = None self.port = None self.src_rpms=[] self.tgt_rpms=[] self.host = None self.pid = None self.kmod = None self.syscall = None self.why = None self.bools = [] self.derive_avc_info_from_audit_event() def __str__(self): return self.format_avc() def format_avc(self): text = '' text += 'scontext=%s ' % self.scontext text += 'tcontext=%s ' % self.tcontext text += 'access=%s ' % self.access text += 'tclass=%s ' % self.tclass text += 'tpath=%s ' % self.tpath return text # Below are helper functions to get values that might be # stored in one or more fields in an AVC. def has_any_access_in(self, access_list): 'Returns true if the AVC contains _any_ of the permissions in the access list.' 
if self.access is None: return False for a in self.access: if a in access_list: return True return False def all_accesses_are_in(self, access_list): """Returns true if _every_ access in the AVC matches at least one of the permissions in the access list.""" if self.access is None: return False for a in self.access: if a not in access_list: return False return True def allowed_target_types(self): all_types = get_all_file_types() + get_all_port_types() all_types.sort() all_attributes = get_all_attributes() all_attributes.sort() allowed_types = [] wtypes = [x[TARGET] for x in [y for y in search([ALLOW], {SOURCE: self.scontext.type, CLASS: self.tclass, PERMS: self.access}) if y["enabled"]]] types = wtypes for t in types: if t in all_attributes: wtypes.extend(info(ATTRIBUTE, t)[0]["types"]) for t in wtypes: if t in all_types: if t not in allowed_types: allowed_types.append(t) allowed_types.sort() return allowed_types def open_with_write(self): if self.has_any_access_in(['open']): try: if self.a1 and (int(self.a1) & O_ACCMODE) != os.O_RDONLY: return True except: pass return False def __typeMatch(self, context, type_list): for type in type_list: if re.match(type, context.type): return True return False def matches_source_types(self, type_list): """Returns true if the type in the source context of the avc regular expression matches any of the types in the type list.""" if self.scontext is None: return False return self.__typeMatch(self.scontext, type_list) def matches_target_types(self, type_list): """Returns true if the type in the target context of the avc regular expression matches any of the types in the type list.""" if self.tcontext is None: return False return self.__typeMatch(self.tcontext, type_list) def has_tclass_in(self, tclass_list): if self.tclass is None: return False return self.tclass in tclass_list def update(self): self.derive_environmental_info() self.update_derived_template_substitutions() def path_is_not_standard_directory(self): if self.tpath is None: 
return True return self.tpath not in standard_directories def decodehex(self,path): try: t = path.decode("hex") if t[0].encode("hex") == "00": tpath = "@" else: tpath = t[0] for i in range(len(t))[1:]: if t[i].encode("hex") != "00": tpath = tpath + t[i] else: break if not printable(tpath): tpath = path except: tpath = path if not printable(tpath): return "" return tpath def _set_tpath(self): '''Derive the target path. If path information is available the avc record will have a path field and no name field because the path field is more specific and supercedes name. The name field is typically the directory entry. For some special files the kernel embeds instance information into the file name. For example 'pipe:[1234]' or 'socket:[1234]' where the number inside the brackets is the inode number. The proc pseudo file system has the process pid embedded in the name, for example '/proc/1234/mem'. These numbers are ephemeral and do not contribute meaningful information for our reports. Plus we may use the path information to decide if an alert is identical to a previous alert, we coalesce them if they are. The presence of an instance specific number in the path confuses this comparision. 
For these reasons we strip any instance information out of the path, Example input and output: pipe:[1234] --> pipe socket:[1234] --> socket /proc/1234/fd --> /proc/<pid>/fd ./foo --> ./foo /etc/sysconfig --> /etc/sysconfig ''' path = None name = None # First try to get the path from the AVC record, new kernel # versions put it there rather than in AVC_PATH path = self.avc_record.get_field('path') if path: path = path.strip('"') inodestr = self.avc_record.get_field("ino") if path is None: avc_path_record = self.audit_event.get_record_of_type('PATH') if avc_path_record: path = avc_path_record.get_field('name') if path is None: # No path field, so try and use the name field instead name = self.avc_record.get_field('name') if name is not None: # Use the class to be smart about formatting the name tclass = self.avc_record.get_field('tclass') if tclass == 'file': # file name is not a full path so make it appear relative path = '%s' % name elif tclass == 'dir': # directory component is not a full path so make it appear # relative, but only if it's not the root if name == '/': path = name else: path = '%s' % name else: # just use the bare name path = name if path is not None: if path == "/" and inodestr: matches = [] try: dev_rdev = 0 dev = self.avc_record.get_field('dev') if os.path.exists("/dev/"+dev): dev_rdev = os.lstat("/dev/"+dev).st_rdev ino = int(inodestr) fd=open("/proc/mounts", "r") for i in fd.read().split("\n"): x = i.split() if len(x) and x[1][0] == '/': try: if (dev_rdev == 0 or os.stat(x[0]).st_rdev == dev_rdev) and int(os.lstat(x[1]).st_ino) == ino: matches.append(x[:3]) except OSError: continue fd.close() if len(matches) == 1: path = matches[0][1] elif len(matches) > 1: for i in matches: if i[0] == ("/dev/%s" % dev) or i[2] == dev: path = i[1] break else: try: if dev_rdev != 0 and os.lstat(i[0]).st_rdev == dev_rdev: path = i[1] break except OSError: pass except TypeError: path = "unknown mountpoint" pass except OSError: path = "unknown mountpoint" pass 
else: if path.startswith("/") == False and inodestr: import subprocess command = "locate -b '\%s'" % path try: output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True) ino = int(inodestr) for file in output.split("\n"): try: if int(os.lstat(file).st_ino) == ino: path = file break except: pass except subprocess.CalledProcessError as e: pass if path is not None: if path.startswith('/'): # Fully qualified path # map /proc/1234/ to /proc/<pid>, replacing numeric pid with <pid> path = self.proc_pid_instance_re.sub(r'\1<pid>\3', path) else: # map pipe:[1234] to pipe, stripping out inode instance (e.g. [1234]) # applies to socket as well match = self.pipe_instance_path_re.search(path) if match: path = self.tclass self.tpath = self.decodehex(path) if self.tpath == '': self.tpath = path if self.tpath is None: if self.tclass == "filesystem": self.tpath = "" elif self.tclass == "udp_socket" or self.tclass == "tcp_socket": self.tpath = _("port %s") % self.port else: self.tpath = _("Unknown") def derive_avc_info_from_audit_event(self): self.tpath = None self.spath = None self.source = None self.a1 = None self.success = False self.syscall_paths = [] exe = comm = arch = syscall = None self.avc_record = self.audit_event.get_avc_record() syscall_record = self.audit_event.get_record_of_type('SYSCALL') self.access = self.avc_record.get_field('seperms') if not isinstance(self.scontext, AvcContext): self.scontext = AvcContext(self.avc_record.get_field('scontext')) if not isinstance(self.tcontext, AvcContext): self.tcontext = AvcContext(self.avc_record.get_field('tcontext')) self.tclass = self.avc_record.get_field('tclass') if self.avc_record.get_field('dest') is None: self.port = self.avc_record.get_field('src') else: self.port = self.avc_record.get_field('dest') self._set_tpath() self.kmod = self.avc_record.get_field('kmod') self.pid = self.avc_record.get_field('pid') # exe, cwd, name, path, key, dir, comm, ocomm, key_desc if syscall_record: exe = 
syscall_record.get_field('exe') try: exe.decode("hex") except: pass comm = syscall_record.get_field('comm') self.syscall = syscall_record.get_field('syscall') self.success = (syscall_record.get_field('success') == "yes") self.a1 = syscall_record.get_field('a1') if comm is None: comm = self.avc_record.get_field('comm') if exe is None: exe = self.avc_record.get_field('exe') try: self.spath = exe.decode("hex") except: self.spath = exe if comm: self.source = comm elif exe: self.source = self.spath if not self.spath: self.spath = self.source if not self.spath: self.spath = self.scontext.type cwd_record = self.audit_event.get_record_of_type('CWD') if cwd_record: cwd = cwd_record.get_field('cwd') else: cwd = None path_records = self.audit_event.get_records_of_type('PATH') for path_record in path_records: path = path_record.get_field('name') if os.path.isabs(path) or not cwd: self.syscall_paths.append(path) else: self.syscall_paths.append(os.path.join(cwd, path)) self.src_rpms=[] self.tgt_rpms=[] self.host = self.audit_event.event_id.host self.why, bools = audit2why.analyze(str(self.scontext), str(self.tcontext), str(self.tclass), self.access) if self.why == audit2why.ALLOW: raise ValueError(_("%s \n**** Invalid AVC allowed in current policy ***\n") % self.avc_record) if self.why == audit2why.DONTAUDIT: raise ValueError(_("%s \n**** Invalid AVC dontaudited in current policy. 'semodule -B' will turn on dontaudit rules. ***\n") % self.avc_record) if self.why == audit2why.NOPOLICY: raise ValueError(_("Must call policy_init first")) if self.why == audit2why.BADTCON: raise ValueError(_("%s \n**** Invalid AVC bad target context. ***\n") % self.avc_record) if self.why == audit2why.BADSCON: raise ValueError(_("%s \n**** Invalid AVC bad source context. 
***\n") % self.avc_record) if self.why == audit2why.BADSCON: raise ValueError(_("%s \n**** Invalid AVC bad type class ***\n") % self.avc_record) if self.why == audit2why.BADPERM: raise ValueError(_("%s \n**** Invalid AVC bad permission ***\n") % self.avc_record) if self.why == audit2why.BADCOMPUTE: raise ValueError(_("Error during access vector computation")) if self.why == audit2why.BOOLEAN: self.bools = bools def derive_environmental_info(self): if self.query_environment: if self.spath: self.source_pkg = get_rpm_nvr_by_file_path(self.spath) if self.source_pkg: self.src_rpms.append(self.source_pkg) if self.tpath: rpm = get_rpm_nvr_by_file_path(self.tpath) if rpm: self.tgt_rpms.append(rpm) def set_alt_path(self, path): if self.tpath is None: self.tpath = path def set_template_substitutions(self, **kwds): for key, value in list(kwds.items()): if value: self.template_substitutions[key] = value def update_derived_template_substitutions(self): self.template_substitutions["SOURCE_TYPE"] = escape_html(self.scontext.type) self.template_substitutions["TARGET_TYPE"] = escape_html(self.tcontext.type) self.template_substitutions["SOURCE"] = escape_html(self.source) self.template_substitutions["SOURCE_PATH"] = escape_html(self.spath) if self.spath: self.template_substitutions["FIX_SOURCE_PATH"] = re.sub(" ",".",escape_html(self.spath)) self.template_substitutions["TARGET_PATH"] = escape_html(self.tpath) if self.tpath: self.template_substitutions["FIX_TARGET_PATH"] = re.sub(" ",".",escape_html(self.tpath)) if self.tpath is None: self.template_substitutions["TARGET_DIR"] = None else: if self.tclass == 'dir': self.template_substitutions["TARGET_DIR"] = escape_html(self.tpath) elif self.tclass == 'file': self.template_substitutions["TARGET_DIR"] = escape_html(os.path.dirname(self.tpath)) else: self.template_substitutions["TARGET_DIR"] = None self.template_substitutions["TARGET_CLASS"] = escape_html(self.tclass) if self.access is None: self.template_substitutions["ACCESS"] = None 
else: self.template_substitutions["ACCESS"] = escape_html(' '.join(self.access)) self.template_substitutions["SOURCE_PACKAGE"] = escape_html(self.source_pkg) self.template_substitutions["PORT_NUMBER"] = escape_html(self.port) def validate_template_substitutions(self): # validate, replace any None values with friendly string for key, value in list(self.template_substitutions.items()): if value is None: self.template_substitutions[key] = escape_html(default_text(value))
./CrossVul/dataset_final_sorted/CWE-77/py/bad_5077_2
crossvul-python_data_bad_5076_0
#!/usr/bin/python import dbus import dbus.service import dbus.mainloop.glib import gobject import slip.dbus.service from slip.dbus import polkit import os class RunFix(slip.dbus.service.Object): default_polkit_auth_required = "org.fedoraproject.setroubleshootfixit.write" def __init__ (self, *p, **k): super(RunFix, self).__init__(*p, **k) @dbus.service.method ("org.fedoraproject.SetroubleshootFixit", in_signature='ss', out_signature='s') def run_fix(self, local_id, analysis_id): import commands command = "sealert -f %s -P %s" % ( local_id, analysis_id) return commands.getoutput(command) if __name__ == "__main__": mainloop = gobject.MainLoop () dbus.mainloop.glib.DBusGMainLoop (set_as_default=True) system_bus = dbus.SystemBus () name = dbus.service.BusName("org.fedoraproject.SetroubleshootFixit", system_bus) object = RunFix(system_bus, "/org/fedoraproject/SetroubleshootFixit/object") slip.dbus.service.set_mainloop (mainloop) mainloop.run ()
./CrossVul/dataset_final_sorted/CWE-77/py/bad_5076_0
crossvul-python_data_good_5077_2
# Authors: John Dennis <jdennis@redhat.com>
#          Thomas Liu <tliu@redhat.com>
# Copyright (C) 2007-2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#

__all__ = ['derive_record_format',
           'parse_audit_record_text',
           'AvcContext',
           'AVC',
           'AuditEventID',
           'AuditEvent',
           'AuditRecord',
           'AuditRecordReader',
           ]

import audit
import struct
import os, errno
import re
import selinux
import base64
from types import *
import selinux.audit2why as audit2why
from setroubleshoot.util import *
from setroubleshoot.html_util import *
from setroubleshoot.xml_serialize import *
from sepolicy import *

# mask for the read/write access-mode bits of open(2) flags
O_ACCMODE = 0o0000003

#-----------------------------------------------------------------------------

standard_directories = get_standard_directories()

#-----------------------------------------------------------------------------

def audit_record_from_text(text):
    """Parse one audit-log line into an AuditRecord."""
    parse_succeeded, record_type, event_id, body_text = parse_audit_record_text(text)
    audit_record = AuditRecord(record_type, event_id, body_text)
    return audit_record

#-----------------------------------------------------------------------------

def derive_record_format(socket_path):
    """Guess the record format from the audit socket's path name."""
    if re.search('/audispd_events$', socket_path):
        return AuditRecordReader.TEXT_FORMAT
    if re.search('/audit_events$', socket_path):
        return AuditRecordReader.BINARY_FORMAT
    return AuditRecordReader.TEXT_FORMAT  # assume new format

# Matches messages like:
#   msg=audit(1152828325.857:123085): avc: denied { append } for pid=14205 ...
# Messages arriving directly from the audit system omit 'msg='; messages in
# log files prepend it.  Groups: (2) optional node, (4) optional type,
# (7) seconds, (8) milliseconds, (9) serial, (10) message body.
audit_input_re = re.compile('(node=(\S+)\s+)?(type=(\S+)\s+)?(msg=)?audit\(((\d+)\.(\d+):(\d+))\):\s*(.*)')

def parse_audit_record_text(input):
    """Split an audit-log line into (parse_succeeded, record_type, event_id, body_text)."""
    parse_succeeded = False
    host = None
    record_type = None
    event_id = None
    body_text = None
    match = audit_input_re.search(input)
    if match is not None:
        parse_succeeded = True
        if match.group(2):
            host = match.group(2)
        if match.group(4):
            record_type = match.group(4)
        if match.group(6):
            seconds = int(match.group(7))
            milli = int(match.group(8))
            serial = int(match.group(9))
            event_id = AuditEventID(seconds, milli, serial, host)
        body_text = match.group(10)
    return (parse_succeeded, record_type, event_id, body_text)

audit_binary_input_re = re.compile('audit\(((\d+)\.(\d+):(\d+))\):\s*(.*)')

def parse_audit_binary_text(input):
    """Split a binary-stream payload into (parse_succeeded, event_id, body_text)."""
    parse_succeeded = False
    event_id = None
    body_text = None
    match = audit_binary_input_re.search(input)
    if match is not None:
        parse_succeeded = True
        if match.group(1):
            seconds = int(match.group(2))
            milli = int(match.group(3))
            serial = int(match.group(4))
            event_id = AuditEventID(seconds, milli, serial)
        body_text = match.group(5)
    return (parse_succeeded, event_id, body_text)

#------------------------------------------------------------------------

import string

def printable(s):
    """Return True if s is non-empty and contains only printable characters."""
    if s:
        # BUG FIX: the original compared a *list* of characters against the
        # string s ('[...] == s'), which can never be True, so printable()
        # always returned False.  Join the filtered characters back into a
        # string before comparing.
        filtered_path = ''.join([x for x in s if x in string.printable])
        if filtered_path == s:
            return True
    return False


class AvcContext(XmlSerialize):
    """A parsed SELinux security context (user:role:type[:mls])."""

    _xml_info = {
        'user': {'XMLForm': 'attribute'},
        'role': {'XMLForm': 'attribute'},
        'type': {'XMLForm': 'attribute'},
        'mls':  {'XMLForm': 'attribute'},
    }

    def __init__(self, data):
        super(AvcContext, self).__init__()
        # NOTE(review): StringType comes from 'from types import *' and only
        # exists on Python 2 -- confirm the intended interpreter.
        if type(data) is StringType:
            fields = data.split(':')
            if len(fields) >= 3:
                self.user = fields[0]
                self.role = fields[1]
                self.type = fields[2]
                if len(fields) > 3:
                    self.mls = ':'.join(fields[3:])
                else:
                    self.mls = 's0'

    def __str__(self):
        return '%s:%s:%s:%s' % (self.user, self.role, self.type, self.mls)

    def format(self):
        # FIXME, what does selinux_raw_to_trans_context() do and why do we need it?
        (rc, trans) = selinux.selinux_raw_to_trans_context(str(self))
        return trans

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        for name in list(self._xml_info.keys()):
            if getattr(self, name) != getattr(other, name):
                return False
        return True

#-----------------------------------------------------------------------------

class AuditEventID(XmlSerialize):
    """The (seconds, milliseconds, serial[, host]) identity of an audit event."""

    _xml_info = {
        'seconds': {'XMLForm': 'attribute', 'import_typecast': int},
        'milli':   {'XMLForm': 'attribute', 'import_typecast': int},
        'serial':  {'XMLForm': 'attribute', 'import_typecast': int},
        'host':    {'XMLForm': 'attribute'},
    }

    def __init__(self, seconds, milli, serial, host=None):
        super(AuditEventID, self).__init__()
        self.seconds = seconds
        self.milli = milli
        self.serial = serial
        if host is not None:
            self.host = host

    def __eq__(self, other):
        if self.host != other.host:
            return False
        if self.seconds != other.seconds:
            return False
        if self.milli != other.milli:
            return False
        if self.serial != other.serial:
            return False
        return True

    def __cmp__(self, other):
        # NOTE(review): __cmp__/cmp are Python 2 only.
        if self.host != other.host:
            raise ValueError("cannot compare two %s objects whose host values differ (%s!=%s)" \
                             % (self.__class__.__name__, self.host, other.host))
        result = cmp(self.seconds, other.seconds)
        if result != 0:
            return result
cmp(self.milli, other.milli) if result != 0: return result result = cmp(self.serial, other.serial) if result != 0: return result return 0 def copy(self): import copy return copy.copy(self) time = property(lambda self: float(self.sec) + self.milli / 1000.0) def __str__(self): return "audit(%d.%d:%d)" % (self.seconds, self.milli, self.serial) def is_valid(self): if self.seconds is None: return False if self.milli is None: return False if self.serial is None: return False return True #----------------------------------------------------------------------------- class AuditRecord(XmlSerialize): _xml_info = { 'record_type' : {'XMLForm':'attribute', }, 'event_id' : {'XMLForm':'element', 'import_typecast':AuditEventID }, 'body_text' : {'XMLForm':'element' }, 'line_number' : {'XMLForm':'attribute', 'import_typecast':int }, } binary_version = 0 binary_header_format="iiii" binary_header_size = struct.calcsize(binary_header_format) key_value_pair_re = re.compile("([^ \t]+)\s*=\s*([^ \t]+)") avc_re = re.compile("avc:\s+([^\s]+)\s+{([^}]+)}\s+for\s+") exec_arg_re = re.compile(r'^a\d+$') def __init__(self, record_type, event_id, body_text, fields=None, line_number=None): super(AuditRecord, self).__init__() # Header self.record_type = record_type self.event_id = event_id self.body_text = body_text self.fields = fields self.line_number = line_number self._init_postprocess() def _init_postprocess(self): if getattr(self, 'fields', None) is None: self.set_fields_from_text(self.body_text) if self.record_type in ['AVC', 'USER_AVC', "1400", "1107"]: if 'seresult' not in self.fields: match = AuditRecord.avc_re.search(self.body_text) if match: seresult = match.group(1) self.fields['seresult'] = seresult seperms = match.group(2) self.fields['seperms'] = seperms.split() def __str__(self): return self.to_host_text() def audispd_rectify(self): self.line_number = None if self.event_id.host is None: self.event_id.host = get_hostname() def is_valid(self): if not self.event_id.is_valid(): return 
False if self.record_type is None: return False if self.message is None: return False return True def decode_fields(self): encoded_fields = ['acct', 'cmd', 'comm', 'cwd', 'data', 'dir', 'exe', 'file', 'host', 'key', 'msg', 'name', 'new', 'ocomm' 'old', 'path', 'watch'] for field in encoded_fields: if field in self.fields: if self.record_type == 'AVC' and field == 'saddr': continue value = self.fields[field] decoded_value = audit_msg_decode(value) self.fields[field] = decoded_value if self.record_type == 'EXECVE': for field, value in list(self.fields.items()): if self.exec_arg_re.search(field): value = self.fields[field] decoded_value = audit_msg_decode(value) self.fields[field] = decoded_value def translate_path(self, path): try: t = path.decode("hex") if t[0].encode("hex") == "00": tpath = "@" else: tpath = t[0] for i in range(len(t))[1:]: if t[i].encode("hex") != "00": tpath = tpath + t[i] else: break except: return path return tpath def set_fields_from_text(self, body_text): self.fields_ord = [] self.fields = {} for match in AuditRecord.key_value_pair_re.finditer(body_text): key = match.group(1) value = match.group(2) value = value.strip('"') try: if key == "arch": i = audit.audit_elf_to_machine(int(value,16)) value = audit.audit_machine_to_name(i) if key == "path": value = '"%s"' % self.translate_path(value) if key == "exit": try: value = errno.errorcode[abs(int(value))] except: pass if key == "syscall": syscall_name = audit.audit_syscall_to_name(int(value),audit.audit_detect_machine()) if syscall_name: value = syscall_name except ValueError: pass self.fields[key] = value self.fields_ord.append(key) def get_field(self, name): return self.fields.get(name) def get_binary_header(self, msg): msg_length = len(msg) return struct.pack(AuditRecord.binary_header_format, AuditRecord.binary_version, AuditRecord.binary_header_size, self.record_type, msg_length) def fields_to_text(self): if self.fields is None: return '' if self.record_type == 'AVC': buf = "type=%s msg=%s: 
avc: denied { %s } " % (self.record_type, self.event_id, ' '.join(self.access)) else: buf = "type=%s msg=%s: " % (self.record_type, self.event_id) buf += ' '.join(["%s=%s" % (k, self.fields[k]) for k in self.fields_ord]) + "\n" return buf def to_text(self): return "type=%s msg=%s: %s\n" % (self.record_type, self.event_id, self.body_text) def to_host_text(self): if self.event_id.host is not None: return "node=%s type=%s msg=%s: %s\n" % \ (self.event_id.host, self.record_type, self.event_id, self.body_text) else: return self.to_text() def to_binary(self): record = "%s: %s" % (self.event_id, self.body_text) return self.get_binary_header(record) + record #----------------------------------------------------------------------------- class AuditRecordReader: BINARY_FORMAT = 1 TEXT_FORMAT = 2 def __init__(self, record_format): self.record_format = record_format self._input_buffer = '' self.line_number = 0 if self.record_format == self.TEXT_FORMAT: self.feed = self.feed_text elif self.record_format == self.BINARY_FORMAT: self.feed = self.feed_binary else: raise ValueError("unknown record format (%s) in %s" % (record_format, self.__class__.__name__)) def feed_binary(self, new_data): if len(new_data) <= 0: return self._input_buffer += new_data # Now process as much of the buffer as we can, iterating over complete # messages. 
while True: # To read a complete message there must be a complete header and # all the data the header specified via the header.length if len(self._input_buffer) < AuditRecord.binary_header_size: return binary_version, binary_header_size, record_type, msg_length = \ struct.unpack(AuditRecord.binary_header_format, self._input_buffer[0:AuditRecord.binary_header_size]) total_len = AuditRecord.binary_header_size + msg_length if len(self._input_buffer) < total_len: return text = self._input_buffer[AuditRecord.binary_header_size:total_len] parse_succeeded, event_id, body_text = parse_audit_binary_text(text) self._input_buffer = self._input_buffer[total_len:] if parse_succeeded: yield (audit.audit_msg_type_to_name(record_type), event_id, body_text, None, 0) return def feed_text(self, new_data): if len(new_data) <= 0: return self._input_buffer += new_data # Now process as much of the buffer as we can, iterating over complete # messages. # To read a complete message we must see a line ending start = 0 end = self._input_buffer.find('\n', start) while end >= 0: self.line_number += 1 end += 1 # include newline line = self._input_buffer[start:end] parse_succeeded, record_type, event_id, body_text = parse_audit_record_text(line) if parse_succeeded: yield (record_type, event_id, body_text, None, self.line_number) start = end end = self._input_buffer.find('\n', start) self._input_buffer = self._input_buffer[start:] return #----------------------------------------------------------------------------- class AuditEvent(XmlSerialize): _xml_info = { 'records' : {'XMLForm':'element', 'list':'audit_record', 'import_typecast':AuditRecord, }, 'event_id' : {'XMLForm':'element', 'import_typecast':AuditEventID }, } def __init__(self): super(AuditEvent, self).__init__() self.event_id = None self.records = [] self.record_types = {} self.timestamp = None def _init_postprocess(self): if getattr(self, 'record_types', None) is None: self.record_types = {} for record in self.records: 
self.process_record(record) def __str__(self): line_numbers = self.line_numbers line_numbers.sort() return "%s: is_avc=%s, is_granted=%s: line_numbers=[%s]\n%s" % \ (self.event_id, self.is_avc(), self.is_granted(), ",".join([str(x) for x in line_numbers]), "\n".join([" %s" % record for record in self.records])) def format(self, separator='\n'): return separator.join([str(record) for record in self.records]) def num_records(self): return len(self.records) line_numbers = property(lambda self: [record.line_number for record in self.records if record.line_number]) def add_record(self, record): self.records.append(record) self.process_record(record) def process_record(self, record): if self.event_id is None: self.event_id = record.event_id.copy() self.timestamp = float(self.event_id.seconds) + (self.event_id.milli / 1000.0) else: if not self.event_id == record.event_id: raise ValueError("cannot add audit record to audit event, event_id mismatch %s != %s" % \ (self.event_id, record.event_id)) record_list = self.record_types.setdefault(record.record_type, []) record_list.append(record) def get_field(self, name, record_type=None): '''Return list of (value, record_type) tuples. In other words return the value matching name for every record_type. If record_type is not specified then all records are searched. 
Note: it is possible to have more than one record of a given type thus it is always possible to have multiple values returned.''' items = [] if record_type is None: records = self.records else: records = self.get_records_of_type(record_type) for record in records: value = record.fields.get(name) if value is None: continue items.append((value, record.type)) return items def get_record_of_type(self, type): record = None records = self.record_types.get(type) if records: record = records[0] return record def get_records_of_type(self, type): return self.record_types.get(type, []) def get_avc_record(self): for record_type in ['AVC', 'USER_AVC', "1400", "1107"]: record = self.get_record_of_type(record_type) if (record): return record def is_avc(self): return self.get_avc_record() is not None def is_granted(self): avc_record = self.get_avc_record() if avc_record is None: return False seresult = avc_record.fields['seresult'] if seresult == 'denied': return False if seresult == 'granted': return True log.avc.warn("unknown value for seresult ('%s')", seresult) return False #------------------------------------------------------------------------------ class AVC: # These are the perm sets from the reference policy for file, dirs, and filesystems. # They are here to be used below in the access matching functions. 
stat_file_perms = ['getattr'] x_file_perms = ['getattr', 'execute'] r_file_perms = ['open', 'read', 'getattr', 'lock', 'ioctl'] rx_file_perms = ['open', 'read', 'getattr', 'lock', 'execute', 'ioctl'] ra_file_perms = ['open', 'ioctl', 'read', 'getattr', 'lock', 'append'] link_file_perms = ['getattr', 'link', 'unlink', 'rename'] create_lnk_perms = ['create', 'read', 'getattr', 'setattr', 'link', 'unlink', 'rename'] create_file_perms = ['open', 'create', 'ioctl', 'read', 'getattr', 'lock', 'write', 'setattr', 'append', 'link', 'unlink', 'rename'] r_dir_perms = ['open', 'read', 'getattr', 'lock', 'search', 'ioctl'] rw_dir_perms = ['open', 'read', 'getattr', 'lock', 'search', 'ioctl', 'add_name', 'remove_name', 'write'] ra_dir_perms = ['open', 'read', 'getattr', 'lock', 'search', 'ioctl', 'add_name', 'write'] create_dir_perms = ['open', 'create', 'read', 'getattr', 'lock', 'setattr', 'ioctl', 'link', 'unlink', 'rename', 'search', 'add_name', 'remove_name', 'reparent', 'write', 'rmdir'] mount_fs_perms = ['mount', 'remount', 'unmount', 'getattr'] search_dir_perms = ['getattr', 'search'] getattr_dir_perms = ['getattr'] setattr_dir_perms = ['setattr'] list_dir_perms = ['open', 'getattr', 'search', 'read', 'lock', 'ioctl'] add_entry_dir_perms = ['open', 'getattr', 'search', 'lock', 'ioctl', 'write', 'add_name'] del_entry_dir_perms = ['open', 'getattr', 'search', 'lock', 'ioctl', 'write', 'remove_name'] manage_dir_perms = ['open', 'create', 'getattr', 'setattr', 'read', 'write', 'link', 'unlink', 'rename', 'search', 'add_name', 'remove_name', 'reparent', 'rmdir', 'lock', 'ioctl'] getattr_file_perms = ['getattr'] setattr_file_perms = ['setattr'] read_file_perms = ['open', 'getattr', 'read', 'lock', 'ioctl'] append_file_perms = ['open', 'getattr', 'append', 'lock', 'ioctl'] write_file_perms = ['open', 'getattr', 'write', 'append', 'lock', 'ioctl'] rw_file_perms = ['open', 'getattr', 'read', 'write', 'append', 'ioctl', 'lock'] delete_file_perms = ['getattr', 'unlink'] 
manage_file_perms = ['open', 'create', 'getattr', 'setattr', 'read', 'write', 'append', 'rename', 'link', 'unlink', 'ioctl', 'lock'] pipe_instance_path_re = re.compile(r'^(\w+):\[([^\]]*)\]') proc_pid_instance_re = re.compile(r'^(/proc/)(\d+)(.*)') def __init__(self, audit_event, query_environment=True): self.audit_event = audit_event self.query_environment = query_environment # if audit_event.timestamp is None: # self.audit_event.timestamp = TimeStamp() self.template_substitutions = {} self.tpath = None self.spath = None self.source = None self.source_pkg = None self.access = None self.scontext = None self.tcontext = None self.tclass = None self.port = None self.src_rpms=[] self.tgt_rpms=[] self.host = None self.pid = None self.kmod = None self.syscall = None self.why = None self.bools = [] self.derive_avc_info_from_audit_event() def __str__(self): return self.format_avc() def format_avc(self): text = '' text += 'scontext=%s ' % self.scontext text += 'tcontext=%s ' % self.tcontext text += 'access=%s ' % self.access text += 'tclass=%s ' % self.tclass text += 'tpath=%s ' % self.tpath return text # Below are helper functions to get values that might be # stored in one or more fields in an AVC. def has_any_access_in(self, access_list): 'Returns true if the AVC contains _any_ of the permissions in the access list.' 
if self.access is None: return False for a in self.access: if a in access_list: return True return False def all_accesses_are_in(self, access_list): """Returns true if _every_ access in the AVC matches at least one of the permissions in the access list.""" if self.access is None: return False for a in self.access: if a not in access_list: return False return True def allowed_target_types(self): all_types = get_all_file_types() + get_all_port_types() all_types.sort() all_attributes = get_all_attributes() all_attributes.sort() allowed_types = [] wtypes = [x[TARGET] for x in [y for y in search([ALLOW], {SOURCE: self.scontext.type, CLASS: self.tclass, PERMS: self.access}) if y["enabled"]]] types = wtypes for t in types: if t in all_attributes: wtypes.extend(info(ATTRIBUTE, t)[0]["types"]) for t in wtypes: if t in all_types: if t not in allowed_types: allowed_types.append(t) allowed_types.sort() return allowed_types def open_with_write(self): if self.has_any_access_in(['open']): try: if self.a1 and (int(self.a1) & O_ACCMODE) != os.O_RDONLY: return True except: pass return False def __typeMatch(self, context, type_list): for type in type_list: if re.match(type, context.type): return True return False def matches_source_types(self, type_list): """Returns true if the type in the source context of the avc regular expression matches any of the types in the type list.""" if self.scontext is None: return False return self.__typeMatch(self.scontext, type_list) def matches_target_types(self, type_list): """Returns true if the type in the target context of the avc regular expression matches any of the types in the type list.""" if self.tcontext is None: return False return self.__typeMatch(self.tcontext, type_list) def has_tclass_in(self, tclass_list): if self.tclass is None: return False return self.tclass in tclass_list def update(self): self.derive_environmental_info() self.update_derived_template_substitutions() def path_is_not_standard_directory(self): if self.tpath is None: 
return True return self.tpath not in standard_directories def decodehex(self,path): try: t = path.decode("hex") if t[0].encode("hex") == "00": tpath = "@" else: tpath = t[0] for i in range(len(t))[1:]: if t[i].encode("hex") != "00": tpath = tpath + t[i] else: break if not printable(tpath): tpath = path except: tpath = path if not printable(tpath): return "" return tpath def _set_tpath(self): '''Derive the target path. If path information is available the avc record will have a path field and no name field because the path field is more specific and supercedes name. The name field is typically the directory entry. For some special files the kernel embeds instance information into the file name. For example 'pipe:[1234]' or 'socket:[1234]' where the number inside the brackets is the inode number. The proc pseudo file system has the process pid embedded in the name, for example '/proc/1234/mem'. These numbers are ephemeral and do not contribute meaningful information for our reports. Plus we may use the path information to decide if an alert is identical to a previous alert, we coalesce them if they are. The presence of an instance specific number in the path confuses this comparision. 
For these reasons we strip any instance information out of the path, Example input and output: pipe:[1234] --> pipe socket:[1234] --> socket /proc/1234/fd --> /proc/<pid>/fd ./foo --> ./foo /etc/sysconfig --> /etc/sysconfig ''' path = None name = None # First try to get the path from the AVC record, new kernel # versions put it there rather than in AVC_PATH path = self.avc_record.get_field('path') if path: path = path.strip('"') inodestr = self.avc_record.get_field("ino") if path is None: avc_path_record = self.audit_event.get_record_of_type('PATH') if avc_path_record: path = avc_path_record.get_field('name') if path is None: # No path field, so try and use the name field instead name = self.avc_record.get_field('name') if name is not None: # Use the class to be smart about formatting the name tclass = self.avc_record.get_field('tclass') if tclass == 'file': # file name is not a full path so make it appear relative path = '%s' % name elif tclass == 'dir': # directory component is not a full path so make it appear # relative, but only if it's not the root if name == '/': path = name else: path = '%s' % name else: # just use the bare name path = name if path is not None: if path == "/" and inodestr: matches = [] try: dev_rdev = 0 dev = self.avc_record.get_field('dev') if os.path.exists("/dev/"+dev): dev_rdev = os.lstat("/dev/"+dev).st_rdev ino = int(inodestr) fd=open("/proc/mounts", "r") for i in fd.read().split("\n"): x = i.split() if len(x) and x[1][0] == '/': try: if (dev_rdev == 0 or os.stat(x[0]).st_rdev == dev_rdev) and int(os.lstat(x[1]).st_ino) == ino: matches.append(x[:3]) except OSError: continue fd.close() if len(matches) == 1: path = matches[0][1] elif len(matches) > 1: for i in matches: if i[0] == ("/dev/%s" % dev) or i[2] == dev: path = i[1] break else: try: if dev_rdev != 0 and os.lstat(i[0]).st_rdev == dev_rdev: path = i[1] break except OSError: pass except TypeError: path = "unknown mountpoint" pass except OSError: path = "unknown mountpoint" pass 
else: if path.startswith("/") == False and inodestr: import subprocess command = ["locate", "-b", "\%s" % path] try: output = subprocess.check_output(command, stderr=subprocess.STDOUT) for file in output.split("\n"): try: if int(os.lstat(file).st_ino) == ino: path = file break except: pass except subprocess.CalledProcessError as e: pass if path is not None: if path.startswith('/'): # Fully qualified path # map /proc/1234/ to /proc/<pid>, replacing numeric pid with <pid> path = self.proc_pid_instance_re.sub(r'\1<pid>\3', path) else: # map pipe:[1234] to pipe, stripping out inode instance (e.g. [1234]) # applies to socket as well match = self.pipe_instance_path_re.search(path) if match: path = self.tclass self.tpath = self.decodehex(path) if self.tpath == '': self.tpath = path if self.tpath is None: if self.tclass == "filesystem": self.tpath = "" elif self.tclass == "udp_socket" or self.tclass == "tcp_socket": self.tpath = _("port %s") % self.port else: self.tpath = _("Unknown") def derive_avc_info_from_audit_event(self): self.tpath = None self.spath = None self.source = None self.a1 = None self.success = False self.syscall_paths = [] exe = comm = arch = syscall = None self.avc_record = self.audit_event.get_avc_record() syscall_record = self.audit_event.get_record_of_type('SYSCALL') self.access = self.avc_record.get_field('seperms') if not isinstance(self.scontext, AvcContext): self.scontext = AvcContext(self.avc_record.get_field('scontext')) if not isinstance(self.tcontext, AvcContext): self.tcontext = AvcContext(self.avc_record.get_field('tcontext')) self.tclass = self.avc_record.get_field('tclass') if self.avc_record.get_field('dest') is None: self.port = self.avc_record.get_field('src') else: self.port = self.avc_record.get_field('dest') self._set_tpath() self.kmod = self.avc_record.get_field('kmod') self.pid = self.avc_record.get_field('pid') # exe, cwd, name, path, key, dir, comm, ocomm, key_desc if syscall_record: exe = syscall_record.get_field('exe') try: 
exe.decode("hex") except: pass comm = syscall_record.get_field('comm') self.syscall = syscall_record.get_field('syscall') self.success = (syscall_record.get_field('success') == "yes") self.a1 = syscall_record.get_field('a1') if comm is None: comm = self.avc_record.get_field('comm') if exe is None: exe = self.avc_record.get_field('exe') try: self.spath = exe.decode("hex") except: self.spath = exe if comm: self.source = comm elif exe: self.source = self.spath if not self.spath: self.spath = self.source if not self.spath: self.spath = self.scontext.type cwd_record = self.audit_event.get_record_of_type('CWD') if cwd_record: cwd = cwd_record.get_field('cwd') else: cwd = None path_records = self.audit_event.get_records_of_type('PATH') for path_record in path_records: path = path_record.get_field('name') if os.path.isabs(path) or not cwd: self.syscall_paths.append(path) else: self.syscall_paths.append(os.path.join(cwd, path)) self.src_rpms=[] self.tgt_rpms=[] self.host = self.audit_event.event_id.host self.why, bools = audit2why.analyze(str(self.scontext), str(self.tcontext), str(self.tclass), self.access) if self.why == audit2why.ALLOW: raise ValueError(_("%s \n**** Invalid AVC allowed in current policy ***\n") % self.avc_record) if self.why == audit2why.DONTAUDIT: raise ValueError(_("%s \n**** Invalid AVC dontaudited in current policy. 'semodule -B' will turn on dontaudit rules. ***\n") % self.avc_record) if self.why == audit2why.NOPOLICY: raise ValueError(_("Must call policy_init first")) if self.why == audit2why.BADTCON: raise ValueError(_("%s \n**** Invalid AVC bad target context. ***\n") % self.avc_record) if self.why == audit2why.BADSCON: raise ValueError(_("%s \n**** Invalid AVC bad source context. 
***\n") % self.avc_record) if self.why == audit2why.BADSCON: raise ValueError(_("%s \n**** Invalid AVC bad type class ***\n") % self.avc_record) if self.why == audit2why.BADPERM: raise ValueError(_("%s \n**** Invalid AVC bad permission ***\n") % self.avc_record) if self.why == audit2why.BADCOMPUTE: raise ValueError(_("Error during access vector computation")) if self.why == audit2why.BOOLEAN: self.bools = bools def derive_environmental_info(self): if self.query_environment: if self.spath: self.source_pkg = get_rpm_nvr_by_file_path(self.spath) if self.source_pkg: self.src_rpms.append(self.source_pkg) if self.tpath: rpm = get_rpm_nvr_by_file_path(self.tpath) if rpm: self.tgt_rpms.append(rpm) def set_alt_path(self, path): if self.tpath is None: self.tpath = path def set_template_substitutions(self, **kwds): for key, value in list(kwds.items()): if value: self.template_substitutions[key] = value def update_derived_template_substitutions(self): self.template_substitutions["SOURCE_TYPE"] = escape_html(self.scontext.type) self.template_substitutions["TARGET_TYPE"] = escape_html(self.tcontext.type) self.template_substitutions["SOURCE"] = escape_html(self.source) self.template_substitutions["SOURCE_PATH"] = escape_html(self.spath) if self.spath: self.template_substitutions["FIX_SOURCE_PATH"] = re.sub(" ",".",escape_html(self.spath)) self.template_substitutions["TARGET_PATH"] = escape_html(self.tpath) if self.tpath: self.template_substitutions["FIX_TARGET_PATH"] = re.sub(" ",".",escape_html(self.tpath)) if self.tpath is None: self.template_substitutions["TARGET_DIR"] = None else: if self.tclass == 'dir': self.template_substitutions["TARGET_DIR"] = escape_html(self.tpath) elif self.tclass == 'file': self.template_substitutions["TARGET_DIR"] = escape_html(os.path.dirname(self.tpath)) else: self.template_substitutions["TARGET_DIR"] = None self.template_substitutions["TARGET_CLASS"] = escape_html(self.tclass) if self.access is None: self.template_substitutions["ACCESS"] = None 
else: self.template_substitutions["ACCESS"] = escape_html(' '.join(self.access)) self.template_substitutions["SOURCE_PACKAGE"] = escape_html(self.source_pkg) self.template_substitutions["PORT_NUMBER"] = escape_html(self.port) def validate_template_substitutions(self): # validate, replace any None values with friendly string for key, value in list(self.template_substitutions.items()): if value is None: self.template_substitutions[key] = escape_html(default_text(value))
./CrossVul/dataset_final_sorted/CWE-77/py/good_5077_2
crossvul-python_data_bad_5860_0
# -*- coding: utf-8 -*- #Canto-curses - ncurses RSS reader # Copyright (C) 2014 Jack Miller <jack@codezen.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. from canto_next.hooks import on_hook from canto_next.plugins import Plugin from canto_next.remote import assign_to_dict, access_dict from .command import CommandHandler, register_commands, register_arg_types, unregister_all, _string, register_aliases, commands, command_help from .tagcore import tag_updater from .parser import prep_for_display from .config import needs_eval import logging log = logging.getLogger("COMMON") import subprocess import tempfile import urllib.request, urllib.error, urllib.parse import shlex import sys import os import os.path class BasePlugin(Plugin): pass class GuiBase(CommandHandler): def init(self): args = { "key": ("[key]: Simple keys (a), basic chords (C-r, M-a), or named whitespace like space or tab", _string), "command": ("[command]: Any canto-curses command. 
(Will show current binding if not given)\n Simple: goto\n Chained: foritems \\\\& goto \\\\& item-state read \\\\& clearitems \\\\& next-item", self.type_unescape_command), "remote-cmd": ("[remote cmd]", self.type_remote_cmd), "url" : ("[URL]", _string), "help-command" : ("[help-command]: Any canto-curses command, if blank, 'any' or unknown, will display help overview", self.type_help_cmd), "config-option" : ("[config-option]: Any canto-curses option", self.type_config_option), "executable" : ("[executable]: A program in your PATH", self.type_executable), } cmds = { "bind" : (self.cmd_bind, [ "key", "command" ], "Add or query %s keybinds" % self.get_opt_name()), "transform" : (self.cmd_transform, ["string"], "Set user transform"), "remote addfeed" : (lambda x : self.cmd_remote("addfeed", x), ["url"], "Subscribe to a feed"), "remote listfeeds" : (lambda : self.cmd_remote("listfeeds", ""), [], "List feeds"), "remote": (self.cmd_remote, ["remote-cmd", "string"], "Give a command to canto-remote"), "destroy": (self.cmd_destroy, [], "Destroy this %s" % self.get_opt_name()), "set" : (self.cmd_set, ["config-option", "string"], "Set configuration options"), "set browser.path" : (lambda x : self.cmd_set("browser.path", x), ["executable"], "Set desired browser"), } help_cmds = { "help" : (self.cmd_help, ["help-command"], "Get help on a specific command") } aliases = { "add" : "remote addfeed", "del" : "remote delfeed", "list" : "remote listfeeds", # Compatibility / evaluation aliases "set global_transform" : "set defaults.global_transform", "set keep_time" : "set defaults.keep_time", "set keep_unread" : "set defaults.keep_unread", "set browser " : "set browser.path ", "set txt_browser " : "set browser.text ", "set update.auto " : "set update.auto.enabled ", "set border" : "set taglist.border", "filter" : "transform", "sort" : "transform", "next-item" : "rel-set-cursor 1", "prev-item" : "rel-set-cursor -1", } register_arg_types(self, args) register_commands(self, cmds, "Base") 
register_commands(self, help_cmds, "Help") register_aliases(self, aliases) self.editor = None self.plugin_class = BasePlugin self.update_plugin_lookups() def cmd_destroy(self): self.callbacks["die"](self) def die(self): unregister_all(self) # Provide completions, but we don't care to verify settings. def type_executable(self): executables = [] for path_dir in os.environ["PATH"].split(os.pathsep): for f in os.listdir(path_dir): fullpath = os.path.join(path_dir, f) if os.path.isfile(fullpath) and os.access(fullpath, os.X_OK): executables.append(f) return (executables, lambda x : (True, x)) def _fork(self, path, href, text, fetch=False): # Prepare temporary files, if fetch. if fetch: # Get a path (sans query strings, etc.) for the URL tmppath = urllib.parse.urlparse(href).path # Return just the basename of the path (no directories) fname = os.path.basename(tmppath) # Grab a temporary directory. This allows us to create a file with # an unperturbed filename so scripts can freely use regex / # extension matching in addition to mimetype detection. tmpdir = tempfile.mkdtemp(prefix="canto-") tmpnam = tmpdir + '/' + fname on_hook("curses_exit", lambda : (os.unlink(tmpnam))) on_hook("curses_exit", lambda : (os.rmdir(tmpdir))) pid = os.fork() # Parents can now bail. if pid: return pid if fetch: tmp = open(tmpnam, 'w+b') # Grab the HTTP info / prepare to read. response = urllib.request.urlopen(href) # Grab in kilobyte chunks to avoid wasting memory on something # that's going to be immediately written to disk. while True: r = response.read(1024) if not r: break tmp.write(r) response.close() tmp.close() href = tmpnam # A lot of programs don't appreciate # having their fds closed, so instead # we dup them to /dev/null. 
fd = os.open("/dev/null", os.O_RDWR) os.dup2(fd, sys.stderr.fileno()) if not text: os.setpgid(os.getpid(), os.getpid()) os.dup2(fd, sys.stdout.fileno()) if "%u" in path: path = path.replace("%u", href) elif href: path = path + " " + href os.execv("/bin/sh", ["/bin/sh", "-c", path]) # Just in case. sys.exit(0) def _edit(self, text): if not self.editor: self.editor = os.getenv("EDITOR") if not self.editor: self.editor = self.input("editor: ") # No editor, or cancelled dialog, no change. if not self.editor: return text self.callbacks["pause_interface"]() # Setup tempfile to edit. fd, path = tempfile.mkstemp(text=True) f = os.fdopen(fd, "w") f.write(text) f.close() # Invoke editor logging.info("Invoking editor on %s" % path) pid = self._fork(self.editor + " %u", path, True) pid, status = os.waitpid(pid, 0) if status == 0: f = open(path, "r") r = f.read() f.close() else: self.callbacks["set_var"]("error_msg", "Editor failed! Status = %d" % (status,)) r = text # Cleanup temp file. os.unlink(path) self.callbacks["unpause_interface"]() return r def cmd_edit(self, **kwargs): t = self.callbacks["get_opt"](kwargs["opt"]) r = self._edit(t) log.info("Edited %s to %s" % (kwargs["opt"], r)) self.callbacks["set_opt"](kwargs["opt"], r) def type_remote_cmd(self): remote_cmds = [ "help", "addfeed", "listfeeds", "delfeed", "force-update", "config", "one-config", "export", "import", "kill" ] return (remote_cmds, lambda x : (x in remote_cmds, x)) def _remote_argv(self, argv): loc_args = self.callbacks["get_var"]("location") argv = [argv[0]] + loc_args + argv[1:] log.debug("Calling remote: %s" % argv) # check_output return bytes, we must decode. out = subprocess.check_output(argv).decode() log.debug("Output:") log.debug(out.rstrip()) # Strip anything that could be misconstrued as style # from remote output. out = out.replace("%","\\%") log.info(out.rstrip()) def _remote(self, args): args = "canto-remote " + args # Add location args, so the remote is connecting # to the correct daemon. 
self._remote_argv(shlex.split(args)) def remote_args(self, args): return self.string(args, "remote: ") def cmd_remote(self, remote_cmd, args): self._remote("%s %s" % (remote_cmd, args)) def _goto(self, urls, fetch=False): browser = self.callbacks["get_conf"]()["browser"] if not browser["path"]: log.error("No browser defined! Cannot goto.") return if browser["text"]: self.callbacks["pause_interface"]() for url in urls: pid = self._fork(browser["path"], url, browser["text"], fetch) if browser["text"]: os.waitpid(pid, 0) if browser["text"]: self.callbacks["unpause_interface"]() # Like goto, except download the file to /tmp before executing browser. def _fetch(self, urls): self._goto(urls, True) def cmd_transform(self, transform): tag_updater.transform("user", transform) tag_updater.reset(True) tag_updater.update() def type_unescape_command(self): def validate_uescape_command(x): # Change the escaped '&' from shlex into a raw & return (True, x.replace(" '&' ", " & ")) return (None, validate_uescape_command) def cmd_bind(self, key, cmd): self.bind(key, cmd.lstrip().rstrip(), True) def bind(self, key, cmd, overwrite=False): opt = self.get_opt_name() key = self.translate_key(key) c = self.callbacks["get_conf"]() if not cmd: if key in c[opt]["key"]: log.info("[%s] %s = %s" % (opt, key, c[opt]["key"][key])) return True else: return False else: if key in c[opt]["key"] and c[opt]["key"][key] and not overwrite: log.debug("%s already bound to %s" % (key, c[opt]["key"][key])) return False log.debug("Binding %s.%s to %s" % (opt, key, cmd)) c[opt]["key"][key] = cmd self.callbacks["set_conf"](c) return True def type_help_cmd(self): help_cmds = commands() def help_validator(x): if x in ["commands", "cmds"]: return (True, 'commands') for group in help_cmds: if x in help_cmds[group]: return (True, x) return (True, 'all') return (help_cmds, help_validator) def cmd_help(self, cmd): if self.callbacks["get_var"]("info_msg"): self.callbacks["set_var"]("info_msg", "") if cmd == 'all': 
log.info("%BHELP%b\n") log.info("This is a list of available keybinds.\n") log.info("For a list of commands, type ':help commands'\n") log.info("For help with a specific command, type ':help [command]'\n") log.info("%BBinds%b") config = self.callbacks["get_conf"]() for optname in [ "main", "taglist", "reader" ]: if "key" in config[optname] and list(config[optname]["key"].keys()) != []: maxbindl = max([ len(x) for x in config[optname]["key"].keys() ]) + 1 log.info("\n%B" + optname + "%b\n") for bind in sorted(config[optname]["key"]): bindeff = prep_for_display(bind + (" " * (maxbindl - len(bind)))) cmd = prep_for_display(config[optname]["key"][bind]) log.info("%s %s" % (bindeff, cmd)) elif cmd == 'commands': gc = commands() for group in sorted(gc.keys()): log.info("%B" + group + "%b\n") for c in sorted(gc[group]): log.info(command_help(c)) log.info("") else: log.info(command_help(cmd, True)) # Validate a single config option # Will offer completions for any recognized config option # Will *not* reject validly formatted options that don't already exist def _get_current_config_options(self, obj, stack): r = [] for item in obj.keys(): stack.append(item) if type(obj[item]) == dict: r.extend(self._get_current_config_options(obj[item], stack[:])) else: r.append(shlex.quote(".".join(stack))) stack = stack[:-1] return r def type_config_option(self): conf = self.callbacks["get_conf"]() possibles = self._get_current_config_options(conf, []) possibles.sort() return (possibles, lambda x : (True, x)) def cmd_set(self, opt, val): log.debug("SET: %s '%s'" % (opt, val)) evaluate = needs_eval(opt) if val != "" and evaluate: log.debug("Evaluating...") try: val = eval(val) except Exception as e: log.error("Couldn't eval '%s': %s" % (val, e)) return if opt.startswith("defaults."): conf = { "defaults" : self.callbacks["get_defaults"]() } if val != "": assign_to_dict(conf, opt, val) self.callbacks["set_defaults"](conf["defaults"]) elif opt.startswith("feed."): sel = 
self.callbacks["get_var"]("selected") if not sel: log.info("Feed settings only work with a selected item") return if sel.is_tag: try_tag = sel else: try_tag = sel.parent_tag if not try_tag.tag.startswith("maintag:"): log.info("Selection is in a user tag, cannot set feed settings") return name = try_tag.tag[8:] conf = { "feed" : self.callbacks["get_feed_conf"](name) } if val != "": assign_to_dict(conf, opt, val) self.callbacks["set_feed_conf"](name, conf["feed"]) elif opt.startswith("tag."): sel = self.callbacks["get_var"]("selected") if not sel: log.info("Tag settings only work with a selected item") return if sel.is_tag: tag = sel else: tag = sel.parent_tag conf = { "tag" : self.callbacks["get_tag_conf"](tag.tag) } if val != "": assign_to_dict(conf, opt, val) self.callbacks["set_tag_conf"](tag.tag, conf["tag"]) else: conf = self.callbacks["get_conf"]() if val != "": assign_to_dict(conf, opt, val) self.callbacks["set_conf"](conf) ok, val = access_dict(conf, opt) if not ok: log.error("Unknown option %s" % opt) log.error("Full conf: %s" % conf) else: log.info("%s = %s" % (opt, val))
./CrossVul/dataset_final_sorted/CWE-77/py/bad_5860_0
crossvul-python_data_good_5044_0
#
# Copyright (C) 2006-2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import gettext
translation = gettext.translation('setroubleshoot-plugins', fallback=True)
_ = translation.gettext

from setroubleshoot.util import *
from setroubleshoot.Plugin import Plugin
import subprocess
import sys


def is_execstack(path):
    """Return True if *path* is an absolute path to an ELF object whose
    execstack flag is set (``execstack -q`` prints an ``X`` marker).

    Tokens that are not absolute paths (ldd arrows, addresses, etc.) are
    rejected up front.  Any failure to run or parse execstack is treated
    as "not execstack" rather than aborting the whole scan.
    """
    # Guard the empty string as well as relative tokens; path[0] would
    # raise IndexError on "".
    if not path or path[0] != "/":
        return False
    try:
        x = subprocess.check_output(["execstack", "-q", path],
                                    universal_newlines=True).split()
    except (subprocess.CalledProcessError, OSError):
        # execstack missing or path is not an ELF object -- not execstack.
        return False
    # execstack may print nothing for some inputs; don't index into [].
    return len(x) > 0 and x[0] == "X"


def find_execstack(exe, pid):
    """Return the list of objects mapped by *exe* / process *pid* that are
    marked execstack.

    Candidates come from two places: the shared libraries reported by
    ``ldd exe`` and every path-looking token in ``/proc/<pid>/maps``.
    Duplicates are suppressed; a vanished /proc entry is ignored.
    """
    execstacklist = []
    for path in subprocess.check_output(["ldd", exe],
                                        universal_newlines=True).split():
        if is_execstack(path) and path not in execstacklist:
            execstacklist.append(path)
    try:
        # 'with' ensures the maps file is closed; the original leaked the
        # file descriptor.
        with open("/proc/%s/maps" % pid, "r") as fd:
            for rec in fd.readlines():
                for path in rec.split():
                    if is_execstack(path) and path not in execstacklist:
                        execstacklist.append(path)
    except IOError:
        # Process may have exited between the AVC and this analysis.
        pass
    return execstacklist


class plugin(Plugin):
    """Analysis plugin for 'execstack' AVC denials: a process asked for a
    writable+executable stack, which is almost always a library bug (or
    malicious code)."""

    summary = _('''
SELinux is preventing $SOURCE_PATH from making the program stack executable.
''')

    problem_description = _('''
The $SOURCE application attempted to make its stack executable.
This is a potential security problem.
This should never ever be necessary. Stack memory is not executable
on most OSes these days and this will not change. Executable stack memory
is one of the biggest security problems. An execstack error might in fact
be most likely raised by malicious code. Applications are sometimes coded
incorrectly and request this permission. The
<a href="http://people.redhat.com/drepper/selinux-mem.html">SELinux
Memory Protection Tests</a> web page explains how to remove this
requirement. If $SOURCE does not work and you need it to work, you can
configure SELinux temporarily to allow this access until the application
is fixed. Please file a bug report.
''')

    fix_description = _('''
Sometimes a library is accidentally marked with the execstack flag,
if you find a library with this flag you can clear it with the
execstack -c LIBRARY_PATH.  Then retry your application.  If the
app continues to not work, you can turn the flag back on with
execstack -s LIBRARY_PATH.
''')
    fix_cmd = ""

    if_text = _("you do not think $SOURCE_PATH should need to map stack memory that is both writable and executable.")
    then_text = _("you need to report a bug. \nThis is a potentially dangerous access.")
    do_text = _("Contact your security administrator and report this issue.")

    def get_if_text(self, avc, args):
        """Return the 'If' advice; when a concrete library path was found
        (args[0]) the text names it, otherwise fall back to the generic
        class text.  Any lookup failure also falls back."""
        try:
            path = args[0]
            if not path:
                return self.if_text
            return _("you believe that \n%s\nshould not require execstack") % path
        except:
            return self.if_text

    def get_then_text(self, avc, args):
        """Return the 'Then' advice, specialized to the offending library
        when one was identified."""
        try:
            path = args[0]
            if not path:
                return self.then_text
            return _("you should clear the execstack flag and see if $SOURCE_PATH works correctly.\nReport this as a bug on %s.\nYou can clear the exestack flag by executing:") % path
        except:
            return self.then_text

    def get_do_text(self, avc, args):
        """Return the 'Do' command line (execstack -c) for the offending
        library, or the generic advice when none was identified."""
        try:
            path = args[0]
            if not path:
                return self.do_text
            return _("execstack -c %s") % path
        except:
            return self.do_text

    def __init__(self):
        Plugin.__init__(self, __name__)

    def analyze(self, avc):
        """Match execstack denials from user domains.  Emit one report per
        execstack-flagged object found in the process; if none is found,
        emit a single generic report."""
        if (avc.matches_source_types(['unconfined_t', 'staff_t', 'user_t',
                                      'guest_t', 'xguest_t']) and
                avc.has_any_access_in(['execstack'])):
            reports = []
            for i in find_execstack(avc.spath, avc.pid):
                reports.append(self.report((i, avc)))
            if len(reports) > 0:
                return reports
            return self.report((None, None))
        else:
            return None
./CrossVul/dataset_final_sorted/CWE-77/py/good_5044_0
crossvul-python_data_bad_5077_3
# Authors: John Dennis <jdennis@redhat.com>
#          Thomas Liu <tliu@redhat.com>
#          Dan Walsh <dwalsh@redhat.com>
#
# Copyright (C) 2006-2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#

import syslog
from subprocess import *
import setroubleshoot.default_encoding_utf8
import gettext
translation = gettext.translation('setroubleshoot-plugins', fallback=True)
_ = translation.ugettext

__all__ = [
    'SignatureMatch',
    'SEFilter',
    'SEFaultSignature',
    'SEFaultSignatureInfo',
    'SEFaultSignatureSet',
    'SEFaultSignatureUser',
    'SEEnvironment',
    'SEDatabaseProperties',
    'SEFaultUserInfo',
    'SEFaultUserSet',
    'SEPlugin',
    'SEEmailRecipient',
    'SEEmailRecipientSet',

    'FILTER_NEVER',
    'FILTER_ALWAYS',
    'FILTER_AFTER_FIRST',

    'filter_text',
]

if __name__ == "__main__":
    import gettext
    from setroubleshoot.config import parse_config_setting, get_config
    gettext.install(domain=get_config('general', 'i18n_text_domain'),
                    localedir=get_config('general', 'i18n_locale_dir'))

from gettext import ngettext as P_
from setroubleshoot.config import get_config
from setroubleshoot.errcode import *
from setroubleshoot.util import *
from setroubleshoot.xml_serialize import *
from setroubleshoot.html_util import *
import setroubleshoot.uuid as uuid
from setroubleshoot.audit_data import *
import hashlib
from types import *
from string import Template
import re
import os

# Filter type constants persisted in the user database.
# Don't reuse the numeric values!
FILTER_NEVER = 0
FILTER_ALWAYS = 4
FILTER_AFTER_FIRST = 8

filter_text = {
    FILTER_NEVER:       _("Never Ignore"),
    FILTER_ALWAYS:      _("Ignore Always"),
    FILTER_AFTER_FIRST: _("Ignore After First Alert"),
}

map_filter_value_to_name = {
    FILTER_NEVER:       'never',
    FILTER_ALWAYS:      'always',
    FILTER_AFTER_FIRST: 'after_first',
}

map_filter_name_to_value = {
    'never':       FILTER_NEVER,
    'always':      FILTER_ALWAYS,
    'after_first': FILTER_AFTER_FIRST,
}

#------------------------------------------------------------------------

class SignatureMatch(object):
    """Pairs a signature with the score it achieved in a database match."""
    def __init__(self, siginfo, score):
        self.siginfo = siginfo
        self.score = score


class SEEnvironment(XmlSerialize):
    """Snapshot of the host's SELinux/platform environment at alert time."""
    _xml_info = {
    'version':             {'XMLForm': 'attribute', 'default': lambda: '1.0'},
    'platform':            {'XMLForm': 'element'},
    'kernel':              {'XMLForm': 'element'},
    'policy_type':         {'XMLForm': 'element'},
    'policy_rpm':          {'XMLForm': 'element'},
    'enforce':             {'XMLForm': 'element'},
    'selinux_enabled':     {'XMLForm': 'element', 'import_typecast': boolean},
    'selinux_mls_enabled': {'XMLForm': 'element', 'import_typecast': boolean},
    'policyvers':          {'XMLForm': 'element'},
    'hostname':            {'XMLForm': 'element'},
    'uname':               {'XMLForm': 'element'},
    }

    def __init__(self):
        super(SEEnvironment, self).__init__()
        self.update()

    def update(self):
        """Re-read the live environment into this object's attributes."""
        import platform
        import selinux
        # security_getenforce is the same as the getenforce command.
        # selinux_getenforcemode tells you what is set in /etc/selinux/config
        self.platform, self.kernel = get_os_environment()
        self.policy_type = selinux.selinux_getpolicytype()[1]
        self.policy_rpm = get_rpm_nvr_by_name("selinux-policy")
        self.policyvers = str(selinux.security_policyvers())
        enforce = selinux.security_getenforce()
        if enforce == 0:
            self.enforce = "Permissive"
        else:
            self.enforce = "Enforcing"
        self.selinux_enabled = bool(selinux.is_selinux_enabled())
        self.selinux_mls_enabled = bool(selinux.is_selinux_mls_enabled())
        self.hostname = platform.node()
        self.uname = " ".join(platform.uname())

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        # Field-by-field comparison over every serialized attribute.
        for name in self._xml_info.keys():
            if getattr(self, name) != getattr(other, name):
                return False
        return True


class SEFilter(XmlSerialize):
    """Per-user alert filter: how often (if ever) to show an alert."""
    _xml_info = {
    'filter_type': {'XMLForm': 'element', 'import_typecast': int, 'default': lambda: FILTER_NEVER},
    'count':       {'XMLForm': 'element', 'import_typecast': int, 'default': lambda: 0},
    }

    def __init__(self, filter_type=FILTER_NEVER):
        super(SEFilter, self).__init__()
        self.filter_type = filter_type


class SEFaultSignatureUser(XmlSerialize):
    """Per-user state attached to a signature (seen/delete flags, filter)."""
    _xml_info = {
    'username':    {'XMLForm': 'attribute'},
    'seen_flag':   {'XMLForm': 'attribute', 'import_typecast': boolean, 'default': lambda: False},
    'delete_flag': {'XMLForm': 'attribute', 'import_typecast': boolean, 'default': lambda: False},
    'filter':      {'XMLForm': 'element', 'import_typecast': SEFilter, 'default': lambda: SEFilter()},
    }

    def __init__(self, username):
        super(SEFaultSignatureUser, self).__init__()
        self.username = username

    def update_item(self, item, data):
        """Set attribute *item* to *data*; the username is immutable."""
        if not item in self._names:
            raise ProgramError(ERR_NOT_MEMBER,
                               'item (%s) is not a defined member' % item)
        if item == 'username':
            raise ProgramError(ERR_ILLEGAL_USER_CHANGE,
                               'changing the username is illegal')
        setattr(self, item, data)

    def update_filter(self, filter_type, data=None):
        """Replace this user's filter with a fresh one of *filter_type*.

        Raises ValueError for unknown filter types.  *data* is only logged.
        """
        log_debug("update_filter: filter_type=%s data=%s" %
                  (map_filter_value_to_name.get(filter_type, 'unknown'), data))
        if filter_type == FILTER_NEVER or \
           filter_type == FILTER_AFTER_FIRST or \
           filter_type == FILTER_ALWAYS:
            log_debug("update_filter: !!!")
            self.filter = SEFilter(filter_type=filter_type)
            return True
        else:
            raise ValueError("Bad filter_type (%s)" % filter_type)


# Translations for SELinux object class names used in alert text.
class_dict = {}
class_dict['dir'] = _("directory")
class_dict['sem'] = _("semaphore")
class_dict['shm'] = _("shared memory")
class_dict['msgq'] = _("message queue")
class_dict['msg'] = _("message")
class_dict['file'] = _("file")
class_dict['socket'] = _("socket")
class_dict['process'] = _("process")
class_dict['filesystem'] = _("filesystem")
class_dict['node'] = _("node")
class_dict['capability'] = _("capability")


def translate_class(tclass):
    """Return the localized display name for an SELinux object class,
    falling back to the raw class name."""
    if tclass in class_dict.keys():
        return class_dict[tclass]
    return tclass

# --

class AttributeValueDictionary(XmlSerialize):
    """Free-form key/value bag serialized without a fixed schema."""
    _xml_info = 'unstructured'
    def __init__(self):
        super(AttributeValueDictionary, self).__init__()


class SEFaultSignature(XmlSerialize):
    """The identifying tuple of an AVC fault, used for database matching."""
    _xml_info = {
    'version':  {'XMLForm': 'attribute', 'default': lambda: '4.0', },
    'host':     {'XMLForm': 'element', },
    'access':   {'XMLForm': 'element', 'list': 'operation', },
    'scontext': {'XMLForm': 'element', 'import_typecast': AvcContext},
    'tcontext': {'XMLForm': 'element', 'import_typecast': AvcContext},
    'tclass':   {'XMLForm': 'element', },
    'port':     {'XMLForm': 'element', 'import_typecast': int, },
    }

    def __init__(self, **kwds):
        super(SEFaultSignature, self).__init__()
        for k, v in kwds.items():
            setattr(self, k, v)


class SEPlugin(XmlSerialize):
    """Reference to an analysis plugin plus the arguments it matched with."""
    _xml_info = {
    'analysis_id': {'XMLForm': 'element'},
    'args':        {'XMLForm': 'element', 'list': 'arg', },
    }

    def __init__(self, analysis_id, args):
        super(SEPlugin, self).__init__()
        self.analysis_id = analysis_id
        self.args = args

    def __str__(self):
        return str((self.analysis_id, self.args))


class SEFaultSignatureInfo(XmlSerialize):
    """Full record of one distinct fault: the signature, the raw audit
    event, environment snapshot, matched plugins and per-user state."""
    _xml_info = {
    'plugin_list':     {'XMLForm': 'element', 'list': 'plugin', 'import_typecast': SEPlugin},
    'audit_event':     {'XMLForm': 'element', 'import_typecast': AuditEvent},
    'source':          {'XMLForm': 'element'},
    'spath':           {'XMLForm': 'element'},
    'tpath':           {'XMLForm': 'element'},
    'src_rpm_list':    {'XMLForm': 'element', 'list': 'rpm', },
    'tgt_rpm_list':    {'XMLForm': 'element', 'list': 'rpm', },
    'scontext':        {'XMLForm': 'element', 'import_typecast': AvcContext},
    'tcontext':        {'XMLForm': 'element', 'import_typecast': AvcContext},
    'tclass':          {'XMLForm': 'element', },
    'port':            {'XMLForm': 'element', 'import_typecast': int, },
    'sig':             {'XMLForm': 'element', 'import_typecast': SEFaultSignature},
    'if_text':         {'XMLForm': 'element'},
    'then_text':       {'XMLForm': 'element'},
    'do_text':         {'XMLForm': 'element'},
    'environment':     {'XMLForm': 'element', 'import_typecast': SEEnvironment},
    'first_seen_date': {'XMLForm': 'element', 'import_typecast': TimeStamp},
    'last_seen_date':  {'XMLForm': 'element', 'import_typecast': TimeStamp},
    'report_count':    {'XMLForm': 'element', 'import_typecast': int, 'default': lambda: 0},
    'local_id':        {'XMLForm': 'element'},
    'users':           {'XMLForm': 'element', 'list': 'user', 'import_typecast': SEFaultSignatureUser, },
    'level':           {'XMLForm': 'element'},
    'fixable':         {'XMLForm': 'element'},
    'button_text':     {'XMLForm': 'element'},
    }

    # Attributes refreshed from the newer event when two reports of the
    # same signature are merged.
    merge_include = ['audit_event', 'tpath',
                     'src_rpm_list', 'tgt_rpm_list',
                     'scontext', 'tcontext',
                     'tclass', 'port', 'environment',
                     'last_seen_date']

    def __init__(self, **kwds):
        super(SEFaultSignatureInfo, self).__init__()
        for k, v in kwds.items():
            setattr(self, k, v)
        self.report_count = 1
        self.plugin_list = []

    def update_merge(self, siginfo):
        """Fold a newer report of the same fault into this record,
        bumping the count and refreshing the merge_include attributes."""
        # NOTE(review): reconstructed nesting -- merge only when the
        # incoming event carries a new timestamp; confirm against upstream.
        if siginfo.last_seen_date != self.last_seen_date:
            self.last_seen_date = siginfo.last_seen_date
            self.report_count += 1
            for name in self.merge_include:
                setattr(self, name, getattr(siginfo, name))
        # older databases can have an uninitialized level
        if self.level is None:
            self.level = siginfo.level

    def get_policy_rpm(self):
        return self.environment.policy_rpm

    def get_hash_str(self):
        """Canonical string identity of this fault (source, types, class,
        accesses) -- the input to get_hash()."""
        return "%s,%s,%s,%s,%s" % (self.source, self.scontext.type,
                                   self.tcontext.type, self.tclass,
                                   ",".join(self.sig.access))

    def get_hash(self):
        hash = hashlib.sha256(self.get_hash_str())
        return hash.hexdigest()

    def get_user_data(self, username):
        """Return (creating on demand) the per-user record for *username*."""
        for user in self.users:
            if user.username == username:
                return user
        log_debug("new SEFaultSignatureUser for %s" % username)
        user = SEFaultSignatureUser(username)
        self.users.append(user)
        return user

    def find_filter_by_username(self, username):
        log_debug("find_filter_by_username %s" % username)
        filter = None
        user_data = self.get_user_data(username)
        if user_data is not None:
            filter = user_data.filter
        return filter

    def update_user_filter(self, username, filter_type, data=None):
        user_data = self.get_user_data(username)
        user_data.update_filter(filter_type, data)

    def evaluate_filter_for_user(self, username, filter_type=None):
        """Return 'display' or 'ignore' for this alert and user, optionally
        forcing *filter_type* before evaluating."""
        action = 'display'
        f = self.find_filter_by_username(username)
        log_debug("evaluate_filter_for_user: found %s user's filter = %s" % (username, f))
        if f is not None:
            if filter_type is not None:
                f.filter_type = filter_type
            action = self.evaluate_filter(f)
            log_debug("evaluate_filter_for_user: found filter for %s: %s\n%s" % (username, action, f))
        return action

    def evaluate_filter(self, filter):
        """Apply *filter*, incrementing its hit count as a side effect."""
        filter_type = filter.filter_type
        action = 'display'
        if filter_type == FILTER_NEVER:
            action = 'display'
        elif filter_type == FILTER_AFTER_FIRST:
            # First hit shows; subsequent ones are suppressed.
            if filter.count == 0:
                action = 'display'
            else:
                action = 'ignore'
        elif filter_type == FILTER_ALWAYS:
            action = 'ignore'
        else:
            raise ValueError("unknown filter_type (%s)" % (filter_type))
        filter.count += 1
        return action

    def format_rpm_list(self, rpm_list):
        if isinstance(rpm_list, list):
            if len(rpm_list) > 0:
                return " ".join(rpm_list)
            else:
                return ""
        else:
            return default_text(None)

    def format_target_object(self):
        return "%s [ %s ]" % (self.tpath, self.tclass)

    def description_adjusted_for_permissive(self):
        """Compute the permissive-mode caveat for this alert.

        NOTE(review): this builds permissive_msg but returns nothing, so
        the result is discarded; preserved as-is since callers may rely on
        it being a no-op -- confirm intended behavior upstream.
        """
        permissive_msg = None
        syscall_record = self.audit_event.get_record_of_type('SYSCALL')
        if syscall_record != None and syscall_record.get_field('success') == 'yes':
            permissive_msg = _("%s has a permissive type (%s). This access was not denied.") % (self.source, self.scontext.type)
        if self.environment.enforce == "Permissive":
            permissive_msg = _("SELinux is in permissive mode. This access was not denied.")

    def update_derived_template_substitutions(self):
        """(Re)build the $VARIABLE substitution table used when expanding
        plugin text templates; None values become a friendly placeholder."""
        self.template_substitutions = {}
        self.template_substitutions["SOURCE_TYPE"] = self.scontext.type
        self.template_substitutions["TARGET_TYPE"] = self.tcontext.type
        self.template_substitutions["SOURCE"] = self.source
        self.template_substitutions["SOURCE_PATH"] = self.spath
        self.template_substitutions["SOURCE_BASE_PATH"] = os.path.basename(self.spath)
        self.template_substitutions["MODULE_NAME"] = re.sub('[^a-zA-Z0-9]', '', self.source)
        if self.spath:
            self.template_substitutions["FIX_SOURCE_PATH"] = re.sub(" ", ".", self.spath)
        else:
            self.spath = _("N/A")
        self.template_substitutions["TARGET_PATH"] = self.tpath
        self.template_substitutions["TARGET_BASE_PATH"] = os.path.basename(self.tpath)
        if self.tpath:
            self.template_substitutions["FIX_TARGET_PATH"] = re.sub(" ", ".", self.tpath)
        if self.tpath is None:
            self.template_substitutions["TARGET_DIR"] = None
        else:
            if self.tclass == 'dir':
                self.template_substitutions["TARGET_DIR"] = self.tpath
            elif self.tclass == 'file':
                self.template_substitutions["TARGET_DIR"] = os.path.dirname(self.tpath)
            else:
                self.template_substitutions["TARGET_DIR"] = None
        if self.tclass == "dir":
            self.template_substitutions["TARGET_CLASS"] = "directory"
        else:
            self.template_substitutions["TARGET_CLASS"] = self.tclass
        if self.sig.access is None:
            self.template_substitutions["ACCESS"] = None
        else:
            self.template_substitutions["ACCESS"] = ' '.join(self.sig.access)
        if len(self.src_rpm_list) > 0:
            self.template_substitutions["SOURCE_PACKAGE"] = self.src_rpm_list[0]
        self.template_substitutions["PORT_NUMBER"] = self.port
        # validate, replace any None values with friendly string
        for key, value in self.template_substitutions.items():
            if value is None:
                self.template_substitutions[key] = default_text(value)

    def priority_sort(self, x, y):
        # Descending plugin priority (py2 cmp-style comparator).
        return cmp(y[0].priority, x[0].priority)

    def summary(self):
        """One-line, localized, plural-aware headline for this alert."""
        if self.tclass == "process":
            return P_(_("SELinux is preventing %s from using the %s access on a process."),
                      _("SELinux is preventing %s from using the '%s' accesses on a process."),
                      len(self.sig.access)) % (self.spath, ", ".join(self.sig.access))
        if self.tclass == "capability":
            return P_(_("SELinux is preventing %s from using the %s capability."),
                      _("SELinux is preventing %s from using the '%s' capabilities."),
                      len(self.sig.access)) % (self.spath, ", ".join(self.sig.access))
        if self.tpath == "(null)":
            return P_(_("SELinux is preventing %s from %s access on the %s labeled %s."),
                      _("SELinux is preventing %s from '%s' accesses on the %s labeled %s."),
                      len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tcontext.type)
        return P_(_("SELinux is preventing %s from %s access on the %s %s."),
                  _("SELinux is preventing %s from '%s' accesses on the %s %s."),
                  len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tpath)

    def get_plugins(self, all=False):
        """Return (total_priority, [(plugin, args), ...]) sorted by
        descending priority; with all=True every loaded plugin is included
        with placeholder args."""
        self.plugins = load_plugins()
        plugins = []
        total_priority = 0
        if all:
            for p in self.plugins:
                total_priority += p.priority
                plugins.append((p, ("allow_ypbind", "1")))
        else:
            for solution in self.plugin_list:
                for p in self.plugins:
                    if solution.analysis_id == p.analysis_id:
                        total_priority += p.priority
                        plugins.append((p, tuple(solution.args)))
                        break
        plugins.sort(self.priority_sort)
        return total_priority, plugins

    def substitute(self, txt):
        """Expand $VARIABLE references in *txt* from the derived table."""
        return Template(txt).safe_substitute(self.template_substitutions)

    def substitute_array(self, args):
        return [self.substitute(txt) for txt in args]

    def format_details(self, replace=False):
        """Render the full plain-text details section; replace=True redacts
        host-identifying fields for sharing/bug reports."""
        env = self.environment
        text = _("Additional Information:\n")
        text += format_2_column_name_value(_("Source Context"), self.scontext.format())
        text += format_2_column_name_value(_("Target Context"), self.tcontext.format())
        text += format_2_column_name_value(_("Target Objects"), self.format_target_object())
        text += format_2_column_name_value(_("Source"), default_text(self.source))
        text += format_2_column_name_value(_("Source Path"), default_text(self.spath))
        text += format_2_column_name_value(_("Port"), default_text(self.port))
        if (replace):
            text += format_2_column_name_value(_("Host"), "(removed)")
        else:
            text += format_2_column_name_value(_("Host"), default_text(self.sig.host))
        text += format_2_column_name_value(_("Source RPM Packages"), default_text(self.format_rpm_list(self.src_rpm_list)))
        text += format_2_column_name_value(_("Target RPM Packages"), default_text(self.format_rpm_list(self.tgt_rpm_list)))
        text += format_2_column_name_value(_("Policy RPM"), default_text(env.policy_rpm))
        text += format_2_column_name_value(_("Selinux Enabled"), default_text(env.selinux_enabled))
        text += format_2_column_name_value(_("Policy Type"), default_text(env.policy_type))
        text += format_2_column_name_value(_("Enforcing Mode"), default_text(env.enforce))
        if replace:
            text += format_2_column_name_value(_("Host Name"), "(removed)")
        else:
            text += format_2_column_name_value(_("Host Name"), default_text(env.hostname))
        if replace:
            # uname[1] is the hostname component of the uname string.
            uname = env.uname.split()
            uname[1] = "(removed)"
            text += format_2_column_name_value(_("Platform"), default_text(" ".join(uname)))
        else:
            text += format_2_column_name_value(_("Platform"), default_text(env.uname))
        text += format_2_column_name_value(_("Alert Count"), default_text(self.report_count))
        date_format = "%Y-%m-%d %H:%M:%S %Z"
        text += format_2_column_name_value(_("First Seen"), self.first_seen_date.format(date_format))
        text += format_2_column_name_value(_("Last Seen"), self.last_seen_date.format(date_format))
        text += format_2_column_name_value(_("Local ID"), default_text(self.local_id))
        text += '\n' + _("Raw Audit Messages")
        avcbuf = ""
        for audit_record in self.audit_event.records:
            if audit_record.record_type == 'AVC':
                avcbuf += "\n" + audit_record.to_text() + "\n"
            else:
                avcbuf += "\ntype=%s msg=%s: " % (audit_record.record_type, audit_record.event_id)
                avcbuf += ' '.join(["%s=%s" % (k, audit_record.fields[k]) for k in audit_record.fields_ord]) + "\n"
        avcbuf += "\nHash: " + self.get_hash_str()
        try:
            audit2allow = "/usr/bin/audit2allow"
            # BUG FIX: was os.path.exist (no such function); the resulting
            # AttributeError was swallowed by the bare except below, so the
            # audit2allow suggestions were never produced.
            if os.path.exists(audit2allow):
                newbuf = "\n\naudit2allow"
                # SECURITY FIX: invoke audit2allow directly with an argument
                # vector instead of through shell=True -- no shell parsing
                # of the command line (CWE-77/78 pattern).
                p = Popen([audit2allow], stdin=PIPE, stdout=PIPE)
                newbuf += p.communicate(avcbuf)[0]
                if os.path.exists("/var/lib/sepolgen/interface_info"):
                    newbuf += "\naudit2allow -R"
                    p = Popen([audit2allow, "-R"], stdin=PIPE, stdout=PIPE)
                    newbuf += p.communicate(avcbuf)[0]
                avcbuf += newbuf
        except:
            # Best effort: audit2allow output is optional decoration.
            pass
        text += avcbuf + '\n'
        return text

    def untranslated(self, func, *args, **kwargs):
        """Call *func* with gettext translation disabled, restoring the
        translation functions afterwards.

        This function is not thread safe, since it manipulates the module
        globals P_ and _.
        """
        global P_, _
        saved_translateP_ = P_
        saved_translate_ = _
        try:
            P_ = lambda x, y, z: x if z > 1 else y
            _ = lambda x: x
            return func(*args, **kwargs)
        finally:
            P_ = saved_translateP_
            _ = saved_translate_

    def format_text(self, all=False, replace=False):
        """Render the summary plus each matched plugin's If/Then/Do advice
        with its relative confidence percentage."""
        self.update_derived_template_substitutions()
        text = self.summary()
        total_priority, plugins = self.get_plugins(all)
        for p, args in plugins:
            title = _("\n\n***** Plugin %s (%.4s confidence) suggests ") % (p.analysis_id, ((float(p.priority) / float(total_priority)) * 100 + .5))
            text += title
            # Pad the banner out to 80 columns with asterisks.
            for i in range(len(title), 80):
                text += _("*")
            text += _("\n")
            txt = self.substitute(p.get_if_text(self.audit_event.records, args)).decode('utf-8')
            text += _("\nIf ") + txt[0].lower() + txt[1:]
            txt = self.substitute(p.get_then_text(self.audit_event.records, args)).decode('utf-8')
            text += _("\nThen ") + txt[0].lower() + txt[1:]
            txt = self.substitute(p.get_do_text(self.audit_event.records, args)).decode('utf-8')
            text += _("\nDo\n") + txt[0].lower() + txt[1:]
            text += _('\n\n')
        return text
class SEFaultUserInfo(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'username' : {'XMLForm':'attribute' }, 'email_alert' : {'XMLForm':'element', 'import_typecast':boolean, 'default': lambda: False }, 'email_address_list' : {'XMLForm':'element', 'list':'email_address', }, } def __init__(self, username): super(SEFaultUserInfo, self).__init__() self.username = username def add_email_address(self, email_address): if not email_address in self.email_address_list: self.email_address_list.append(email_address) class SEFaultUserSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'user_list' : {'XMLForm':'element', 'list':'user', 'import_typecast':SEFaultUserInfo, }, } def __init__(self): super(SEFaultUserSet, self).__init__() def get_user(self, username): for user in self.user_list: if username == user.username: return user return None def add_user(self, username): if self.get_user(username) is not None: return user = SEFaultUserInfo(username) self.user_list.append(user) return user class SEFaultSignatureSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '%d.%d' % (DATABASE_MAJOR_VERSION, DATABASE_MINOR_VERSION)}, 'users' : {'XMLForm':'element', 'import_typecast':SEFaultUserSet, 'default': lambda: SEFaultUserSet() }, 'signature_list' : {'XMLForm':'element', 'list':'siginfo', 'import_typecast':SEFaultSignatureInfo, }, } def __init__(self): super(SEFaultSignatureSet, self).__init__() def siginfos(self): for siginfo in self.signature_list: yield siginfo def add_siginfo(self, siginfo): self.signature_list.append(siginfo) return siginfo def remove_siginfo(self, siginfo): self.signature_list.remove(siginfo) def clear(self): self.signature_list = [] def generate_local_id(self): return str(uuid.uuid4()) def lookup_local_id(self, local_id): if local_id is None: return None for siginfo in self.signature_list: if siginfo.local_id == local_id: return siginfo return 
None def match_signatures(self, pat, criteria='exact', xml_info=SEFaultSignature._xml_info): match_targets = xml_info.keys() exact = False if criteria == 'exact': exact = True elif type(criteria) is FloatType: num_match_targets = len(match_targets) score_per_match_target = 1.0 / num_match_targets else: raise ValueError("unknown criteria = %s" % criteria) matches = [] for siginfo in self.signature_list: score = 0.0 sig = siginfo.sig for name in match_targets: if getattr(pat, name) == getattr(sig, name): if exact: score = 1.0 else: score += score_per_match_target else: if exact: score = 0.0 break if exact: if score == 1.0: matches.append(SignatureMatch(siginfo, score)) else: if score >= criteria: matches.append(SignatureMatch(siginfo, score)) matches.sort((lambda a,b: cmp(b.score, a.score))) return matches class SEDatabaseProperties(XmlSerialize): _xml_info = { 'name' : {'XMLForm':'element' }, 'friendly_name' : {'XMLForm':'element' }, 'filepath' : {'XMLForm':'element' }, } def __init__(self, name=None, friendly_name=None, filepath=None): super(SEDatabaseProperties, self).__init__() if name is not None: self.name = name if friendly_name is not None: self.friendly_name = friendly_name if filepath is not None: self.filepath = filepath class SEEmailRecipient(XmlSerialize): _xml_info = { 'address' : {'XMLForm':'element' }, 'filter_type' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: FILTER_AFTER_FIRST }, } def __init__(self, address, filter_type=None): super(SEEmailRecipient, self).__init__() self.address = address if filter_type is not None: self.filter_type = filter_type def __str__(self): return "%s:%s" % (self.address, map_filter_value_to_name.get(self.filter_type, 'unknown')) class SEEmailRecipientSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1' }, 'recipient_list' : {'XMLForm':'element', 'list':'recipient', 'import_typecast':SEEmailRecipient, }, } def __init__(self, recipient_list=None): 
super(SEEmailRecipientSet, self).__init__() if recipient_list is not None: self.recipient_list = recipient_list def __str__(self): return ','.join([str(x) for x in self.recipient_list]) def find_address(self, address): address = address.strip() for recipient in self.recipient_list: if address == recipient.address: return recipient return None def add_address(self, address, filter_type=FILTER_AFTER_FIRST): address = address.strip() if not valid_email_address(address): raise ProgramError(ERR_INVALID_EMAIL_ADDR, detail="address='%s'" % address) return recipient = self.find_address(address) if recipient is not None: return self.recipient_list.append(SEEmailRecipient(address, filter_type)) def clear_recipient_list(self): self.recipient_list = [] def parse_recipient_file(self, filepath): import re comment_re = re.compile('#.*') entry_re = re.compile('(\S+)(\s+(.+))?') key_value_re = re.compile("(\w+)\s*=\s*(\S+)") map_boolean = {'enabled' : True, 'true' : True, 'yes' : True, 'on' : True, 'disabled' : False, 'false' : False, 'no' : False, 'off' : False, } try: f = open(filepath) except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) self.clear_recipient_list() for line in f.readlines(): line = comment_re.sub('', line) line = line.strip() if line: match = entry_re.search(line) if match: address = match.group(1) options = match.group(3) filter_type = None if options: for match in key_value_re.finditer(options): option = match.group(1) value = match.group(2) if option == 'filter_type': filter_type = map_filter_name_to_value.get(value.lower(), None) if filter_type is None: log_debug("unknown email filter (%s) for address %s" % (option, address)) else: log_debug("unknown email option (%s) for address %s" % (option, address)) try: self.add_address(address, filter_type) except ProgramError, e: if e.errno == ERR_INVALID_EMAIL_ADDR: log_debug(e.strerror) else: raise e f.close() def write_recipient_file(self, filepath): try: f = 
open(filepath, 'w') except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) for recipient in self.recipient_list: filter_type = map_filter_value_to_name[recipient.filter_type] f.write("%-40s filter_type=%s\n" % (recipient.address, filter_type)) f.close() #------------------------------------------------------------------------ if __name__ == '__main__': import libxml2 #memory debug specific libxml2.debugMemory(1) xml_file = 'audit_listener_database.xml' sigs = SEFaultSignatureSet() sigs.read_xml_file(xml_file, 'sigs') siginfo = sigs.signature_list[0] record = siginfo.audit_event.records[0] print record.record_type print "siginfo.audit_event=%s" % siginfo.audit_event print sigs #memory debug specific libxml2.cleanupParser() if libxml2.debugMemory(1) == 0: print "Memory OK" else: print "Memory leak %d bytes" % (libxml2.debugMemory(1)) libxml2.dumpMemory()
./CrossVul/dataset_final_sorted/CWE-77/py/bad_5077_3
crossvul-python_data_good_5043_1
# Authors: John Dennis <jdennis@redhat.com> # Thomas Liu <tliu@redhat.com> # Dan Walsh <dwalsh@redhat.com> # # Copyright (C) 2006-2010 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # import syslog from subprocess import * import setroubleshoot.default_encoding_utf8 import gettext translation=gettext.translation('setroubleshoot-plugins', fallback=True) _=translation.ugettext __all__ = [ 'SignatureMatch', 'SEFilter', 'SEFaultSignature', 'SEFaultSignatureInfo', 'SEFaultSignatureSet', 'SEFaultSignatureUser', 'SEEnvironment', 'SEDatabaseProperties', 'SEFaultUserInfo', 'SEFaultUserSet', 'SEPlugin', 'SEEmailRecipient', 'SEEmailRecipientSet', 'FILTER_NEVER', 'FILTER_ALWAYS', 'FILTER_AFTER_FIRST', 'filter_text' ] if __name__ == "__main__": import gettext from setroubleshoot.config import parse_config_setting, get_config gettext.install(domain = get_config('general', 'i18n_text_domain'), localedir = get_config('general', 'i18n_locale_dir')) from gettext import ngettext as P_ from setroubleshoot.config import get_config from setroubleshoot.errcode import * from setroubleshoot.util import * from setroubleshoot.xml_serialize import * from setroubleshoot.html_util import * import setroubleshoot.uuid as uuid from setroubleshoot.audit_data import * import hashlib from types import * from string import Template import re, os # Don't reuse 
the numeric values! FILTER_NEVER = 0 FILTER_ALWAYS = 4 FILTER_AFTER_FIRST = 8 filter_text = { FILTER_NEVER : _("Never Ignore"), FILTER_ALWAYS : _("Ignore Always"), FILTER_AFTER_FIRST : _("Ignore After First Alert"), } map_filter_value_to_name = { FILTER_NEVER : 'never', FILTER_ALWAYS : 'always', FILTER_AFTER_FIRST : 'after_first', } map_filter_name_to_value = { 'never' : FILTER_NEVER, 'always' : FILTER_ALWAYS, 'after_first' : FILTER_AFTER_FIRST, } #------------------------------------------------------------------------ class SignatureMatch(object): def __init__(self, siginfo, score): self.siginfo = siginfo self.score = score class SEEnvironment(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'platform' : {'XMLForm':'element' }, 'kernel' : {'XMLForm':'element' }, 'policy_type' : {'XMLForm':'element' }, 'policy_rpm' : {'XMLForm':'element' }, 'enforce' : {'XMLForm':'element' }, 'selinux_enabled' : {'XMLForm':'element', 'import_typecast':boolean, }, 'selinux_mls_enabled' : {'XMLForm':'element', 'import_typecast':boolean, }, 'policyvers' : {'XMLForm':'element' }, 'hostname' : {'XMLForm':'element' }, 'uname' : {'XMLForm':'element' }, } def __init__(self): super(SEEnvironment, self).__init__() self.update() def update(self): import platform import selinux # security_getenforce is the same as the getenforce command. 
# selinux_getenforcemode tells you what is set in /etc/selinux/config self.platform, self.kernel = get_os_environment() self.policy_type = selinux.selinux_getpolicytype()[1] self.policy_rpm = get_rpm_nvr_by_name("selinux-policy") self.policyvers = str(selinux.security_policyvers()) enforce = selinux.security_getenforce() if enforce == 0: self.enforce = "Permissive" else: self.enforce = "Enforcing" self.selinux_enabled = bool(selinux.is_selinux_enabled()) self.selinux_mls_enabled = bool(selinux.is_selinux_mls_enabled()) self.hostname = platform.node() self.uname = " ".join(platform.uname()) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): for name in self._xml_info.keys(): if getattr(self, name) != getattr(other, name): return False return True class SEFilter(XmlSerialize): _xml_info = { 'filter_type' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: FILTER_NEVER }, 'count' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: 0 }, } def __init__(self, filter_type=FILTER_NEVER): super(SEFilter, self).__init__() self.filter_type = filter_type class SEFaultSignatureUser(XmlSerialize): _xml_info = { 'username' : {'XMLForm':'attribute' }, 'seen_flag' : {'XMLForm':'attribute', 'import_typecast':boolean, 'default': lambda: False }, 'delete_flag' : {'XMLForm':'attribute', 'import_typecast':boolean, 'default': lambda: False }, 'filter' : {'XMLForm':'element', 'import_typecast':SEFilter, 'default': lambda: SEFilter() }, } def __init__(self, username): super(SEFaultSignatureUser, self).__init__() self.username = username def update_item(self, item, data): if not item in self._names: raise ProgramError(ERR_NOT_MEMBER, 'item (%s) is not a defined member' % item) if item == 'username': raise ProgramError(ERR_ILLEGAL_USER_CHANGE, 'changing the username is illegal') setattr(self, item, data) def update_filter(self, filter_type, data=None): log_debug("update_filter: filter_type=%s data=%s" % 
(map_filter_value_to_name.get(filter_type, 'unknown'), data)) if filter_type == FILTER_NEVER or \ filter_type == FILTER_AFTER_FIRST or \ filter_type == FILTER_ALWAYS: log_debug("update_filter: !!!") self.filter = SEFilter(filter_type=filter_type) return True else: raise ValueError("Bad filter_type (%s)" % filter_type) class_dict = {} class_dict['dir'] = _("directory") class_dict['sem'] = _("semaphore") class_dict['shm'] = _("shared memory") class_dict['msgq'] = _("message queue") class_dict['msg'] = _("message") class_dict['file'] = _("file") class_dict['socket'] = _("socket") class_dict['process'] = _("process") class_dict['filesystem'] = _("filesystem") class_dict['node'] = _("node") class_dict['capability'] = _("capability") def translate_class(tclass): if tclass in class_dict.keys(): return class_dict[tclass] return tclass # -- class AttributeValueDictionary(XmlSerialize): _xml_info = 'unstructured' def __init__(self): super(AttributeValueDictionary, self).__init__() class SEFaultSignature(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '4.0', }, 'host' : {'XMLForm':'element', }, 'access' : {'XMLForm':'element', 'list':'operation', }, 'scontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tcontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tclass' : {'XMLForm':'element', }, 'port' : {'XMLForm':'element', 'import_typecast':int, }, } def __init__(self, **kwds): super(SEFaultSignature, self).__init__() for k,v in kwds.items(): setattr(self, k, v) class SEPlugin(XmlSerialize): _xml_info = { 'analysis_id' : {'XMLForm':'element'}, 'args' : {'XMLForm':'element', 'list':'arg', }, } def __init__(self, analysis_id, args): super(SEPlugin, self).__init__() self.analysis_id = analysis_id; self.args = args; def __str__(self): return str((self.analysis_id, self.args)) class SEFaultSignatureInfo(XmlSerialize): _xml_info = { 'plugin_list' : {'XMLForm':'element', 'list':'plugin', 'import_typecast':SEPlugin }, 
'audit_event' : {'XMLForm':'element', 'import_typecast':AuditEvent }, 'source' : {'XMLForm':'element' }, 'spath' : {'XMLForm':'element' }, 'tpath' : {'XMLForm':'element' }, 'src_rpm_list' : {'XMLForm':'element', 'list':'rpm', }, 'tgt_rpm_list' : {'XMLForm':'element', 'list':'rpm', }, 'scontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tcontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tclass' : {'XMLForm':'element', }, 'port' : {'XMLForm':'element', 'import_typecast':int, }, 'sig' : {'XMLForm':'element', 'import_typecast':SEFaultSignature }, 'if_text' : {'XMLForm':'element' }, 'then_text' : {'XMLForm':'element' }, 'do_text' : {'XMLForm':'element' }, 'environment' : {'XMLForm':'element', 'import_typecast':SEEnvironment }, 'first_seen_date' : {'XMLForm':'element', 'import_typecast':TimeStamp }, 'last_seen_date' : {'XMLForm':'element', 'import_typecast':TimeStamp }, 'report_count' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: 0 }, 'local_id' : {'XMLForm':'element' }, 'users' : {'XMLForm':'element', 'list':'user', 'import_typecast':SEFaultSignatureUser, }, 'level' : {'XMLForm':'element' }, 'fixable' : {'XMLForm':'element' }, 'button_text' : {'XMLForm':'element' }, } merge_include = ['audit_event', 'tpath', 'src_rpm_list', 'tgt_rpm_list', 'scontext', 'tcontext', 'tclass', 'port', 'environment', 'last_seen_date' ] def __init__(self, **kwds): super(SEFaultSignatureInfo, self).__init__() for k,v in kwds.items(): setattr(self, k, v) self.report_count = 1 self.plugin_list = [] def update_merge(self, siginfo): if siginfo.last_seen_date != self.last_seen_date: self.last_seen_date = siginfo.last_seen_date self.report_count += 1 for name in self.merge_include: setattr(self, name, getattr(siginfo, name)) def get_policy_rpm(self): return self.environment.policy_rpm; def get_hash_str(self): return "%s,%s,%s,%s,%s" % (self.source, self.scontext.type, self.tcontext.type, self.tclass, ",".join(self.sig.access)) def get_hash(self): 
hash = hashlib.sha256(self.get_hash_str()) return hash.hexdigest() def get_user_data(self, username): for user in self.users: if user.username == username: return user log_debug("new SEFaultSignatureUser for %s" % username) user = SEFaultSignatureUser(username) self.users.append(user) return user def find_filter_by_username(self, username): log_debug("find_filter_by_username %s" % username) filter = None user_data = self.get_user_data(username) if user_data is not None: filter = user_data.filter return filter def update_user_filter(self, username, filter_type, data=None): user_data = self.get_user_data(username) user_data.update_filter(filter_type, data) def evaluate_filter_for_user(self, username, filter_type=None): action = 'display' f = self.find_filter_by_username(username) log_debug("evaluate_filter_for_user: found %s user's filter = %s" % (username, f)) if f is not None: if filter_type is not None: f.filter_type = filter_type action = self.evaluate_filter(f) log_debug("evaluate_filter_for_user: found filter for %s: %s\n%s" % (username, action, f)) return action def evaluate_filter(self, filter): filter_type = filter.filter_type action = 'display' if filter_type == FILTER_NEVER: action = 'display' elif filter_type == FILTER_AFTER_FIRST: if filter.count == 0: action = 'display' else: action = 'ignore' elif filter_type == FILTER_ALWAYS: action = 'ignore' else: raise ValueError("unknown filter_type (%s)" % (filter_type)) filter.count += 1 return action def format_rpm_list(self, rpm_list): if isinstance(rpm_list, list): if len(rpm_list) > 0: return " ".join(rpm_list) else: return "" else: return default_text(None) def format_target_object(self): return "%s [ %s ]" % (self.tpath, self.tclass) def description_adjusted_for_permissive(self): permissive_msg = None syscall_record = self.audit_event.get_record_of_type('SYSCALL') if syscall_record != None and syscall_record.get_field('success') == 'yes': permissive_msg = _("%s has a permissive type (%s). 
This access was not denied.") % (self.source, self.scontext.type) if self.environment.enforce == "Permissive": permissive_msg = _("SELinux is in permissive mode. This access was not denied.") def update_derived_template_substitutions(self): self.template_substitutions = {} self.template_substitutions["SOURCE_TYPE"] = self.scontext.type self.template_substitutions["TARGET_TYPE"] = self.tcontext.type self.template_substitutions["SOURCE"] = self.source self.template_substitutions["SOURCE_PATH"] = self.spath self.template_substitutions["SOURCE_BASE_PATH"] = os.path.basename(self.spath) if self.spath: self.template_substitutions["FIX_SOURCE_PATH"] = re.sub(" ",".",self.spath) else: self.spath = _("N/A") self.template_substitutions["TARGET_PATH"] = self.tpath self.template_substitutions["TARGET_BASE_PATH"] = os.path.basename(self.tpath) if self.tpath: self.template_substitutions["FIX_TARGET_PATH"] = re.sub(" ",".",self.tpath) if self.tpath is None: self.template_substitutions["TARGET_DIR"] = None else: if self.tclass == 'dir': self.template_substitutions["TARGET_DIR"] = self.tpath elif self.tclass == 'file': self.template_substitutions["TARGET_DIR"] = os.path.dirname(self.tpath) else: self.template_substitutions["TARGET_DIR"] = None if self.tclass == "dir": self.template_substitutions["TARGET_CLASS"] = "directory" else: self.template_substitutions["TARGET_CLASS"] = self.tclass if self.sig.access is None: self.template_substitutions["ACCESS"] = None else: self.template_substitutions["ACCESS"] = ' '.join(self.sig.access) if len(self.src_rpm_list) > 0: self.template_substitutions["SOURCE_PACKAGE"] = self.src_rpm_list[0] self.template_substitutions["PORT_NUMBER"] = self.port # validate, replace any None values with friendly string for key, value in self.template_substitutions.items(): if value is None: self.template_substitutions[key] = default_text(value) def priority_sort(self, x, y): return cmp(y[0].priority,x[0].priority) def summary(self): if self.tclass == "process": 
return P_(_("SELinux is preventing %s from using the %s access on a process."), _("SELinux is preventing %s from using the '%s' accesses on a process."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access)) if self.tclass == "capability": return P_(_("SELinux is preventing %s from using the %s capability."), _("SELinux is preventing %s from using the '%s' capabilities."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access)) if self.tpath == "(null)": return P_(_("SELinux is preventing %s from %s access on the %s labeled %s."), _("SELinux is preventing %s from '%s' accesses on the %s labeled %s."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tcontext.type) return P_(_("SELinux is preventing %s from %s access on the %s %s."), _("SELinux is preventing %s from '%s' accesses on the %s %s."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tpath) def get_plugins(self, all = False): self.plugins = load_plugins() plugins = [] total_priority = 0 if all: for p in self.plugins: total_priority += p.priority plugins.append((p, ("allow_ypbind", "1"))) else: for solution in self.plugin_list: for p in self.plugins: if solution.analysis_id == p.analysis_id: total_priority += p.priority plugins.append((p, tuple(solution.args))) break plugins.sort(self.priority_sort) return total_priority, plugins def substitute(self, txt): return Template(txt).safe_substitute(self.template_substitutions) def substitute_array(self, args): return [self.substitute(txt) for txt in args] def format_details(self, replace=False): env = self.environment text = _("Additional Information:\n") text += format_2_column_name_value(_("Source Context"), self.scontext.format()) text += format_2_column_name_value(_("Target Context"), self.tcontext.format()) text += format_2_column_name_value(_("Target Objects"), self.format_target_object()) text += format_2_column_name_value(_("Source"), 
default_text(self.source)) text += format_2_column_name_value(_("Source Path"), default_text(self.spath)) text += format_2_column_name_value(_("Port"), default_text(self.port)) if (replace): text += format_2_column_name_value(_("Host"), "(removed)") else: text += format_2_column_name_value(_("Host"), default_text(self.sig.host)) text += format_2_column_name_value(_("Source RPM Packages"), default_text(self.format_rpm_list(self.src_rpm_list))) text += format_2_column_name_value(_("Target RPM Packages"), default_text(self.format_rpm_list(self.tgt_rpm_list))) text += format_2_column_name_value(_("Policy RPM"), default_text(env.policy_rpm)) text += format_2_column_name_value(_("Selinux Enabled"), default_text(env.selinux_enabled)) text += format_2_column_name_value(_("Policy Type"), default_text(env.policy_type)) text += format_2_column_name_value(_("Enforcing Mode"), default_text(env.enforce)) if replace: text += format_2_column_name_value(_("Host Name"),"(removed)") else: text += format_2_column_name_value(_("Host Name"), default_text(env.hostname)) if replace: uname = env.uname.split() uname[1] = "(removed)" text += format_2_column_name_value(_("Platform"), default_text(" ".join(uname))) else: text += format_2_column_name_value(_("Platform"), default_text(env.uname)) text += format_2_column_name_value(_("Alert Count"), default_text(self.report_count)) date_format = "%Y-%m-%d %H:%M:%S %Z" text += format_2_column_name_value(_("First Seen"), self.first_seen_date.format(date_format)) text += format_2_column_name_value(_("Last Seen"), self.last_seen_date.format(date_format)) text += format_2_column_name_value(_("Local ID"), default_text(self.local_id)) text += '\n' + _("Raw Audit Messages") avcbuf = "" for audit_record in self.audit_event.records: if audit_record.record_type == 'AVC': avcbuf += "\n" + audit_record.to_text() + "\n" else: avcbuf += "\ntype=%s msg=%s: " % (audit_record.record_type, audit_record.event_id) avcbuf += ' '.join(["%s=%s" % (k, 
audit_record.fields[k]) for k in audit_record.fields_ord]) +"\n" avcbuf += "\nHash: " + self.get_hash_str() try: audit2allow = "/usr/bin/audit2allow" if os.path.exist(audit2allow): newbuf = "\n\naudit2allow" p = Popen([audit2allow], shell=True,stdin=PIPE, stdout=PIPE) newbuf += p.communicate(avcbuf)[0] if os.path.exists("/var/lib/sepolgen/interface_info"): newbuf += "\naudit2allow -R" p = Popen(["%s -R" % audit2allow ], shell=True,stdin=PIPE, stdout=PIPE) newbuf += p.communicate(avcbuf)[0] avcbuf += newbuf except: pass text += avcbuf + '\n' return text def untranslated(self, func, *args, **kwargs): r'define.*untranslated\(.*\n' # Call the parameter function with the translations turned off # This function is not thread safe, since it manipulates globals global P_, _ saved_translateP_ = P_ saved_translate_ = _ try: P_ = lambda x,y,z: x if z > 1 else y _ = lambda x:x return func(*args, **kwargs) finally: P_ = saved_translateP_ _ = saved_translate_ def format_text(self, all = False, replace = False): self.update_derived_template_substitutions() text = self.summary() total_priority, plugins = self.get_plugins(all) for p, args in plugins: title = _("\n\n***** Plugin %s (%.4s confidence) suggests ") % (p.analysis_id, ((float(p.priority) / float(total_priority)) * 100 + .5)) text += title for i in range(len(title),80): text += _("*") text += _("\n") txt = self.substitute(p.get_if_text(self.audit_event.records, args)).decode('utf-8') text += _("\nIf ") + txt[0].lower() + txt[1:] txt = self.substitute(p.get_then_text(self.audit_event.records, args)).decode('utf-8') text += _("\nThen ") + txt[0].lower() + txt[1:] txt = self.substitute(p.get_do_text(self.audit_event.records, args)).decode('utf-8') text += _("\nDo\n") + txt[0].lower() + txt[1:] text += _('\n\n') return text class SEFaultUserInfo(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'username' : {'XMLForm':'attribute' }, 'email_alert' : {'XMLForm':'element', 
'import_typecast':boolean, 'default': lambda: False }, 'email_address_list' : {'XMLForm':'element', 'list':'email_address', }, } def __init__(self, username): super(SEFaultUserInfo, self).__init__() self.username = username def add_email_address(self, email_address): if not email_address in self.email_address_list: self.email_address_list.append(email_address) class SEFaultUserSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'user_list' : {'XMLForm':'element', 'list':'user', 'import_typecast':SEFaultUserInfo, }, } def __init__(self): super(SEFaultUserSet, self).__init__() def get_user(self, username): for user in self.user_list: if username == user.username: return user return None def add_user(self, username): if self.get_user(username) is not None: return user = SEFaultUserInfo(username) self.user_list.append(user) return user class SEFaultSignatureSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '%d.%d' % (DATABASE_MAJOR_VERSION, DATABASE_MINOR_VERSION)}, 'users' : {'XMLForm':'element', 'import_typecast':SEFaultUserSet, 'default': lambda: SEFaultUserSet() }, 'signature_list' : {'XMLForm':'element', 'list':'siginfo', 'import_typecast':SEFaultSignatureInfo, }, } def __init__(self): super(SEFaultSignatureSet, self).__init__() def siginfos(self): for siginfo in self.signature_list: yield siginfo def add_siginfo(self, siginfo): self.signature_list.append(siginfo) return siginfo def remove_siginfo(self, siginfo): self.signature_list.remove(siginfo) def clear(self): self.signature_list = [] def generate_local_id(self): return str(uuid.uuid4()) def lookup_local_id(self, local_id): if local_id is None: return None for siginfo in self.signature_list: if siginfo.local_id == local_id: return siginfo return None def match_signatures(self, pat, criteria='exact', xml_info=SEFaultSignature._xml_info): match_targets = xml_info.keys() exact = False if criteria == 'exact': exact = True elif 
type(criteria) is FloatType: num_match_targets = len(match_targets) score_per_match_target = 1.0 / num_match_targets else: raise ValueError("unknown criteria = %s" % criteria) matches = [] for siginfo in self.signature_list: score = 0.0 sig = siginfo.sig for name in match_targets: if getattr(pat, name) == getattr(sig, name): if exact: score = 1.0 else: score += score_per_match_target else: if exact: score = 0.0 break if exact: if score == 1.0: matches.append(SignatureMatch(siginfo, score)) else: if score >= criteria: matches.append(SignatureMatch(siginfo, score)) matches.sort((lambda a,b: cmp(b.score, a.score))) return matches class SEDatabaseProperties(XmlSerialize): _xml_info = { 'name' : {'XMLForm':'element' }, 'friendly_name' : {'XMLForm':'element' }, 'filepath' : {'XMLForm':'element' }, } def __init__(self, name=None, friendly_name=None, filepath=None): super(SEDatabaseProperties, self).__init__() if name is not None: self.name = name if friendly_name is not None: self.friendly_name = friendly_name if filepath is not None: self.filepath = filepath class SEEmailRecipient(XmlSerialize): _xml_info = { 'address' : {'XMLForm':'element' }, 'filter_type' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: FILTER_AFTER_FIRST }, } def __init__(self, address, filter_type=None): super(SEEmailRecipient, self).__init__() self.address = address if filter_type is not None: self.filter_type = filter_type def __str__(self): return "%s:%s" % (self.address, map_filter_value_to_name.get(self.filter_type, 'unknown')) class SEEmailRecipientSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1' }, 'recipient_list' : {'XMLForm':'element', 'list':'recipient', 'import_typecast':SEEmailRecipient, }, } def __init__(self, recipient_list=None): super(SEEmailRecipientSet, self).__init__() if recipient_list is not None: self.recipient_list = recipient_list def __str__(self): return ','.join([str(x) for x in self.recipient_list]) def 
find_address(self, address): address = address.strip() for recipient in self.recipient_list: if address == recipient.address: return recipient return None def add_address(self, address, filter_type=FILTER_AFTER_FIRST): address = address.strip() if not valid_email_address(address): raise ProgramError(ERR_INVALID_EMAIL_ADDR, detail="address='%s'" % address) return recipient = self.find_address(address) if recipient is not None: return self.recipient_list.append(SEEmailRecipient(address, filter_type)) def clear_recipient_list(self): self.recipient_list = [] def parse_recipient_file(self, filepath): import re comment_re = re.compile('#.*') entry_re = re.compile('(\S+)(\s+(.+))?') key_value_re = re.compile("(\w+)\s*=\s*(\S+)") map_boolean = {'enabled' : True, 'true' : True, 'yes' : True, 'on' : True, 'disabled' : False, 'false' : False, 'no' : False, 'off' : False, } try: f = open(filepath) except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) self.clear_recipient_list() for line in f.readlines(): line = comment_re.sub('', line) line = line.strip() if line: match = entry_re.search(line) if match: address = match.group(1) options = match.group(3) filter_type = None if options: for match in key_value_re.finditer(options): option = match.group(1) value = match.group(2) if option == 'filter_type': filter_type = map_filter_name_to_value.get(value.lower(), None) if filter_type is None: log_debug("unknown email filter (%s) for address %s" % (option, address)) else: log_debug("unknown email option (%s) for address %s" % (option, address)) try: self.add_address(address, filter_type) except ProgramError, e: if e.errno == ERR_INVALID_EMAIL_ADDR: log_debug(e.strerror) else: raise e f.close() def write_recipient_file(self, filepath): try: f = open(filepath, 'w') except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) for recipient in self.recipient_list: filter_type = 
map_filter_value_to_name[recipient.filter_type] f.write("%-40s filter_type=%s\n" % (recipient.address, filter_type)) f.close() #------------------------------------------------------------------------ if __name__ == '__main__': import libxml2 #memory debug specific libxml2.debugMemory(1) xml_file = 'audit_listener_database.xml' sigs = SEFaultSignatureSet() sigs.read_xml_file(xml_file, 'sigs') siginfo = sigs.signature_list[0] record = siginfo.audit_event.records[0] print record.record_type print "siginfo.audit_event=%s" % siginfo.audit_event print sigs #memory debug specific libxml2.cleanupParser() if libxml2.debugMemory(1) == 0: print "Memory OK" else: print "Memory leak %d bytes" % (libxml2.debugMemory(1)) libxml2.dumpMemory()
./CrossVul/dataset_final_sorted/CWE-77/py/good_5043_1
crossvul-python_data_bad_5044_0
#
# Copyright (C) 2006-2010 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import gettext
translation=gettext.translation('setroubleshoot-plugins', fallback=True)
_=translation.gettext

from setroubleshoot.util import *
from setroubleshoot.Plugin import Plugin

import subprocess
import sys

def _command_output(argv):
    # Run argv (a list, so no shell is ever involved) and return its combined
    # stdout/stderr as a string.  Returns "" when the program is missing or
    # cannot be started, which callers treat as "no output".
    try:
        proc = subprocess.Popen(argv,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                universal_newlines=True)
        return proc.communicate()[0]
    except OSError:
        return ""

def is_execstack(path):
    """Return True when the ELF object at *path* carries the execstack flag.

    Only absolute paths are probed; any other token (the '=>' arrows and load
    addresses found in ldd output, relative names, empty strings) is rejected
    without running anything.
    """
    if not path or path[0] != "/":
        return False
    # SECURITY FIX: the original interpolated the path into a shell command
    # ("execstack -q %s" % path via commands.getoutput), allowing command
    # injection through a crafted library path (CWE-77).  Passing the path as
    # a separate argv element makes shell metacharacters inert.
    tokens = _command_output(["execstack", "-q", path]).split()
    # Guard against empty output (execstack not installed, file unreadable);
    # the original raised IndexError on tokens[0] in that case.
    return len(tokens) > 0 and tokens[0] == "X"

def find_execstack(exe, pid):
    """Return the list of execstack-flagged libraries used by *exe* / *pid*.

    Combines the libraries reported by ldd for the executable with every
    file mapped into the running process (/proc/PID/maps); duplicates are
    filtered out and input order is preserved.
    """
    execstacklist = []
    # ldd output is scanned token by token; is_execstack() discards the
    # non-path tokens.  Argv-list form avoids shell injection via exe.
    for path in _command_output(["ldd", exe]).split():
        if is_execstack(path) and path not in execstacklist:
            execstacklist.append(path)
    try:
        fd = open("/proc/%s/maps" % pid, "r")
        try:
            for rec in fd.readlines():
                for path in rec.split():
                    if is_execstack(path) and path not in execstacklist:
                        execstacklist.append(path)
        finally:
            # fix: the original never closed the file, leaking a descriptor
            fd.close()
    except IOError:
        # the process may already have exited; fall back to the ldd results
        pass
    return execstacklist

class plugin(Plugin):
    """setroubleshoot analysis plugin for 'execstack' AVC denials."""

    summary =_('''
    SELinux is preventing $SOURCE_PATH from making the program stack executable.
    ''')

    problem_description = _('''
    The $SOURCE application attempted to make its stack executable.
    This is a potential security problem.
    This should never ever be necessary. Stack memory is not executable
    on most OSes these days and this will not change. Executable stack
    memory is one of the biggest security problems. An execstack error
    might in fact be most likely raised by malicious code.
    Applications are sometimes coded incorrectly and request this
    permission. The
    <a href="http://people.redhat.com/drepper/selinux-mem.html">SELinux
    Memory Protection Tests</a>
    web page explains how to remove this requirement. If $SOURCE does not
    work and you need it to work, you can configure SELinux temporarily
    to allow this access until the application is fixed. Please file a
    bug report.
    ''')

    fix_description = _('''
    Sometimes a library is accidentally marked with the execstack flag,
    if you find a library with this flag you can clear it with the
    execstack -c LIBRARY_PATH.  Then retry your application.  If the
    app continues to not work, you can turn the flag back on with
    execstack -s LIBRARY_PATH.
    ''')
    fix_cmd = ""

    if_text = _("you do not think $SOURCE_PATH should need to map stack memory that is both writable and executable.")
    then_text = _("you need to report a bug. \nThis is a potentially dangerous access.")
    do_text = _("Contact your security administrator and report this issue.")

    def get_if_text(self, avc, args):
        # args is the tuple stored by analyze(): (library_path, avc) or (None, None)
        try:
            path = args[0]
            if not path:
                return self.if_text
            return _("you believe that \n%s\nshould not require execstack") % path
        except (TypeError, IndexError):
            # malformed/missing args: fall back to the generic advice
            return self.if_text

    def get_then_text(self, avc, args):
        try:
            path = args[0]
            if not path:
                return self.then_text
            # NOTE: the "exestack" typo is preserved deliberately; this is a
            # translated msgid and fixing it would orphan existing translations.
            return _("you should clear the execstack flag and see if $SOURCE_PATH works correctly.\nReport this as a bug on %s.\nYou can clear the exestack flag by executing:") % path
        except (TypeError, IndexError):
            return self.then_text

    def get_do_text(self, avc, args):
        try:
            path = args[0]
            if not path:
                return self.do_text
            return _("execstack -c %s") % path
        except (TypeError, IndexError):
            return self.do_text

    def __init__(self):
        Plugin.__init__(self,__name__)

    def analyze(self, avc):
        # Only fires for login-user style domains that were denied execstack.
        if (avc.matches_source_types(['unconfined_t', 'staff_t', 'user_t',
                                      'guest_t', 'xguest_t']) and
                avc.has_any_access_in(['execstack'])):
            reports = []
            # Emit one report per offending library when we can identify them.
            for i in find_execstack(avc.spath, avc.pid):
                reports.append(self.report((i,avc)))
            if len(reports) > 0:
                return reports
            # No specific library found: emit the generic report.
            return self.report((None,None))
        else:
            return None
./CrossVul/dataset_final_sorted/CWE-77/py/bad_5044_0
crossvul-python_data_good_5076_0
#!/usr/bin/python import dbus import dbus.service import dbus.mainloop.glib import gobject import slip.dbus.service from slip.dbus import polkit import os class RunFix(slip.dbus.service.Object): default_polkit_auth_required = "org.fedoraproject.setroubleshootfixit.write" def __init__ (self, *p, **k): super(RunFix, self).__init__(*p, **k) @dbus.service.method ("org.fedoraproject.SetroubleshootFixit", in_signature='ss', out_signature='s') def run_fix(self, local_id, analysis_id): import subprocess command = ["sealert", "-f", local_id, "-P", analysis_id] return subprocess.check_output(command, universal_newlines=True) if __name__ == "__main__": mainloop = gobject.MainLoop () dbus.mainloop.glib.DBusGMainLoop (set_as_default=True) system_bus = dbus.SystemBus () name = dbus.service.BusName("org.fedoraproject.SetroubleshootFixit", system_bus) object = RunFix(system_bus, "/org/fedoraproject/SetroubleshootFixit/object") slip.dbus.service.set_mainloop (mainloop) mainloop.run ()
./CrossVul/dataset_final_sorted/CWE-77/py/good_5076_0
crossvul-python_data_good_5077_3
# Authors: John Dennis <jdennis@redhat.com> # Thomas Liu <tliu@redhat.com> # Dan Walsh <dwalsh@redhat.com> # # Copyright (C) 2006-2010 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # import syslog from subprocess import * import setroubleshoot.default_encoding_utf8 import gettext translation=gettext.translation('setroubleshoot-plugins', fallback=True) _=translation.ugettext __all__ = [ 'SignatureMatch', 'SEFilter', 'SEFaultSignature', 'SEFaultSignatureInfo', 'SEFaultSignatureSet', 'SEFaultSignatureUser', 'SEEnvironment', 'SEDatabaseProperties', 'SEFaultUserInfo', 'SEFaultUserSet', 'SEPlugin', 'SEEmailRecipient', 'SEEmailRecipientSet', 'FILTER_NEVER', 'FILTER_ALWAYS', 'FILTER_AFTER_FIRST', 'filter_text' ] if __name__ == "__main__": import gettext from setroubleshoot.config import parse_config_setting, get_config gettext.install(domain = get_config('general', 'i18n_text_domain'), localedir = get_config('general', 'i18n_locale_dir')) from gettext import ngettext as P_ from setroubleshoot.config import get_config from setroubleshoot.errcode import * from setroubleshoot.util import * from setroubleshoot.xml_serialize import * from setroubleshoot.html_util import * import setroubleshoot.uuid as uuid from setroubleshoot.audit_data import * import hashlib from types import * from string import Template import re, os # Don't reuse 
the numeric values! FILTER_NEVER = 0 FILTER_ALWAYS = 4 FILTER_AFTER_FIRST = 8 filter_text = { FILTER_NEVER : _("Never Ignore"), FILTER_ALWAYS : _("Ignore Always"), FILTER_AFTER_FIRST : _("Ignore After First Alert"), } map_filter_value_to_name = { FILTER_NEVER : 'never', FILTER_ALWAYS : 'always', FILTER_AFTER_FIRST : 'after_first', } map_filter_name_to_value = { 'never' : FILTER_NEVER, 'always' : FILTER_ALWAYS, 'after_first' : FILTER_AFTER_FIRST, } #------------------------------------------------------------------------ class SignatureMatch(object): def __init__(self, siginfo, score): self.siginfo = siginfo self.score = score class SEEnvironment(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'platform' : {'XMLForm':'element' }, 'kernel' : {'XMLForm':'element' }, 'policy_type' : {'XMLForm':'element' }, 'policy_rpm' : {'XMLForm':'element' }, 'enforce' : {'XMLForm':'element' }, 'selinux_enabled' : {'XMLForm':'element', 'import_typecast':boolean, }, 'selinux_mls_enabled' : {'XMLForm':'element', 'import_typecast':boolean, }, 'policyvers' : {'XMLForm':'element' }, 'hostname' : {'XMLForm':'element' }, 'uname' : {'XMLForm':'element' }, } def __init__(self): super(SEEnvironment, self).__init__() self.update() def update(self): import platform import selinux # security_getenforce is the same as the getenforce command. 
# selinux_getenforcemode tells you what is set in /etc/selinux/config self.platform, self.kernel = get_os_environment() self.policy_type = selinux.selinux_getpolicytype()[1] self.policy_rpm = get_rpm_nvr_by_name("selinux-policy") self.policyvers = str(selinux.security_policyvers()) enforce = selinux.security_getenforce() if enforce == 0: self.enforce = "Permissive" else: self.enforce = "Enforcing" self.selinux_enabled = bool(selinux.is_selinux_enabled()) self.selinux_mls_enabled = bool(selinux.is_selinux_mls_enabled()) self.hostname = platform.node() self.uname = " ".join(platform.uname()) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): for name in self._xml_info.keys(): if getattr(self, name) != getattr(other, name): return False return True class SEFilter(XmlSerialize): _xml_info = { 'filter_type' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: FILTER_NEVER }, 'count' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: 0 }, } def __init__(self, filter_type=FILTER_NEVER): super(SEFilter, self).__init__() self.filter_type = filter_type class SEFaultSignatureUser(XmlSerialize): _xml_info = { 'username' : {'XMLForm':'attribute' }, 'seen_flag' : {'XMLForm':'attribute', 'import_typecast':boolean, 'default': lambda: False }, 'delete_flag' : {'XMLForm':'attribute', 'import_typecast':boolean, 'default': lambda: False }, 'filter' : {'XMLForm':'element', 'import_typecast':SEFilter, 'default': lambda: SEFilter() }, } def __init__(self, username): super(SEFaultSignatureUser, self).__init__() self.username = username def update_item(self, item, data): if not item in self._names: raise ProgramError(ERR_NOT_MEMBER, 'item (%s) is not a defined member' % item) if item == 'username': raise ProgramError(ERR_ILLEGAL_USER_CHANGE, 'changing the username is illegal') setattr(self, item, data) def update_filter(self, filter_type, data=None): log_debug("update_filter: filter_type=%s data=%s" % 
(map_filter_value_to_name.get(filter_type, 'unknown'), data)) if filter_type == FILTER_NEVER or \ filter_type == FILTER_AFTER_FIRST or \ filter_type == FILTER_ALWAYS: log_debug("update_filter: !!!") self.filter = SEFilter(filter_type=filter_type) return True else: raise ValueError("Bad filter_type (%s)" % filter_type) class_dict = {} class_dict['dir'] = _("directory") class_dict['sem'] = _("semaphore") class_dict['shm'] = _("shared memory") class_dict['msgq'] = _("message queue") class_dict['msg'] = _("message") class_dict['file'] = _("file") class_dict['socket'] = _("socket") class_dict['process'] = _("process") class_dict['filesystem'] = _("filesystem") class_dict['node'] = _("node") class_dict['capability'] = _("capability") def translate_class(tclass): if tclass in class_dict.keys(): return class_dict[tclass] return tclass # -- class AttributeValueDictionary(XmlSerialize): _xml_info = 'unstructured' def __init__(self): super(AttributeValueDictionary, self).__init__() class SEFaultSignature(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '4.0', }, 'host' : {'XMLForm':'element', }, 'access' : {'XMLForm':'element', 'list':'operation', }, 'scontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tcontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tclass' : {'XMLForm':'element', }, 'port' : {'XMLForm':'element', 'import_typecast':int, }, } def __init__(self, **kwds): super(SEFaultSignature, self).__init__() for k,v in kwds.items(): setattr(self, k, v) class SEPlugin(XmlSerialize): _xml_info = { 'analysis_id' : {'XMLForm':'element'}, 'args' : {'XMLForm':'element', 'list':'arg', }, } def __init__(self, analysis_id, args): super(SEPlugin, self).__init__() self.analysis_id = analysis_id; self.args = args; def __str__(self): return str((self.analysis_id, self.args)) class SEFaultSignatureInfo(XmlSerialize): _xml_info = { 'plugin_list' : {'XMLForm':'element', 'list':'plugin', 'import_typecast':SEPlugin }, 
'audit_event' : {'XMLForm':'element', 'import_typecast':AuditEvent }, 'source' : {'XMLForm':'element' }, 'spath' : {'XMLForm':'element' }, 'tpath' : {'XMLForm':'element' }, 'src_rpm_list' : {'XMLForm':'element', 'list':'rpm', }, 'tgt_rpm_list' : {'XMLForm':'element', 'list':'rpm', }, 'scontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tcontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tclass' : {'XMLForm':'element', }, 'port' : {'XMLForm':'element', 'import_typecast':int, }, 'sig' : {'XMLForm':'element', 'import_typecast':SEFaultSignature }, 'if_text' : {'XMLForm':'element' }, 'then_text' : {'XMLForm':'element' }, 'do_text' : {'XMLForm':'element' }, 'environment' : {'XMLForm':'element', 'import_typecast':SEEnvironment }, 'first_seen_date' : {'XMLForm':'element', 'import_typecast':TimeStamp }, 'last_seen_date' : {'XMLForm':'element', 'import_typecast':TimeStamp }, 'report_count' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: 0 }, 'local_id' : {'XMLForm':'element' }, 'users' : {'XMLForm':'element', 'list':'user', 'import_typecast':SEFaultSignatureUser, }, 'level' : {'XMLForm':'element' }, 'fixable' : {'XMLForm':'element' }, 'button_text' : {'XMLForm':'element' }, } merge_include = ['audit_event', 'tpath', 'src_rpm_list', 'tgt_rpm_list', 'scontext', 'tcontext', 'tclass', 'port', 'environment', 'last_seen_date' ] def __init__(self, **kwds): super(SEFaultSignatureInfo, self).__init__() for k,v in kwds.items(): setattr(self, k, v) self.report_count = 1 self.plugin_list = [] def update_merge(self, siginfo): if siginfo.last_seen_date != self.last_seen_date: self.last_seen_date = siginfo.last_seen_date self.report_count += 1 for name in self.merge_include: setattr(self, name, getattr(siginfo, name)) # older databases can have an uninitialized level if self.level is None: self.level = siginfo.level def get_policy_rpm(self): return self.environment.policy_rpm; def get_hash_str(self): return "%s,%s,%s,%s,%s" % (self.source, 
self.scontext.type, self.tcontext.type, self.tclass, ",".join(self.sig.access)) def get_hash(self): hash = hashlib.sha256(self.get_hash_str()) return hash.hexdigest() def get_user_data(self, username): for user in self.users: if user.username == username: return user log_debug("new SEFaultSignatureUser for %s" % username) user = SEFaultSignatureUser(username) self.users.append(user) return user def find_filter_by_username(self, username): log_debug("find_filter_by_username %s" % username) filter = None user_data = self.get_user_data(username) if user_data is not None: filter = user_data.filter return filter def update_user_filter(self, username, filter_type, data=None): user_data = self.get_user_data(username) user_data.update_filter(filter_type, data) def evaluate_filter_for_user(self, username, filter_type=None): action = 'display' f = self.find_filter_by_username(username) log_debug("evaluate_filter_for_user: found %s user's filter = %s" % (username, f)) if f is not None: if filter_type is not None: f.filter_type = filter_type action = self.evaluate_filter(f) log_debug("evaluate_filter_for_user: found filter for %s: %s\n%s" % (username, action, f)) return action def evaluate_filter(self, filter): filter_type = filter.filter_type action = 'display' if filter_type == FILTER_NEVER: action = 'display' elif filter_type == FILTER_AFTER_FIRST: if filter.count == 0: action = 'display' else: action = 'ignore' elif filter_type == FILTER_ALWAYS: action = 'ignore' else: raise ValueError("unknown filter_type (%s)" % (filter_type)) filter.count += 1 return action def format_rpm_list(self, rpm_list): if isinstance(rpm_list, list): if len(rpm_list) > 0: return " ".join(rpm_list) else: return "" else: return default_text(None) def format_target_object(self): return "%s [ %s ]" % (self.tpath, self.tclass) def description_adjusted_for_permissive(self): permissive_msg = None syscall_record = self.audit_event.get_record_of_type('SYSCALL') if syscall_record != None and 
syscall_record.get_field('success') == 'yes': permissive_msg = _("%s has a permissive type (%s). This access was not denied.") % (self.source, self.scontext.type) if self.environment.enforce == "Permissive": permissive_msg = _("SELinux is in permissive mode. This access was not denied.") def update_derived_template_substitutions(self): self.template_substitutions = {} self.template_substitutions["SOURCE_TYPE"] = self.scontext.type self.template_substitutions["TARGET_TYPE"] = self.tcontext.type self.template_substitutions["SOURCE"] = self.source self.template_substitutions["SOURCE_PATH"] = self.spath self.template_substitutions["SOURCE_BASE_PATH"] = os.path.basename(self.spath) self.template_substitutions["MODULE_NAME"] = re.sub('[^a-zA-Z0-9]', '', self.source) if self.spath: self.template_substitutions["FIX_SOURCE_PATH"] = re.sub(" ",".",self.spath) else: self.spath = _("N/A") self.template_substitutions["TARGET_PATH"] = self.tpath self.template_substitutions["TARGET_BASE_PATH"] = os.path.basename(self.tpath) if self.tpath: self.template_substitutions["FIX_TARGET_PATH"] = re.sub(" ",".",self.tpath) if self.tpath is None: self.template_substitutions["TARGET_DIR"] = None else: if self.tclass == 'dir': self.template_substitutions["TARGET_DIR"] = self.tpath elif self.tclass == 'file': self.template_substitutions["TARGET_DIR"] = os.path.dirname(self.tpath) else: self.template_substitutions["TARGET_DIR"] = None if self.tclass == "dir": self.template_substitutions["TARGET_CLASS"] = "directory" else: self.template_substitutions["TARGET_CLASS"] = self.tclass if self.sig.access is None: self.template_substitutions["ACCESS"] = None else: self.template_substitutions["ACCESS"] = ' '.join(self.sig.access) if len(self.src_rpm_list) > 0: self.template_substitutions["SOURCE_PACKAGE"] = self.src_rpm_list[0] self.template_substitutions["PORT_NUMBER"] = self.port # validate, replace any None values with friendly string for key, value in self.template_substitutions.items(): if value is 
None: self.template_substitutions[key] = default_text(value) def priority_sort(self, x, y): return cmp(y[0].priority,x[0].priority) def summary(self): if self.tclass == "process": return P_(_("SELinux is preventing %s from using the %s access on a process."), _("SELinux is preventing %s from using the '%s' accesses on a process."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access)) if self.tclass == "capability": return P_(_("SELinux is preventing %s from using the %s capability."), _("SELinux is preventing %s from using the '%s' capabilities."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access)) if self.tpath == "(null)": return P_(_("SELinux is preventing %s from %s access on the %s labeled %s."), _("SELinux is preventing %s from '%s' accesses on the %s labeled %s."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tcontext.type) return P_(_("SELinux is preventing %s from %s access on the %s %s."), _("SELinux is preventing %s from '%s' accesses on the %s %s."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tpath) def get_plugins(self, all = False): self.plugins = load_plugins() plugins = [] total_priority = 0 if all: for p in self.plugins: total_priority += p.priority plugins.append((p, ("allow_ypbind", "1"))) else: for solution in self.plugin_list: for p in self.plugins: if solution.analysis_id == p.analysis_id: total_priority += p.priority plugins.append((p, tuple(solution.args))) break plugins.sort(self.priority_sort) return total_priority, plugins def substitute(self, txt): return Template(txt).safe_substitute(self.template_substitutions) def substitute_array(self, args): return [self.substitute(txt) for txt in args] def format_details(self, replace=False): env = self.environment text = _("Additional Information:\n") text += format_2_column_name_value(_("Source Context"), self.scontext.format()) text += 
format_2_column_name_value(_("Target Context"), self.tcontext.format()) text += format_2_column_name_value(_("Target Objects"), self.format_target_object()) text += format_2_column_name_value(_("Source"), default_text(self.source)) text += format_2_column_name_value(_("Source Path"), default_text(self.spath)) text += format_2_column_name_value(_("Port"), default_text(self.port)) if (replace): text += format_2_column_name_value(_("Host"), "(removed)") else: text += format_2_column_name_value(_("Host"), default_text(self.sig.host)) text += format_2_column_name_value(_("Source RPM Packages"), default_text(self.format_rpm_list(self.src_rpm_list))) text += format_2_column_name_value(_("Target RPM Packages"), default_text(self.format_rpm_list(self.tgt_rpm_list))) text += format_2_column_name_value(_("Policy RPM"), default_text(env.policy_rpm)) text += format_2_column_name_value(_("Selinux Enabled"), default_text(env.selinux_enabled)) text += format_2_column_name_value(_("Policy Type"), default_text(env.policy_type)) text += format_2_column_name_value(_("Enforcing Mode"), default_text(env.enforce)) if replace: text += format_2_column_name_value(_("Host Name"),"(removed)") else: text += format_2_column_name_value(_("Host Name"), default_text(env.hostname)) if replace: uname = env.uname.split() uname[1] = "(removed)" text += format_2_column_name_value(_("Platform"), default_text(" ".join(uname))) else: text += format_2_column_name_value(_("Platform"), default_text(env.uname)) text += format_2_column_name_value(_("Alert Count"), default_text(self.report_count)) date_format = "%Y-%m-%d %H:%M:%S %Z" text += format_2_column_name_value(_("First Seen"), self.first_seen_date.format(date_format)) text += format_2_column_name_value(_("Last Seen"), self.last_seen_date.format(date_format)) text += format_2_column_name_value(_("Local ID"), default_text(self.local_id)) text += '\n' + _("Raw Audit Messages") avcbuf = "" for audit_record in self.audit_event.records: if 
audit_record.record_type == 'AVC': avcbuf += "\n" + audit_record.to_text() + "\n" else: avcbuf += "\ntype=%s msg=%s: " % (audit_record.record_type, audit_record.event_id) avcbuf += ' '.join(["%s=%s" % (k, audit_record.fields[k]) for k in audit_record.fields_ord]) +"\n" avcbuf += "\nHash: " + self.get_hash_str() try: audit2allow = "/usr/bin/audit2allow" if os.path.exist(audit2allow): newbuf = "\n\naudit2allow" p = Popen([audit2allow], stdin=PIPE, stdout=PIPE) newbuf += p.communicate(avcbuf)[0] if os.path.exists("/var/lib/sepolgen/interface_info"): newbuf += "\naudit2allow -R" p = Popen([audit2allow, "-R"], stdin=PIPE, stdout=PIPE) newbuf += p.communicate(avcbuf)[0] avcbuf += newbuf except: pass text += avcbuf + '\n' return text def untranslated(self, func, *args, **kwargs): r'define.*untranslated\(.*\n' # Call the parameter function with the translations turned off # This function is not thread safe, since it manipulates globals global P_, _ saved_translateP_ = P_ saved_translate_ = _ try: P_ = lambda x,y,z: x if z > 1 else y _ = lambda x:x return func(*args, **kwargs) finally: P_ = saved_translateP_ _ = saved_translate_ def format_text(self, all = False, replace = False): self.update_derived_template_substitutions() text = self.summary() total_priority, plugins = self.get_plugins(all) for p, args in plugins: title = _("\n\n***** Plugin %s (%.4s confidence) suggests ") % (p.analysis_id, ((float(p.priority) / float(total_priority)) * 100 + .5)) text += title for i in range(len(title),80): text += _("*") text += _("\n") txt = self.substitute(p.get_if_text(self.audit_event.records, args)).decode('utf-8') text += _("\nIf ") + txt[0].lower() + txt[1:] txt = self.substitute(p.get_then_text(self.audit_event.records, args)).decode('utf-8') text += _("\nThen ") + txt[0].lower() + txt[1:] txt = self.substitute(p.get_do_text(self.audit_event.records, args)).decode('utf-8') text += _("\nDo\n") + txt[0].lower() + txt[1:] text += _('\n\n') return text class 
SEFaultUserInfo(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'username' : {'XMLForm':'attribute' }, 'email_alert' : {'XMLForm':'element', 'import_typecast':boolean, 'default': lambda: False }, 'email_address_list' : {'XMLForm':'element', 'list':'email_address', }, } def __init__(self, username): super(SEFaultUserInfo, self).__init__() self.username = username def add_email_address(self, email_address): if not email_address in self.email_address_list: self.email_address_list.append(email_address) class SEFaultUserSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'user_list' : {'XMLForm':'element', 'list':'user', 'import_typecast':SEFaultUserInfo, }, } def __init__(self): super(SEFaultUserSet, self).__init__() def get_user(self, username): for user in self.user_list: if username == user.username: return user return None def add_user(self, username): if self.get_user(username) is not None: return user = SEFaultUserInfo(username) self.user_list.append(user) return user class SEFaultSignatureSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '%d.%d' % (DATABASE_MAJOR_VERSION, DATABASE_MINOR_VERSION)}, 'users' : {'XMLForm':'element', 'import_typecast':SEFaultUserSet, 'default': lambda: SEFaultUserSet() }, 'signature_list' : {'XMLForm':'element', 'list':'siginfo', 'import_typecast':SEFaultSignatureInfo, }, } def __init__(self): super(SEFaultSignatureSet, self).__init__() def siginfos(self): for siginfo in self.signature_list: yield siginfo def add_siginfo(self, siginfo): self.signature_list.append(siginfo) return siginfo def remove_siginfo(self, siginfo): self.signature_list.remove(siginfo) def clear(self): self.signature_list = [] def generate_local_id(self): return str(uuid.uuid4()) def lookup_local_id(self, local_id): if local_id is None: return None for siginfo in self.signature_list: if siginfo.local_id == local_id: return siginfo return None 
def match_signatures(self, pat, criteria='exact', xml_info=SEFaultSignature._xml_info): match_targets = xml_info.keys() exact = False if criteria == 'exact': exact = True elif type(criteria) is FloatType: num_match_targets = len(match_targets) score_per_match_target = 1.0 / num_match_targets else: raise ValueError("unknown criteria = %s" % criteria) matches = [] for siginfo in self.signature_list: score = 0.0 sig = siginfo.sig for name in match_targets: if getattr(pat, name) == getattr(sig, name): if exact: score = 1.0 else: score += score_per_match_target else: if exact: score = 0.0 break if exact: if score == 1.0: matches.append(SignatureMatch(siginfo, score)) else: if score >= criteria: matches.append(SignatureMatch(siginfo, score)) matches.sort((lambda a,b: cmp(b.score, a.score))) return matches class SEDatabaseProperties(XmlSerialize): _xml_info = { 'name' : {'XMLForm':'element' }, 'friendly_name' : {'XMLForm':'element' }, 'filepath' : {'XMLForm':'element' }, } def __init__(self, name=None, friendly_name=None, filepath=None): super(SEDatabaseProperties, self).__init__() if name is not None: self.name = name if friendly_name is not None: self.friendly_name = friendly_name if filepath is not None: self.filepath = filepath class SEEmailRecipient(XmlSerialize): _xml_info = { 'address' : {'XMLForm':'element' }, 'filter_type' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: FILTER_AFTER_FIRST }, } def __init__(self, address, filter_type=None): super(SEEmailRecipient, self).__init__() self.address = address if filter_type is not None: self.filter_type = filter_type def __str__(self): return "%s:%s" % (self.address, map_filter_value_to_name.get(self.filter_type, 'unknown')) class SEEmailRecipientSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1' }, 'recipient_list' : {'XMLForm':'element', 'list':'recipient', 'import_typecast':SEEmailRecipient, }, } def __init__(self, recipient_list=None): 
super(SEEmailRecipientSet, self).__init__() if recipient_list is not None: self.recipient_list = recipient_list def __str__(self): return ','.join([str(x) for x in self.recipient_list]) def find_address(self, address): address = address.strip() for recipient in self.recipient_list: if address == recipient.address: return recipient return None def add_address(self, address, filter_type=FILTER_AFTER_FIRST): address = address.strip() if not valid_email_address(address): raise ProgramError(ERR_INVALID_EMAIL_ADDR, detail="address='%s'" % address) return recipient = self.find_address(address) if recipient is not None: return self.recipient_list.append(SEEmailRecipient(address, filter_type)) def clear_recipient_list(self): self.recipient_list = [] def parse_recipient_file(self, filepath): import re comment_re = re.compile('#.*') entry_re = re.compile('(\S+)(\s+(.+))?') key_value_re = re.compile("(\w+)\s*=\s*(\S+)") map_boolean = {'enabled' : True, 'true' : True, 'yes' : True, 'on' : True, 'disabled' : False, 'false' : False, 'no' : False, 'off' : False, } try: f = open(filepath) except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) self.clear_recipient_list() for line in f.readlines(): line = comment_re.sub('', line) line = line.strip() if line: match = entry_re.search(line) if match: address = match.group(1) options = match.group(3) filter_type = None if options: for match in key_value_re.finditer(options): option = match.group(1) value = match.group(2) if option == 'filter_type': filter_type = map_filter_name_to_value.get(value.lower(), None) if filter_type is None: log_debug("unknown email filter (%s) for address %s" % (option, address)) else: log_debug("unknown email option (%s) for address %s" % (option, address)) try: self.add_address(address, filter_type) except ProgramError, e: if e.errno == ERR_INVALID_EMAIL_ADDR: log_debug(e.strerror) else: raise e f.close() def write_recipient_file(self, filepath): try: f = 
open(filepath, 'w') except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) for recipient in self.recipient_list: filter_type = map_filter_value_to_name[recipient.filter_type] f.write("%-40s filter_type=%s\n" % (recipient.address, filter_type)) f.close() #------------------------------------------------------------------------ if __name__ == '__main__': import libxml2 #memory debug specific libxml2.debugMemory(1) xml_file = 'audit_listener_database.xml' sigs = SEFaultSignatureSet() sigs.read_xml_file(xml_file, 'sigs') siginfo = sigs.signature_list[0] record = siginfo.audit_event.records[0] print record.record_type print "siginfo.audit_event=%s" % siginfo.audit_event print sigs #memory debug specific libxml2.cleanupParser() if libxml2.debugMemory(1) == 0: print "Memory OK" else: print "Memory leak %d bytes" % (libxml2.debugMemory(1)) libxml2.dumpMemory()
./CrossVul/dataset_final_sorted/CWE-77/py/good_5077_3
crossvul-python_data_bad_5043_1
# Authors: John Dennis <jdennis@redhat.com> # Thomas Liu <tliu@redhat.com> # Dan Walsh <dwalsh@redhat.com> # # Copyright (C) 2006-2010 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # import syslog from subprocess import * import setroubleshoot.default_encoding_utf8 import gettext translation=gettext.translation('setroubleshoot-plugins', fallback=True) _=translation.ugettext __all__ = [ 'SignatureMatch', 'SEFilter', 'SEFaultSignature', 'SEFaultSignatureInfo', 'SEFaultSignatureSet', 'SEFaultSignatureUser', 'SEEnvironment', 'SEDatabaseProperties', 'SEFaultUserInfo', 'SEFaultUserSet', 'SEPlugin', 'SEEmailRecipient', 'SEEmailRecipientSet', 'FILTER_NEVER', 'FILTER_ALWAYS', 'FILTER_AFTER_FIRST', 'filter_text' ] if __name__ == "__main__": import gettext from setroubleshoot.config import parse_config_setting, get_config gettext.install(domain = get_config('general', 'i18n_text_domain'), localedir = get_config('general', 'i18n_locale_dir')) from gettext import ngettext as P_ from setroubleshoot.config import get_config from setroubleshoot.errcode import * from setroubleshoot.util import * from setroubleshoot.xml_serialize import * from setroubleshoot.html_util import * import setroubleshoot.uuid as uuid from setroubleshoot.audit_data import * import hashlib from types import * from string import Template import re, os # Don't reuse 
the numeric values! FILTER_NEVER = 0 FILTER_ALWAYS = 4 FILTER_AFTER_FIRST = 8 filter_text = { FILTER_NEVER : _("Never Ignore"), FILTER_ALWAYS : _("Ignore Always"), FILTER_AFTER_FIRST : _("Ignore After First Alert"), } map_filter_value_to_name = { FILTER_NEVER : 'never', FILTER_ALWAYS : 'always', FILTER_AFTER_FIRST : 'after_first', } map_filter_name_to_value = { 'never' : FILTER_NEVER, 'always' : FILTER_ALWAYS, 'after_first' : FILTER_AFTER_FIRST, } #------------------------------------------------------------------------ class SignatureMatch(object): def __init__(self, siginfo, score): self.siginfo = siginfo self.score = score class SEEnvironment(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'platform' : {'XMLForm':'element' }, 'kernel' : {'XMLForm':'element' }, 'policy_type' : {'XMLForm':'element' }, 'policy_rpm' : {'XMLForm':'element' }, 'enforce' : {'XMLForm':'element' }, 'selinux_enabled' : {'XMLForm':'element', 'import_typecast':boolean, }, 'selinux_mls_enabled' : {'XMLForm':'element', 'import_typecast':boolean, }, 'policyvers' : {'XMLForm':'element' }, 'hostname' : {'XMLForm':'element' }, 'uname' : {'XMLForm':'element' }, } def __init__(self): super(SEEnvironment, self).__init__() self.update() def update(self): import platform import selinux # security_getenforce is the same as the getenforce command. 
# selinux_getenforcemode tells you what is set in /etc/selinux/config self.platform, self.kernel = get_os_environment() self.policy_type = selinux.selinux_getpolicytype()[1] self.policy_rpm = get_rpm_nvr_by_name("selinux-policy") self.policyvers = str(selinux.security_policyvers()) enforce = selinux.security_getenforce() if enforce == 0: self.enforce = "Permissive" else: self.enforce = "Enforcing" self.selinux_enabled = bool(selinux.is_selinux_enabled()) self.selinux_mls_enabled = bool(selinux.is_selinux_mls_enabled()) self.hostname = platform.node() self.uname = " ".join(platform.uname()) def __ne__(self, other): return not self.__eq__(other) def __eq__(self, other): for name in self._xml_info.keys(): if getattr(self, name) != getattr(other, name): return False return True class SEFilter(XmlSerialize): _xml_info = { 'filter_type' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: FILTER_NEVER }, 'count' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: 0 }, } def __init__(self, filter_type=FILTER_NEVER): super(SEFilter, self).__init__() self.filter_type = filter_type class SEFaultSignatureUser(XmlSerialize): _xml_info = { 'username' : {'XMLForm':'attribute' }, 'seen_flag' : {'XMLForm':'attribute', 'import_typecast':boolean, 'default': lambda: False }, 'delete_flag' : {'XMLForm':'attribute', 'import_typecast':boolean, 'default': lambda: False }, 'filter' : {'XMLForm':'element', 'import_typecast':SEFilter, 'default': lambda: SEFilter() }, } def __init__(self, username): super(SEFaultSignatureUser, self).__init__() self.username = username def update_item(self, item, data): if not item in self._names: raise ProgramError(ERR_NOT_MEMBER, 'item (%s) is not a defined member' % item) if item == 'username': raise ProgramError(ERR_ILLEGAL_USER_CHANGE, 'changing the username is illegal') setattr(self, item, data) def update_filter(self, filter_type, data=None): log_debug("update_filter: filter_type=%s data=%s" % 
(map_filter_value_to_name.get(filter_type, 'unknown'), data)) if filter_type == FILTER_NEVER or \ filter_type == FILTER_AFTER_FIRST or \ filter_type == FILTER_ALWAYS: log_debug("update_filter: !!!") self.filter = SEFilter(filter_type=filter_type) return True else: raise ValueError("Bad filter_type (%s)" % filter_type) class_dict = {} class_dict['dir'] = _("directory") class_dict['sem'] = _("semaphore") class_dict['shm'] = _("shared memory") class_dict['msgq'] = _("message queue") class_dict['msg'] = _("message") class_dict['file'] = _("file") class_dict['socket'] = _("socket") class_dict['process'] = _("process") class_dict['filesystem'] = _("filesystem") class_dict['node'] = _("node") class_dict['capability'] = _("capability") def translate_class(tclass): if tclass in class_dict.keys(): return class_dict[tclass] return tclass # -- class AttributeValueDictionary(XmlSerialize): _xml_info = 'unstructured' def __init__(self): super(AttributeValueDictionary, self).__init__() class SEFaultSignature(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '4.0', }, 'host' : {'XMLForm':'element', }, 'access' : {'XMLForm':'element', 'list':'operation', }, 'scontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tcontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tclass' : {'XMLForm':'element', }, 'port' : {'XMLForm':'element', 'import_typecast':int, }, } def __init__(self, **kwds): super(SEFaultSignature, self).__init__() for k,v in kwds.items(): setattr(self, k, v) class SEPlugin(XmlSerialize): _xml_info = { 'analysis_id' : {'XMLForm':'element'}, 'args' : {'XMLForm':'element', 'list':'arg', }, } def __init__(self, analysis_id, args): super(SEPlugin, self).__init__() self.analysis_id = analysis_id; self.args = args; def __str__(self): return str((self.analysis_id, self.args)) class SEFaultSignatureInfo(XmlSerialize): _xml_info = { 'plugin_list' : {'XMLForm':'element', 'list':'plugin', 'import_typecast':SEPlugin }, 
'audit_event' : {'XMLForm':'element', 'import_typecast':AuditEvent }, 'source' : {'XMLForm':'element' }, 'spath' : {'XMLForm':'element' }, 'tpath' : {'XMLForm':'element' }, 'src_rpm_list' : {'XMLForm':'element', 'list':'rpm', }, 'tgt_rpm_list' : {'XMLForm':'element', 'list':'rpm', }, 'scontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tcontext' : {'XMLForm':'element', 'import_typecast':AvcContext }, 'tclass' : {'XMLForm':'element', }, 'port' : {'XMLForm':'element', 'import_typecast':int, }, 'sig' : {'XMLForm':'element', 'import_typecast':SEFaultSignature }, 'if_text' : {'XMLForm':'element' }, 'then_text' : {'XMLForm':'element' }, 'do_text' : {'XMLForm':'element' }, 'environment' : {'XMLForm':'element', 'import_typecast':SEEnvironment }, 'first_seen_date' : {'XMLForm':'element', 'import_typecast':TimeStamp }, 'last_seen_date' : {'XMLForm':'element', 'import_typecast':TimeStamp }, 'report_count' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: 0 }, 'local_id' : {'XMLForm':'element' }, 'users' : {'XMLForm':'element', 'list':'user', 'import_typecast':SEFaultSignatureUser, }, 'level' : {'XMLForm':'element' }, 'fixable' : {'XMLForm':'element' }, 'button_text' : {'XMLForm':'element' }, } merge_include = ['audit_event', 'tpath', 'src_rpm_list', 'tgt_rpm_list', 'scontext', 'tcontext', 'tclass', 'port', 'environment', 'last_seen_date' ] def __init__(self, **kwds): super(SEFaultSignatureInfo, self).__init__() for k,v in kwds.items(): setattr(self, k, v) self.report_count = 1 self.plugin_list = [] def update_merge(self, siginfo): if siginfo.last_seen_date != self.last_seen_date: self.last_seen_date = siginfo.last_seen_date self.report_count += 1 for name in self.merge_include: setattr(self, name, getattr(siginfo, name)) def get_policy_rpm(self): return self.environment.policy_rpm; def get_hash_str(self): return "%s,%s,%s,%s,%s" % (self.source, self.scontext.type, self.tcontext.type, self.tclass, ",".join(self.sig.access)) def get_hash(self): 
hash = hashlib.sha256(self.get_hash_str()) return hash.hexdigest() def get_user_data(self, username): for user in self.users: if user.username == username: return user log_debug("new SEFaultSignatureUser for %s" % username) user = SEFaultSignatureUser(username) self.users.append(user) return user def find_filter_by_username(self, username): log_debug("find_filter_by_username %s" % username) filter = None user_data = self.get_user_data(username) if user_data is not None: filter = user_data.filter return filter def update_user_filter(self, username, filter_type, data=None): user_data = self.get_user_data(username) user_data.update_filter(filter_type, data) def evaluate_filter_for_user(self, username, filter_type=None): action = 'display' f = self.find_filter_by_username(username) log_debug("evaluate_filter_for_user: found %s user's filter = %s" % (username, f)) if f is not None: if filter_type is not None: f.filter_type = filter_type action = self.evaluate_filter(f) log_debug("evaluate_filter_for_user: found filter for %s: %s\n%s" % (username, action, f)) return action def evaluate_filter(self, filter): filter_type = filter.filter_type action = 'display' if filter_type == FILTER_NEVER: action = 'display' elif filter_type == FILTER_AFTER_FIRST: if filter.count == 0: action = 'display' else: action = 'ignore' elif filter_type == FILTER_ALWAYS: action = 'ignore' else: raise ValueError("unknown filter_type (%s)" % (filter_type)) filter.count += 1 return action def format_rpm_list(self, rpm_list): if isinstance(rpm_list, list): if len(rpm_list) > 0: return " ".join(rpm_list) else: return "" else: return default_text(None) def format_target_object(self): return "%s [ %s ]" % (self.tpath, self.tclass) def description_adjusted_for_permissive(self): permissive_msg = None syscall_record = self.audit_event.get_record_of_type('SYSCALL') if syscall_record != None and syscall_record.get_field('success') == 'yes': permissive_msg = _("%s has a permissive type (%s). 
This access was not denied.") % (self.source, self.scontext.type) if self.environment.enforce == "Permissive": permissive_msg = _("SELinux is in permissive mode. This access was not denied.") def update_derived_template_substitutions(self): self.template_substitutions = {} self.template_substitutions["SOURCE_TYPE"] = self.scontext.type self.template_substitutions["TARGET_TYPE"] = self.tcontext.type self.template_substitutions["SOURCE"] = self.source self.template_substitutions["SOURCE_PATH"] = self.spath self.template_substitutions["SOURCE_BASE_PATH"] = os.path.basename(self.spath) if self.spath: self.template_substitutions["FIX_SOURCE_PATH"] = re.sub(" ",".",self.spath) else: self.spath = _("N/A") self.template_substitutions["TARGET_PATH"] = self.tpath self.template_substitutions["TARGET_BASE_PATH"] = os.path.basename(self.tpath) if self.tpath: self.template_substitutions["FIX_TARGET_PATH"] = re.sub(" ",".",self.tpath) if self.tpath is None: self.template_substitutions["TARGET_DIR"] = None else: if self.tclass == 'dir': self.template_substitutions["TARGET_DIR"] = self.tpath elif self.tclass == 'file': self.template_substitutions["TARGET_DIR"] = os.path.dirname(self.tpath) else: self.template_substitutions["TARGET_DIR"] = None if self.tclass == "dir": self.template_substitutions["TARGET_CLASS"] = "directory" else: self.template_substitutions["TARGET_CLASS"] = self.tclass if self.sig.access is None: self.template_substitutions["ACCESS"] = None else: self.template_substitutions["ACCESS"] = ' '.join(self.sig.access) if len(self.src_rpm_list) > 0: self.template_substitutions["SOURCE_PACKAGE"] = self.src_rpm_list[0] self.template_substitutions["PORT_NUMBER"] = self.port # validate, replace any None values with friendly string for key, value in self.template_substitutions.items(): if value is None: self.template_substitutions[key] = default_text(value) def priority_sort(self, x, y): return cmp(y[0].priority,x[0].priority) def summary(self): if self.tclass == "process": 
return P_(_("SELinux is preventing %s from using the %s access on a process."), _("SELinux is preventing %s from using the '%s' accesses on a process."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access)) if self.tclass == "capability": return P_(_("SELinux is preventing %s from using the %s capability."), _("SELinux is preventing %s from using the '%s' capabilities."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access)) if self.tpath == "(null)": return P_(_("SELinux is preventing %s from %s access on the %s labeled %s."), _("SELinux is preventing %s from '%s' accesses on the %s labeled %s."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tcontext.type) return P_(_("SELinux is preventing %s from %s access on the %s %s."), _("SELinux is preventing %s from '%s' accesses on the %s %s."), len(self.sig.access)) % (self.spath, ", ".join(self.sig.access), translate_class(self.tclass), self.tpath) def get_plugins(self, all = False): self.plugins = load_plugins() plugins = [] total_priority = 0 if all: for p in self.plugins: total_priority += p.priority plugins.append((p, ("allow_ypbind", "1"))) else: for solution in self.plugin_list: for p in self.plugins: if solution.analysis_id == p.analysis_id: total_priority += p.priority plugins.append((p, tuple(solution.args))) break plugins.sort(self.priority_sort) return total_priority, plugins def substitute(self, txt): return Template(txt).safe_substitute(self.template_substitutions) def format_details(self, replace=False): env = self.environment text = _("Additional Information:\n") text += format_2_column_name_value(_("Source Context"), self.scontext.format()) text += format_2_column_name_value(_("Target Context"), self.tcontext.format()) text += format_2_column_name_value(_("Target Objects"), self.format_target_object()) text += format_2_column_name_value(_("Source"), default_text(self.source)) text += format_2_column_name_value(_("Source Path"), 
default_text(self.spath)) text += format_2_column_name_value(_("Port"), default_text(self.port)) if (replace): text += format_2_column_name_value(_("Host"), "(removed)") else: text += format_2_column_name_value(_("Host"), default_text(self.sig.host)) text += format_2_column_name_value(_("Source RPM Packages"), default_text(self.format_rpm_list(self.src_rpm_list))) text += format_2_column_name_value(_("Target RPM Packages"), default_text(self.format_rpm_list(self.tgt_rpm_list))) text += format_2_column_name_value(_("Policy RPM"), default_text(env.policy_rpm)) text += format_2_column_name_value(_("Selinux Enabled"), default_text(env.selinux_enabled)) text += format_2_column_name_value(_("Policy Type"), default_text(env.policy_type)) text += format_2_column_name_value(_("Enforcing Mode"), default_text(env.enforce)) if replace: text += format_2_column_name_value(_("Host Name"),"(removed)") else: text += format_2_column_name_value(_("Host Name"), default_text(env.hostname)) if replace: uname = env.uname.split() uname[1] = "(removed)" text += format_2_column_name_value(_("Platform"), default_text(" ".join(uname))) else: text += format_2_column_name_value(_("Platform"), default_text(env.uname)) text += format_2_column_name_value(_("Alert Count"), default_text(self.report_count)) date_format = "%Y-%m-%d %H:%M:%S %Z" text += format_2_column_name_value(_("First Seen"), self.first_seen_date.format(date_format)) text += format_2_column_name_value(_("Last Seen"), self.last_seen_date.format(date_format)) text += format_2_column_name_value(_("Local ID"), default_text(self.local_id)) text += '\n' + _("Raw Audit Messages") avcbuf = "" for audit_record in self.audit_event.records: if audit_record.record_type == 'AVC': avcbuf += "\n" + audit_record.to_text() + "\n" else: avcbuf += "\ntype=%s msg=%s: " % (audit_record.record_type, audit_record.event_id) avcbuf += ' '.join(["%s=%s" % (k, audit_record.fields[k]) for k in audit_record.fields_ord]) +"\n" avcbuf += "\nHash: " + 
self.get_hash_str() try: audit2allow = "/usr/bin/audit2allow" if os.path.exist(audit2allow): newbuf = "\n\naudit2allow" p = Popen([audit2allow], shell=True,stdin=PIPE, stdout=PIPE) newbuf += p.communicate(avcbuf)[0] if os.path.exists("/var/lib/sepolgen/interface_info"): newbuf += "\naudit2allow -R" p = Popen(["%s -R" % audit2allow ], shell=True,stdin=PIPE, stdout=PIPE) newbuf += p.communicate(avcbuf)[0] avcbuf += newbuf except: pass text += avcbuf + '\n' return text def untranslated(self, func, *args, **kwargs): r'define.*untranslated\(.*\n' # Call the parameter function with the translations turned off # This function is not thread safe, since it manipulates globals global P_, _ saved_translateP_ = P_ saved_translate_ = _ try: P_ = lambda x,y,z: x if z > 1 else y _ = lambda x:x return func(*args, **kwargs) finally: P_ = saved_translateP_ _ = saved_translate_ def format_text(self, all = False, replace = False): self.update_derived_template_substitutions() text = self.summary() total_priority, plugins = self.get_plugins(all) for p, args in plugins: title = _("\n\n***** Plugin %s (%.4s confidence) suggests ") % (p.analysis_id, ((float(p.priority) / float(total_priority)) * 100 + .5)) text += title for i in range(len(title),80): text += _("*") text += _("\n") txt = self.substitute(p.get_if_text(self.audit_event.records, args)).decode('utf-8') text += _("\nIf ") + txt[0].lower() + txt[1:] txt = self.substitute(p.get_then_text(self.audit_event.records, args)).decode('utf-8') text += _("\nThen ") + txt[0].lower() + txt[1:] txt = self.substitute(p.get_do_text(self.audit_event.records, args)).decode('utf-8') text += _("\nDo\n") + txt[0].lower() + txt[1:] text += _('\n\n') return text class SEFaultUserInfo(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'username' : {'XMLForm':'attribute' }, 'email_alert' : {'XMLForm':'element', 'import_typecast':boolean, 'default': lambda: False }, 'email_address_list' : {'XMLForm':'element', 
'list':'email_address', }, } def __init__(self, username): super(SEFaultUserInfo, self).__init__() self.username = username def add_email_address(self, email_address): if not email_address in self.email_address_list: self.email_address_list.append(email_address) class SEFaultUserSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1.0' }, 'user_list' : {'XMLForm':'element', 'list':'user', 'import_typecast':SEFaultUserInfo, }, } def __init__(self): super(SEFaultUserSet, self).__init__() def get_user(self, username): for user in self.user_list: if username == user.username: return user return None def add_user(self, username): if self.get_user(username) is not None: return user = SEFaultUserInfo(username) self.user_list.append(user) return user class SEFaultSignatureSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '%d.%d' % (DATABASE_MAJOR_VERSION, DATABASE_MINOR_VERSION)}, 'users' : {'XMLForm':'element', 'import_typecast':SEFaultUserSet, 'default': lambda: SEFaultUserSet() }, 'signature_list' : {'XMLForm':'element', 'list':'siginfo', 'import_typecast':SEFaultSignatureInfo, }, } def __init__(self): super(SEFaultSignatureSet, self).__init__() def siginfos(self): for siginfo in self.signature_list: yield siginfo def add_siginfo(self, siginfo): self.signature_list.append(siginfo) return siginfo def remove_siginfo(self, siginfo): self.signature_list.remove(siginfo) def clear(self): self.signature_list = [] def generate_local_id(self): return str(uuid.uuid4()) def lookup_local_id(self, local_id): if local_id is None: return None for siginfo in self.signature_list: if siginfo.local_id == local_id: return siginfo return None def match_signatures(self, pat, criteria='exact', xml_info=SEFaultSignature._xml_info): match_targets = xml_info.keys() exact = False if criteria == 'exact': exact = True elif type(criteria) is FloatType: num_match_targets = len(match_targets) score_per_match_target = 1.0 / 
num_match_targets else: raise ValueError("unknown criteria = %s" % criteria) matches = [] for siginfo in self.signature_list: score = 0.0 sig = siginfo.sig for name in match_targets: if getattr(pat, name) == getattr(sig, name): if exact: score = 1.0 else: score += score_per_match_target else: if exact: score = 0.0 break if exact: if score == 1.0: matches.append(SignatureMatch(siginfo, score)) else: if score >= criteria: matches.append(SignatureMatch(siginfo, score)) matches.sort((lambda a,b: cmp(b.score, a.score))) return matches class SEDatabaseProperties(XmlSerialize): _xml_info = { 'name' : {'XMLForm':'element' }, 'friendly_name' : {'XMLForm':'element' }, 'filepath' : {'XMLForm':'element' }, } def __init__(self, name=None, friendly_name=None, filepath=None): super(SEDatabaseProperties, self).__init__() if name is not None: self.name = name if friendly_name is not None: self.friendly_name = friendly_name if filepath is not None: self.filepath = filepath class SEEmailRecipient(XmlSerialize): _xml_info = { 'address' : {'XMLForm':'element' }, 'filter_type' : {'XMLForm':'element', 'import_typecast':int, 'default':lambda: FILTER_AFTER_FIRST }, } def __init__(self, address, filter_type=None): super(SEEmailRecipient, self).__init__() self.address = address if filter_type is not None: self.filter_type = filter_type def __str__(self): return "%s:%s" % (self.address, map_filter_value_to_name.get(self.filter_type, 'unknown')) class SEEmailRecipientSet(XmlSerialize): _xml_info = { 'version' : {'XMLForm':'attribute','default':lambda: '1' }, 'recipient_list' : {'XMLForm':'element', 'list':'recipient', 'import_typecast':SEEmailRecipient, }, } def __init__(self, recipient_list=None): super(SEEmailRecipientSet, self).__init__() if recipient_list is not None: self.recipient_list = recipient_list def __str__(self): return ','.join([str(x) for x in self.recipient_list]) def find_address(self, address): address = address.strip() for recipient in self.recipient_list: if address == 
recipient.address: return recipient return None def add_address(self, address, filter_type=FILTER_AFTER_FIRST): address = address.strip() if not valid_email_address(address): raise ProgramError(ERR_INVALID_EMAIL_ADDR, detail="address='%s'" % address) return recipient = self.find_address(address) if recipient is not None: return self.recipient_list.append(SEEmailRecipient(address, filter_type)) def clear_recipient_list(self): self.recipient_list = [] def parse_recipient_file(self, filepath): import re comment_re = re.compile('#.*') entry_re = re.compile('(\S+)(\s+(.+))?') key_value_re = re.compile("(\w+)\s*=\s*(\S+)") map_boolean = {'enabled' : True, 'true' : True, 'yes' : True, 'on' : True, 'disabled' : False, 'false' : False, 'no' : False, 'off' : False, } try: f = open(filepath) except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) self.clear_recipient_list() for line in f.readlines(): line = comment_re.sub('', line) line = line.strip() if line: match = entry_re.search(line) if match: address = match.group(1) options = match.group(3) filter_type = None if options: for match in key_value_re.finditer(options): option = match.group(1) value = match.group(2) if option == 'filter_type': filter_type = map_filter_name_to_value.get(value.lower(), None) if filter_type is None: log_debug("unknown email filter (%s) for address %s" % (option, address)) else: log_debug("unknown email option (%s) for address %s" % (option, address)) try: self.add_address(address, filter_type) except ProgramError, e: if e.errno == ERR_INVALID_EMAIL_ADDR: log_debug(e.strerror) else: raise e f.close() def write_recipient_file(self, filepath): try: f = open(filepath, 'w') except IOError, e: raise ProgramError(ERR_FILE_OPEN, detail="%s, %s" % (filepath, e.strerror)) for recipient in self.recipient_list: filter_type = map_filter_value_to_name[recipient.filter_type] f.write("%-40s filter_type=%s\n" % (recipient.address, filter_type)) f.close() 
#------------------------------------------------------------------------ if __name__ == '__main__': import libxml2 #memory debug specific libxml2.debugMemory(1) xml_file = 'audit_listener_database.xml' sigs = SEFaultSignatureSet() sigs.read_xml_file(xml_file, 'sigs') siginfo = sigs.signature_list[0] record = siginfo.audit_event.records[0] print record.record_type print "siginfo.audit_event=%s" % siginfo.audit_event print sigs #memory debug specific libxml2.cleanupParser() if libxml2.debugMemory(1) == 0: print "Memory OK" else: print "Memory leak %d bytes" % (libxml2.debugMemory(1)) libxml2.dumpMemory()
./CrossVul/dataset_final_sorted/CWE-77/py/bad_5043_1
crossvul-python_data_bad_5042_0
#
# Copyright (C) 2006-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#

import selinux
from stat import *

import gettext
translation=gettext.translation('setroubleshoot-plugins', fallback=True)
_=translation.gettext

from setroubleshoot.util import *
from setroubleshoot.Plugin import Plugin

class plugin(Plugin):
    """setroubleshoot plugin for 'execmod' AVC denials.

    Distinguishes libraries that legitimately need text relocation
    (report a relabel-to-textrel_shlib_t fix) from libraries that were
    built correctly and therefore should never request execmod (report
    an "unsafe", possibly-compromised, alert).
    """

    summary =_('''
    SELinux is preventing $SOURCE_PATH from loading $TARGET_PATH which requires text relocation.
    ''')

    problem_description = _('''
    The $SOURCE application attempted to load $TARGET_PATH which
    requires text relocation.  This is a potential security problem.
    Most libraries do not need this permission. Libraries are
    sometimes coded incorrectly and request this permission.  The
    <a href="http://people.redhat.com/drepper/selinux-mem.html">SELinux
    Memory Protection Tests</a> web page explains how to remove this requirement.
    You can configure SELinux temporarily to allow $TARGET_PATH to use
    relocation as a workaround, until the library is fixed. Please file a bug report.
    ''')

    unsafe_problem_description = _('''
    The $SOURCE application attempted to load $TARGET_PATH which
    requires text relocation.  This is a potential security problem.
    Most libraries should not need this permission.  The
    <a href="http://people.redhat.com/drepper/selinux-mem.html">
    SELinux Memory Protection Tests</a> web page explains this check.
    This tool examined the library and it looks like it was built
    correctly. So setroubleshoot can not determine if this application
    is compromized or not.  This could be a serious issue. Your system
    may very well be compromised.

    Contact your security administrator and report this issue.
    ''')

    unsafe_fix_description = "Contact your security administrator and report this issue."

    fix_description = _('''
    If you trust $TARGET_PATH to run correctly, you can change the
    file context to textrel_shlib_t. "chcon -t textrel_shlib_t
    '$TARGET_PATH'" You must also change the default file context
    files on the system in order to preserve them even on a full
    relabel.  "semanage fcontext -a -t textrel_shlib_t '$FIX_TARGET_PATH'"
    ''')

    unsafe_then_text = """
    setroubleshoot examined '$FIX_TARGET_PATH' to make sure it was built correctly, but can not determine if this application has been compromized. This alert could be a serious issue and your system could be compromised.
    """
    unsafe_do_text = "Contact your security administrator and report this issue."

    then_text = "You need to change the label on '$FIX_TARGET_PATH'"
    do_text = """# semanage fcontext -a -t textrel_shlib_t '$FIX_TARGET_PATH'
# restorecon -v '$FIX_TARGET_PATH'"""

    def get_then_text(self, avc, args):
        # Non-empty args means analyze() flagged the library as "unsafe".
        if len(args) > 0:
            return self.unsafe_then_text
        return self.then_text

    def get_do_text(self, avc, args):
        if len(args) > 0:
            return self.unsafe_do_text
        return self.do_text

    def __init__(self):
        Plugin.__init__(self,__name__)
        self.set_priority(10)

    def analyze(self, avc):
        """Classify an execmod AVC.

        Returns an "unsafe" report when the target library carries no
        TEXTREL dynamic tag (i.e. it was built correctly and should not
        need execmod), a plain report when the target would normally be
        labeled lib_t, and None otherwise.
        """
        import subprocess
        if avc.has_any_access_in(['execmod']):
            # MATCH
            # AVC records quote the target path; strip the quotes once so
            # both eu-readelf and matchpathcon see the real filename.
            tpath = avc.tpath.strip('"')
            # Equivalent of: eu-readelf -d <tpath> | fgrep -q TEXTREL
            # Built as argument vectors with shell=False so that a crafted
            # library path cannot inject shell commands (CWE-77); the
            # previous commands.getstatusoutput() form interpolated the
            # path straight into a shell command line.
            p1 = subprocess.Popen(['eu-readelf', '-d', tpath],
                                  stdout=subprocess.PIPE)
            p2 = subprocess.Popen(['fgrep', '-q', 'TEXTREL'],
                                  stdin=p1.stdout, stdout=subprocess.PIPE)
            p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
            p1.wait()
            p2.wait()
            if p2.returncode == 1:
                # fgrep found no TEXTREL tag: correctly built library
                # requesting execmod is suspicious.
                return self.report(("unsafe"))
            mcon = selinux.matchpathcon(tpath, S_IFREG)[1]
            if mcon.split(":")[2] == "lib_t":
                return self.report()
        return None
./CrossVul/dataset_final_sorted/CWE-77/py/bad_5042_0
crossvul-python_data_good_5860_0
# -*- coding: utf-8 -*-
#Canto-curses - ncurses RSS reader
# Copyright (C) 2014 Jack Miller <jack@codezen.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

from canto_next.hooks import on_hook
from canto_next.plugins import Plugin
from canto_next.remote import assign_to_dict, access_dict

from .command import CommandHandler, register_commands, register_arg_types, unregister_all, _string, register_aliases, commands, command_help
from .tagcore import tag_updater
from .parser import prep_for_display
from .config import needs_eval

import logging

log = logging.getLogger("COMMON")

import subprocess
import tempfile
import urllib.request, urllib.error, urllib.parse
import shlex
import sys
import os
import os.path

class BasePlugin(Plugin):
    # Marker base class: plugins targeting GuiBase subclass this.
    pass

class GuiBase(CommandHandler):
    """Common command plumbing shared by canto-curses GUI objects.

    Registers the base command set (bind/remote/set/help/...), forks
    external programs (browser, editor, canto-remote), and routes
    configuration reads/writes through the daemon callbacks.
    """

    def init(self):
        """Register argument types, commands, and aliases for this object."""
        args = {
            "key": ("[key]: Simple keys (a), basic chords (C-r, M-a), or named whitespace like space or tab", _string),
            "command": ("[command]: Any canto-curses command. (Will show current binding if not given)\n Simple: goto\n Chained: foritems \\\\& goto \\\\& item-state read \\\\& clearitems \\\\& next-item", self.type_unescape_command),
            "remote-cmd": ("[remote cmd]", self.type_remote_cmd),
            "url" : ("[URL]", _string),
            "help-command" : ("[help-command]: Any canto-curses command, if blank, 'any' or unknown, will display help overview", self.type_help_cmd),
            "config-option" : ("[config-option]: Any canto-curses option", self.type_config_option),
            "executable" : ("[executable]: A program in your PATH", self.type_executable),
        }

        cmds = {
            "bind" : (self.cmd_bind, [ "key", "command" ], "Add or query %s keybinds" % self.get_opt_name()),
            "transform" : (self.cmd_transform, ["string"], "Set user transform"),
            "remote addfeed" : (lambda x : self.cmd_remote("addfeed", x), ["url"], "Subscribe to a feed"),
            "remote listfeeds" : (lambda : self.cmd_remote("listfeeds", ""), [], "List feeds"),
            "remote": (self.cmd_remote, ["remote-cmd", "string"], "Give a command to canto-remote"),
            "destroy": (self.cmd_destroy, [], "Destroy this %s" % self.get_opt_name()),
            "set" : (self.cmd_set, ["config-option", "string"], "Set configuration options"),
            "set browser.path" : (lambda x : self.cmd_set("browser.path", x), ["executable"], "Set desired browser"),
        }

        help_cmds = {
            "help" : (self.cmd_help, ["help-command"], "Get help on a specific command")
        }

        aliases = {
            "add" : "remote addfeed",
            "del" : "remote delfeed",
            "list" : "remote listfeeds",

            # Compatibility / evaluation aliases
            "set global_transform" : "set defaults.global_transform",
            "set keep_time" : "set defaults.keep_time",
            "set keep_unread" : "set defaults.keep_unread",
            "set browser " : "set browser.path ",
            "set txt_browser " : "set browser.text ",
            "set update.auto " : "set update.auto.enabled ",
            "set border" : "set taglist.border",
            "filter" : "transform",
            "sort" : "transform",
            "next-item" : "rel-set-cursor 1",
            "prev-item" : "rel-set-cursor -1",
        }

        register_arg_types(self, args)
        register_commands(self, cmds, "Base")
        register_commands(self, help_cmds, "Help")
        register_aliases(self, aliases)

        # Lazily resolved in _edit() from $EDITOR or an interactive prompt.
        self.editor = None

        self.plugin_class = BasePlugin
        self.update_plugin_lookups()

    def cmd_destroy(self):
        """Ask the GUI to tear this object down."""
        self.callbacks["die"](self)

    def die(self):
        # Drop every command/type/alias this object registered.
        unregister_all(self)

    # Provide completions, but we don't care to verify settings.

    def type_executable(self):
        """Arg type: any executable found on $PATH (completion only)."""
        executables = []
        for path_dir in os.environ["PATH"].split(os.pathsep):
            for f in os.listdir(path_dir):
                fullpath = os.path.join(path_dir, f)
                if os.path.isfile(fullpath) and os.access(fullpath, os.X_OK):
                    executables.append(f)

        return (executables, lambda x : (True, x))

    def _fork(self, path, href, text, fetch=False):
        """Fork and exec `path` (a shell command template) on `href`.

        If `fetch` is set, the URL is first downloaded into a temp file and
        the command is run on that local file instead. Returns the child
        pid in the parent; the child never returns.
        """

        # Prepare temporary files, if fetch.

        if fetch:
            # Get a path (sans query strings, etc.) for the URL
            tmppath = urllib.parse.urlparse(href).path

            # Return just the basename of the path (no directories)
            fname = os.path.basename(tmppath)

            # Grab a temporary directory. This allows us to create a file with
            # an unperturbed filename so scripts can freely use regex /
            # extension matching in addition to mimetype detection.

            tmpdir = tempfile.mkdtemp(prefix="canto-")
            tmpnam = tmpdir + '/' + fname

            on_hook("curses_exit", lambda : (os.unlink(tmpnam)))
            on_hook("curses_exit", lambda : (os.rmdir(tmpdir)))

        pid = os.fork()

        # Parents can now bail.
        if pid:
            return pid

        if fetch:
            tmp = open(tmpnam, 'w+b')

            # Grab the HTTP info / prepare to read.
            response = urllib.request.urlopen(href)

            # Grab in kilobyte chunks to avoid wasting memory on something
            # that's going to be immediately written to disk.

            while True:
                r = response.read(1024)
                if not r:
                    break
                tmp.write(r)

            response.close()
            tmp.close()

            href = tmpnam

        # Make sure that we quote href such that malicious URLs like
        # "http://example.com & rm -rf ~/" won't be interpreted by the shell.

        href = shlex.quote(href)

        # A lot of programs don't appreciate
        # having their fds closed, so instead
        # we dup them to /dev/null.

        fd = os.open("/dev/null", os.O_RDWR)
        os.dup2(fd, sys.stderr.fileno())

        if not text:
            os.setpgid(os.getpid(), os.getpid())
            os.dup2(fd, sys.stdout.fileno())

        if "%u" in path:
            path = path.replace("%u", href)
        elif href:
            path = path + " " + href

        os.execv("/bin/sh", ["/bin/sh", "-c", path])

        # Just in case.
        sys.exit(0)

    def _edit(self, text):
        """Round-trip `text` through $EDITOR; returns the edited text.

        Falls back to prompting for an editor; on editor failure or no
        editor, returns the input unchanged.
        """
        if not self.editor:
            self.editor = os.getenv("EDITOR")
        if not self.editor:
            self.editor = self.input("editor: ")

        # No editor, or cancelled dialog, no change.
        if not self.editor:
            return text

        self.callbacks["pause_interface"]()

        # Setup tempfile to edit.
        fd, path = tempfile.mkstemp(text=True)

        f = os.fdopen(fd, "w")
        f.write(text)
        f.close()

        # Invoke editor
        logging.info("Invoking editor on %s" % path)

        pid = self._fork(self.editor + " %u", path, True)
        pid, status = os.waitpid(pid, 0)

        if status == 0:
            f = open(path, "r")
            r = f.read()
            f.close()
        else:
            self.callbacks["set_var"]("error_msg",
                    "Editor failed! Status = %d" % (status,))
            r = text

        # Cleanup temp file.
        os.unlink(path)

        self.callbacks["unpause_interface"]()

        return r

    def cmd_edit(self, **kwargs):
        """Edit the option named by kwargs["opt"] in $EDITOR and store it."""
        t = self.callbacks["get_opt"](kwargs["opt"])
        r = self._edit(t)
        log.info("Edited %s to %s" % (kwargs["opt"], r))
        self.callbacks["set_opt"](kwargs["opt"], r)

    def type_remote_cmd(self):
        """Arg type: a known canto-remote subcommand."""
        remote_cmds = [ "help", "addfeed", "listfeeds", "delfeed",
                "force-update", "config", "one-config", "export",
                "import", "kill" ]
        return (remote_cmds, lambda x : (x in remote_cmds, x))

    def _remote_argv(self, argv):
        """Run canto-remote with `argv`, logging its (escaped) output."""
        loc_args = self.callbacks["get_var"]("location")
        argv = [argv[0]] + loc_args + argv[1:]

        log.debug("Calling remote: %s" % argv)

        # check_output return bytes, we must decode.
        out = subprocess.check_output(argv).decode()

        log.debug("Output:")
        log.debug(out.rstrip())

        # Strip anything that could be misconstrued as style
        # from remote output.
        out = out.replace("%","\\%")

        log.info(out.rstrip())

    def _remote(self, args):
        """Split a command string and dispatch it to canto-remote."""
        args = "canto-remote " + args

        # Add location args, so the remote is connecting
        # to the correct daemon.
        self._remote_argv(shlex.split(args))

    def remote_args(self, args):
        return self.string(args, "remote: ")

    def cmd_remote(self, remote_cmd, args):
        self._remote("%s %s" % (remote_cmd, args))

    def _goto(self, urls, fetch=False):
        """Open each URL in the configured browser, pausing curses for
        text-mode browsers."""
        browser = self.callbacks["get_conf"]()["browser"]

        if not browser["path"]:
            log.error("No browser defined! Cannot goto.")
            return

        if browser["text"]:
            self.callbacks["pause_interface"]()

        for url in urls:
            pid = self._fork(browser["path"], url, browser["text"], fetch)
            if browser["text"]:
                os.waitpid(pid, 0)

        if browser["text"]:
            self.callbacks["unpause_interface"]()

    # Like goto, except download the file to /tmp before executing browser.

    def _fetch(self, urls):
        self._goto(urls, True)

    def cmd_transform(self, transform):
        """Install `transform` as the user transform and refresh all tags."""
        tag_updater.transform("user", transform)
        tag_updater.reset(True)
        tag_updater.update()

    def type_unescape_command(self):
        """Arg type: a command string; un-escapes shlex-quoted '&'."""
        def validate_uescape_command(x):
            # Change the escaped '&' from shlex into a raw &
            return (True, x.replace(" '&' ", " & "))
        return (None, validate_uescape_command)

    def cmd_bind(self, key, cmd):
        self.bind(key, cmd.lstrip().rstrip(), True)

    def bind(self, key, cmd, overwrite=False):
        """Bind `key` to `cmd` in this object's config section.

        With empty `cmd`, just report the current binding. Returns True on
        success/report, False if the key is unknown or already bound (and
        overwrite is not set).
        """
        opt = self.get_opt_name()
        key = self.translate_key(key)

        c = self.callbacks["get_conf"]()

        if not cmd:
            if key in c[opt]["key"]:
                log.info("[%s] %s = %s" % (opt, key, c[opt]["key"][key]))
                return True
            else:
                return False
        else:
            if key in c[opt]["key"] and c[opt]["key"][key] and not overwrite:
                log.debug("%s already bound to %s" % (key, c[opt]["key"][key]))
                return False

            log.debug("Binding %s.%s to %s" % (opt, key, cmd))

            c[opt]["key"][key] = cmd
            self.callbacks["set_conf"](c)
            return True

    def type_help_cmd(self):
        """Arg type: a command name; unknown input maps to 'all' overview."""
        help_cmds = commands()

        def help_validator(x):
            if x in ["commands", "cmds"]:
                return (True, 'commands')

            for group in help_cmds:
                if x in help_cmds[group]:
                    return (True, x)

            return (True, 'all')

        return (help_cmds, help_validator)

    def cmd_help(self, cmd):
        """Show the help overview, the command list, or one command's help."""
        if self.callbacks["get_var"]("info_msg"):
            self.callbacks["set_var"]("info_msg", "")

        if cmd == 'all':
            log.info("%BHELP%b\n")
            log.info("This is a list of available keybinds.\n")
            log.info("For a list of commands, type ':help commands'\n")
            log.info("For help with a specific command, type ':help [command]'\n")

            log.info("%BBinds%b")

            config = self.callbacks["get_conf"]()

            for optname in [ "main", "taglist", "reader" ]:
                if "key" in config[optname] and list(config[optname]["key"].keys()) != []:
                    maxbindl = max([ len(x) for x in config[optname]["key"].keys() ]) + 1
                    log.info("\n%B" + optname + "%b\n")
                    for bind in sorted(config[optname]["key"]):
                        bindeff = prep_for_display(bind + (" " * (maxbindl - len(bind))))
                        cmd = prep_for_display(config[optname]["key"][bind])
                        log.info("%s %s" % (bindeff, cmd))

        elif cmd == 'commands':
            gc = commands()
            for group in sorted(gc.keys()):
                log.info("%B" + group + "%b\n")
                for c in sorted(gc[group]):
                    log.info(command_help(c))
                log.info("")
        else:
            log.info(command_help(cmd, True))

    # Validate a single config option
    # Will offer completions for any recognized config option
    # Will *not* reject validly formatted options that don't already exist

    def _get_current_config_options(self, obj, stack):
        # Recursively flatten nested config dicts into quoted dotted paths.
        r = []

        for item in obj.keys():
            stack.append(item)

            if type(obj[item]) == dict:
                r.extend(self._get_current_config_options(obj[item], stack[:]))
            else:
                r.append(shlex.quote(".".join(stack)))

            stack = stack[:-1]

        return r

    def type_config_option(self):
        """Arg type: any config option; completes from current config."""
        conf = self.callbacks["get_conf"]()

        possibles = self._get_current_config_options(conf, [])
        possibles.sort()

        return (possibles, lambda x : (True, x))

    def cmd_set(self, opt, val):
        """Set (or display) a config option.

        Dispatches on the option prefix: defaults.*, feed.* (needs a
        selected item in a maintag), tag.* (needs a selection), or the
        plain GUI config. Values for options flagged by needs_eval() are
        eval()'d first.
        """
        log.debug("SET: %s '%s'" % (opt, val))

        evaluate = needs_eval(opt)

        if val != "" and evaluate:
            log.debug("Evaluating...")
            try:
                val = eval(val)
            except Exception as e:
                log.error("Couldn't eval '%s': %s" % (val, e))
                return

        if opt.startswith("defaults."):

            conf = { "defaults" : self.callbacks["get_defaults"]() }

            if val != "":
                assign_to_dict(conf, opt, val)
                self.callbacks["set_defaults"](conf["defaults"])

        elif opt.startswith("feed."):
            sel = self.callbacks["get_var"]("selected")
            if not sel:
                log.info("Feed settings only work with a selected item")
                return

            if sel.is_tag:
                try_tag = sel
            else:
                try_tag = sel.parent_tag

            if not try_tag.tag.startswith("maintag:"):
                log.info("Selection is in a user tag, cannot set feed settings")
                return

            # Strip the "maintag:" prefix to get the feed name.
            name = try_tag.tag[8:]

            conf = { "feed" : self.callbacks["get_feed_conf"](name) }

            if val != "":
                assign_to_dict(conf, opt, val)
                self.callbacks["set_feed_conf"](name, conf["feed"])

        elif opt.startswith("tag."):
            sel = self.callbacks["get_var"]("selected")
            if not sel:
                log.info("Tag settings only work with a selected item")
                return

            if sel.is_tag:
                tag = sel
            else:
                tag = sel.parent_tag

            conf = { "tag" : self.callbacks["get_tag_conf"](tag.tag) }

            if val != "":
                assign_to_dict(conf, opt, val)
                self.callbacks["set_tag_conf"](tag.tag, conf["tag"])

        else:
            conf = self.callbacks["get_conf"]()

            if val != "":
                assign_to_dict(conf, opt, val)
                self.callbacks["set_conf"](conf)

            ok, val = access_dict(conf, opt)
            if not ok:
                log.error("Unknown option %s" % opt)
                log.error("Full conf: %s" % conf)
            else:
                log.info("%s = %s" % (opt, val))
./CrossVul/dataset_final_sorted/CWE-77/py/good_5860_0
crossvul-python_data_good_5042_0
#
# Copyright (C) 2006-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#

import selinux
from stat import *

import gettext
translation=gettext.translation('setroubleshoot-plugins', fallback=True)
_=translation.gettext

from setroubleshoot.util import *
from setroubleshoot.Plugin import Plugin

class plugin(Plugin):
    """setroubleshoot plugin for 'execmod' AVC denials.

    Reports an "unsafe" alert when the library carries no TEXTREL tag
    (correctly built libraries should never need execmod), or a
    relabel-to-textrel_shlib_t fix when the target is a lib_t library.
    """

    summary =_('''
    SELinux is preventing $SOURCE_PATH from loading $TARGET_PATH which requires text relocation.
    ''')

    problem_description = _('''
    The $SOURCE application attempted to load $TARGET_PATH which
    requires text relocation.  This is a potential security problem.
    Most libraries do not need this permission. Libraries are
    sometimes coded incorrectly and request this permission.  The
    <a href="http://people.redhat.com/drepper/selinux-mem.html">SELinux
    Memory Protection Tests</a> web page explains how to remove this requirement.
    You can configure SELinux temporarily to allow $TARGET_PATH to use
    relocation as a workaround, until the library is fixed. Please file a bug report.
    ''')

    unsafe_problem_description = _('''
    The $SOURCE application attempted to load $TARGET_PATH which
    requires text relocation.  This is a potential security problem.
    Most libraries should not need this permission.  The
    <a href="http://people.redhat.com/drepper/selinux-mem.html">
    SELinux Memory Protection Tests</a> web page explains this check.
    This tool examined the library and it looks like it was built
    correctly. So setroubleshoot can not determine if this application
    is compromized or not.  This could be a serious issue. Your system
    may very well be compromised.

    Contact your security administrator and report this issue.
    ''')

    unsafe_fix_description = "Contact your security administrator and report this issue."

    fix_description = _('''
    If you trust $TARGET_PATH to run correctly, you can change the
    file context to textrel_shlib_t. "chcon -t textrel_shlib_t
    '$TARGET_PATH'" You must also change the default file context
    files on the system in order to preserve them even on a full
    relabel.  "semanage fcontext -a -t textrel_shlib_t '$FIX_TARGET_PATH'"
    ''')

    unsafe_then_text = """
    setroubleshoot examined '$FIX_TARGET_PATH' to make sure it was built correctly, but can not determine if this application has been compromized. This alert could be a serious issue and your system could be compromised.
    """
    unsafe_do_text = "Contact your security administrator and report this issue."

    then_text = "You need to change the label on '$FIX_TARGET_PATH'"
    do_text = """# semanage fcontext -a -t textrel_shlib_t '$FIX_TARGET_PATH'
# restorecon -v '$FIX_TARGET_PATH'"""

    def get_then_text(self, avc, args):
        # Non-empty args means analyze() flagged the library as "unsafe".
        if len(args) > 0:
            return self.unsafe_then_text
        return self.then_text

    def get_do_text(self, avc, args):
        if len(args) > 0:
            return self.unsafe_do_text
        return self.do_text

    def __init__(self):
        Plugin.__init__(self,__name__)
        self.set_priority(10)

    def analyze(self, avc):
        """Classify an execmod AVC; see class docstring for outcomes."""
        import subprocess
        if avc.has_any_access_in(['execmod']):
            # MATCH
            # AVC records quote the target path (this method already relied
            # on strip('"') for matchpathcon); strip once so eu-readelf is
            # not handed a literally-quoted, non-existent filename.
            tpath = avc.tpath.strip('"')
            # from https://docs.python.org/2.7/library/subprocess.html#replacing-shell-pipeline
            p1 = subprocess.Popen(['eu-readelf', '-d', tpath],
                                  stdout=subprocess.PIPE)
            p2 = subprocess.Popen(["fgrep", "-q", "TEXTREL"],
                                  stdin=p1.stdout, stdout=subprocess.PIPE)
            p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
            p1.wait()
            p2.wait()
            if p2.returncode == 1:
                # fgrep found no TEXTREL tag: correctly built library
                # requesting execmod is suspicious.
                return self.report(("unsafe"))
            mcon = selinux.matchpathcon(tpath, S_IFREG)[1]
            if mcon.split(":")[2] == "lib_t":
                return self.report()
        return None
./CrossVul/dataset_final_sorted/CWE-77/py/good_5042_0
crossvul-python_data_good_3373_0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Rewrite gperf output for unicode_unfold_key into Oniguruma C source.

Reads gperf-generated C on stdin and rewrites the string-keyed hash
lookup into a codepoint-keyed one, line by line.
"""

import sys
import re

# Patterns use raw strings so regex escapes (\s, \{, \d, ...) are explicit.
REG_LINE_GPERF = re.compile(r'#line .+gperf"')
REG_HASH_FUNC = re.compile(r'hash\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+unsigned\s+int\s+len\s*\)')
REG_STR_AT = re.compile(r'str\[(\d+)\]')
REG_UNFOLD_KEY = re.compile(r'unicode_unfold_key\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+unsigned\s+int\s+len\)')
REG_ENTRY = re.compile(r'\{".+?",\s*/\*(.+?)\*/\s*(-?\d+),\s*(\d)\}')
REG_EMPTY_ENTRY = re.compile(r'\{"",\s*(-?\d+),\s*(\d)\}')
REG_IF_LEN = re.compile(r'if\s*\(\s*len\s*<=\s*MAX_WORD_LENGTH.+')
REG_GET_HASH = re.compile(r'(?:register\s+)?(?:unsigned\s+)?int\s+key\s*=\s*hash\s*\(str,\s*len\);')
REG_GET_CODE = re.compile(r'(?:register\s+)?const\s+char\s*\*\s*s\s*=\s*wordlist\[key\]\.name;')
REG_CODE_CHECK = re.compile(r'if\s*\(\*str\s*==\s*\*s\s*&&\s*!strncmp.+\)')

def parse_line(s):
    """Return `s` with the first applicable rewrite applied (or unchanged)."""
    s = s.rstrip()
    # Each substitution is tried in order; the first one that changes the
    # line wins, mirroring gperf's per-line output structure.
    r = re.sub(REG_LINE_GPERF, '', s)
    if r != s:
        return r

    r = re.sub(REG_HASH_FUNC, 'hash(OnigCodePoint codes[])', s)
    if r != s:
        return r

    r = re.sub(REG_STR_AT, 'onig_codes_byte_at(codes, \\1)', s)
    if r != s:
        return r

    r = re.sub(REG_UNFOLD_KEY, 'unicode_unfold_key(OnigCodePoint code)', s)
    if r != s:
        return r

    r = re.sub(REG_ENTRY, '{\\1, \\2, \\3}', s)
    if r != s:
        return r

    # Empty gperf slots get the sentinel code 0xffffffff.
    r = re.sub(REG_EMPTY_ENTRY, '{0xffffffff, \\1, \\2}', s)
    if r != s:
        return r

    r = re.sub(REG_IF_LEN, 'if (0 == 0)', s)
    if r != s:
        return r

    r = re.sub(REG_GET_HASH, 'int key = hash(&code);', s)
    if r != s:
        return r

    r = re.sub(REG_GET_CODE, 'OnigCodePoint gcode = wordlist[key].code;', s)
    if r != s:
        return r

    # The index >= 0 guard rejects empty-slot sentinel entries.
    r = re.sub(REG_CODE_CHECK, 'if (code == gcode && wordlist[key].index >= 0)', s)
    if r != s:
        return r

    return s

def parse_file(f):
    """Convert the gperf output read from `f`, writing C to stdout."""
    print("/* This file was converted by gperf_unfold_key_conv.py\n from gperf output file. */")
    line = f.readline()
    while line:
        s = parse_line(line)
        print(s)
        line = f.readline()

# main
# Guarded so importing this module has no side effects (the original ran
# parse_file(sys.stdin) at import time); output is unchanged when run as
# a script. print() calls replace Python-2-only print statements.
if __name__ == '__main__':
    parse_file(sys.stdin)
./CrossVul/dataset_final_sorted/CWE-787/py/good_3373_0
crossvul-python_data_bad_3373_0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Rewrite gperf output for Oniguruma's unicode_unfold_key lookup.

Reads a gperf-generated C file on stdin and prints a converted version in
which the string-keyed hash lookup is replaced by a code-point-keyed one.
Each input line is rewritten by the first rule whose pattern changes it.
"""

import re
import sys

REG_LINE_GPERF = re.compile(r'#line .+gperf"')
REG_HASH_FUNC = re.compile(r'hash\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+unsigned\s+int\s+len\s*\)')
REG_STR_AT = re.compile(r'str\[(\d+)\]')
REG_UNFOLD_KEY = re.compile(r'unicode_unfold_key\s*\(register\s+const\s+char\s*\*\s*str,\s*register\s+unsigned\s+int\s+len\)')
REG_ENTRY = re.compile(r'\{".+?",\s*/\*(.+?)\*/\s*(-?\d+),\s*(\d)\}')
REG_EMPTY_ENTRY = re.compile(r'\{"",\s*(-?\d+),\s*(\d)\}')
REG_IF_LEN = re.compile(r'if\s*\(\s*len\s*<=\s*MAX_WORD_LENGTH.+')
REG_GET_HASH = re.compile(r'(?:register\s+)?(?:unsigned\s+)?int\s+key\s*=\s*hash\s*\(str,\s*len\);')
REG_GET_CODE = re.compile(r'(?:register\s+)?const\s+char\s*\*\s*s\s*=\s*wordlist\[key\]\.name;')
REG_CODE_CHECK = re.compile(r'if\s*\(\*str\s*==\s*\*s\s*&&\s*!strncmp.+\)')

# (pattern, replacement) pairs, tried in order; the first rule that
# actually changes the line wins (mirrors the original if/return chain).
_RULES = (
    (REG_LINE_GPERF, ''),
    (REG_HASH_FUNC, r'hash(OnigCodePoint codes[])'),
    (REG_STR_AT, r'onig_codes_byte_at(codes, \1)'),
    (REG_UNFOLD_KEY, r'unicode_unfold_key(OnigCodePoint code)'),
    (REG_ENTRY, r'{\1, \2, \3}'),
    (REG_EMPTY_ENTRY, r'{0xffffffff, \1, \2}'),
    (REG_IF_LEN, r'if (0 == 0)'),
    (REG_GET_HASH, r'int key = hash(&code);'),
    (REG_GET_CODE, r'OnigCodePoint gcode = wordlist[key].code;'),
    # FIX: the previous replacement emitted `if (code == gcode)` only.
    # Empty hash slots carry code 0xffffffff and a negative index, so a
    # matching code on an empty slot let the generated C index the fold
    # table with a negative value (out-of-bounds access, CWE-787).  The
    # guard must also require a non-negative index.
    (REG_CODE_CHECK, r'if (code == gcode && wordlist[key].index >= 0)'),
)


def parse_line(s):
    """Return ``s`` (right-stripped) rewritten by the first matching rule.

    Lines no rule matches are returned unchanged (after rstrip).
    """
    s = s.rstrip()
    for pattern, replacement in _RULES:
        r = pattern.sub(replacement, s)
        if r != s:
            return r
    return s


def parse_file(f):
    """Convert every line of ``f`` and print the result to stdout."""
    # Single-argument print() behaves identically under Python 2 and 3.
    print("/* This file was converted by gperf_unfold_key_conv.py\n from gperf output file. */")
    line = f.readline()
    while line:
        print(parse_line(line))
        line = f.readline()


if __name__ == '__main__':
    # Guarding the entry point keeps importing this module side-effect
    # free (the original consumed stdin at import time).
    parse_file(sys.stdin)
./CrossVul/dataset_final_sorted/CWE-787/py/bad_3373_0
crossvul-python_data_bad_765_6
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth import login as django_login
from django.contrib.auth import logout as django_logout
from django.core.exceptions import ObjectDoesNotExist
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView

from nopassword.rest import serializers


class LoginView(GenericAPIView):
    """Accept a username and trigger delivery of a one-time login code."""

    serializer_class = serializers.LoginSerializer
    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        """Validate the request and send a login code to the user."""
        ser = self.get_serializer(data=request.data)
        ser.is_valid(raise_exception=True)
        ser.save()
        payload = {"detail": _("Login code has been sent.")}
        return Response(payload, status=status.HTTP_200_OK)


@method_decorator(sensitive_post_parameters('code'), 'dispatch')
class LoginCodeView(GenericAPIView):
    """Exchange a valid login code for an auth token (and session login)."""

    permission_classes = (AllowAny,)
    serializer_class = serializers.LoginCodeSerializer
    token_serializer_class = serializers.TokenSerializer
    token_model = Token

    def process_login(self):
        # Attach the authenticated user to the current session.
        django_login(self.request, self.user)

    def login(self):
        # Resolve the user from the validated code and hand out a token.
        self.user = self.serializer.validated_data['user']
        self.token, created = self.token_model.objects.get_or_create(user=self.user)
        if getattr(settings, 'REST_SESSION_LOGIN', True):
            self.process_login()

    def get_response(self):
        serialized = self.token_serializer_class(
            instance=self.token,
            context=self.get_serializer_context(),
        )
        payload = serialized.data
        payload['next'] = self.serializer.validated_data['code'].next
        return Response(payload, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        """Validate the submitted code, log the user in and return a token."""
        self.serializer = self.get_serializer(data=request.data)
        self.serializer.is_valid(raise_exception=True)
        self.serializer.save()
        self.login()
        return self.get_response()


class LogoutView(APIView):
    """Revoke the caller's auth token and terminate the session."""

    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        return self.logout(request)

    def logout(self, request):
        try:
            request.user.auth_token.delete()
        except (AttributeError, ObjectDoesNotExist):
            # Anonymous user or no token issued yet: nothing to revoke.
            pass
        django_logout(request)
        payload = {"detail": _("Successfully logged out.")}
        return Response(payload, status=status.HTTP_200_OK)
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_6
crossvul-python_data_bad_765_7
from django.db import models

try:
    from django.contrib.auth.models import AbstractUser
except ImportError:
    # Very old Django without AbstractUser: fall back to a plain model.
    from django.db.models import Model as AbstractUser


class CustomUser(AbstractUser):
    """Test user with a renamed username column to exercise USERNAME_FIELD."""

    extra_field = models.CharField(max_length=2)
    new_username_field = models.CharField('userid', unique=True, max_length=20)

    USERNAME_FIELD = 'new_username_field'

    def save(self, *args, **kwargs):
        # Mirror the inherited username into the custom username column
        # so natural-key lookups stay consistent.
        self.new_username_field = self.username
        super(CustomUser, self).save(*args, **kwargs)


class PhoneNumberUser(CustomUser):
    """CustomUser variant that additionally carries a phone number."""

    phone_number = models.CharField(max_length=11, default="+15555555")


class NoUsernameUser(models.Model):
    """User model without a "username" field for authentication backend testing
    """
    pass
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_7
crossvul-python_data_good_765_8
# -*- coding: utf8 -*-
"""Django settings for the django-nopassword test project."""
import os

import django

DEBUG = True

# SQLite; DB_NAME may point at a file, otherwise run fully in memory.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.environ.get('DB_NAME', ':memory:'),
    }
}

# Custom user model exercised by the test suite.
AUTH_USER_MODEL = 'tests.CustomUser'

# Login codes expire after 15 minutes.
NOPASSWORD_LOGIN_CODE_TIMEOUT = 900

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'rest_framework',
    'rest_framework.authtoken',
    'nopassword',
    'tests',
]

# Try the passwordless e-mail backend first, then Django's default.
AUTHENTICATION_BACKENDS = (
    'nopassword.backends.EmailBackend',
    'django.contrib.auth.backends.ModelBackend'
)

TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Test-only secret; never reuse outside this suite.
SECRET_KEY = 'supersecret'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages'
            ],
        },
    },
]

MIDDLEWARE = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# Django < 1.10 reads MIDDLEWARE_CLASSES instead of MIDDLEWARE.
if django.VERSION < (1, 10):
    MIDDLEWARE_CLASSES = MIDDLEWARE

ROOT_URLCONF = 'tests.urls'

# Print outgoing mail to stdout so tests can inspect it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
}
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_8
crossvul-python_data_bad_765_3
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth import authenticate, get_backends, get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import resolve_url
from django.utils.translation import ugettext_lazy as _

from nopassword import models


class LoginForm(forms.Form):
    """Collect a username and, on save, mail out a one-time login code."""

    error_messages = {
        'invalid_username': _(
            "Please enter a correct %(username)s. "
            "Note that it is case-sensitive."
        ),
        'inactive': _("This account is inactive."),
    }

    next = forms.CharField(max_length=200, required=False, widget=forms.HiddenInput)

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        # Build the username input from the active user model's own field.
        self.username_field = get_user_model()._meta.get_field(get_user_model().USERNAME_FIELD)
        self.fields['username'] = self.username_field.formfield()

    def clean_username(self):
        """Resolve the username to an active user, stashing it for save()."""
        username = self.cleaned_data['username']
        user_model = get_user_model()
        try:
            user = user_model._default_manager.get_by_natural_key(username)
        except user_model.DoesNotExist:
            raise forms.ValidationError(
                self.error_messages['invalid_username'],
                code='invalid_username',
                params={'username': self.username_field.verbose_name},
            )
        if not user.is_active:
            raise forms.ValidationError(
                self.error_messages['inactive'],
                code='inactive',
            )
        self.cleaned_data['user'] = user
        return username

    def save(self, request, login_code_url='login_code', domain_override=None, extra_context=None):
        """Create a login code for the validated user and deliver it."""
        login_code = models.LoginCode.create_code_for_user(
            user=self.cleaned_data['user'],
            next=self.cleaned_data['next'],
        )
        if domain_override:
            site_name = domain = domain_override
        else:
            site = get_current_site(request)
            site_name = site.name
            domain = site.domain
        scheme = 'https' if request.is_secure() else 'http'
        url = '{}://{}{}?code={}'.format(
            scheme,
            domain,
            resolve_url(login_code_url),
            login_code.code,
        )
        context = {
            'domain': domain,
            'site_name': site_name,
            'code': login_code.code,
            'url': url,
        }
        if extra_context:
            context.update(extra_context)
        self.send_login_code(login_code, context)
        return login_code

    def send_login_code(self, login_code, context, **kwargs):
        """Dispatch delivery to the first auth backend that supports it."""
        backend = next(
            (b for b in get_backends() if hasattr(b, 'send_login_code')),
            None,
        )
        if backend is None:
            raise ImproperlyConfigured(
                'Please add a nopassword authentication backend to settings, '
                'e.g. `nopassword.backends.EmailBackend`'
            )
        backend.send_login_code(login_code, context, **kwargs)


class LoginCodeForm(forms.Form):
    """Validate a submitted login code and authenticate its owner."""

    code = forms.ModelChoiceField(
        label=_('Login code'),
        queryset=models.LoginCode.objects.select_related('user'),
        to_field_name='code',
        widget=forms.TextInput,
        error_messages={
            'invalid_choice': _('Login code is invalid. It might have expired.'),
        },
    )

    error_messages = {
        'invalid_code': _("Unable to log in with provided login code."),
    }

    def __init__(self, request=None, *args, **kwargs):
        super(LoginCodeForm, self).__init__(*args, **kwargs)
        self.request = request

    def clean_code(self):
        """Run the auth backends against the code; keep the user on success."""
        code = self.cleaned_data['code']
        username = code.user.get_username()
        credentials = {
            get_user_model().USERNAME_FIELD: username,
            'code': code.code,
        }
        user = authenticate(self.request, **credentials)
        if not user:
            raise forms.ValidationError(
                self.error_messages['invalid_code'],
                code='invalid_code',
            )
        self.cleaned_data['user'] = user
        return code

    def get_user(self):
        return self.cleaned_data.get('user')

    def save(self):
        # Consume the code so it cannot be replayed.
        self.cleaned_data['code'].delete()
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_3
crossvul-python_data_bad_765_4
404: Not Found
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_4
crossvul-python_data_bad_765_8
# -*- coding: utf8 -*-
"""Django settings for the django-nopassword test project."""
import django

DEBUG = False

# In-memory SQLite: each run starts from an empty database.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}

# Custom user model exercised by the test suite.
AUTH_USER_MODEL = 'tests.CustomUser'

# Login codes expire after 15 minutes.
NOPASSWORD_LOGIN_CODE_TIMEOUT = 900

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'rest_framework',
    'rest_framework.authtoken',
    'nopassword',
    'tests',
]

# Try the passwordless e-mail backend first, then Django's default.
AUTHENTICATION_BACKENDS = (
    'nopassword.backends.EmailBackend',
    'django.contrib.auth.backends.ModelBackend'
)

TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Test-only secret; never reuse outside this suite.
SECRET_KEY = 'supersecret'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
            ],
        },
    },
]

MIDDLEWARE = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# Django < 1.10 reads MIDDLEWARE_CLASSES instead of MIDDLEWARE.
if django.VERSION < (1, 10):
    MIDDLEWARE_CLASSES = MIDDLEWARE

ROOT_URLCONF = 'tests.urls'

# Discard outgoing mail; tests do not inspect message bodies here.
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
}
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_8
crossvul-python_data_good_765_4
# Generated by Django 2.2 on 2019-04-06 13:22
import uuid

from django.db import migrations, models


class Migration(migrations.Migration):
    """Drop the stored ``code`` column and switch the primary key to a UUID."""

    dependencies = [
        ('nopassword', '0001_initial'),
    ]

    # The plain-text code column is removed first; the primary key then
    # becomes a randomly generated, non-editable UUID.
    operations = [
        migrations.RemoveField(model_name='logincode', name='code'),
        migrations.AlterField(
            model_name='logincode',
            name='id',
            field=models.UUIDField(
                primary_key=True,
                serialize=False,
                default=uuid.uuid4,
                editable=False,
            ),
        ),
    ]
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_4
crossvul-python_data_good_765_2
# -*- coding: utf-8 -*-
from datetime import timedelta

from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.utils import timezone

from nopassword.models import LoginCode


class NoPasswordBackend(ModelBackend):
    """Authenticate a user from a one-time login code instead of a password."""

    def authenticate(self, request, username=None, code=None, **kwargs):
        """Return the user owning a non-expired ``code``, or None."""
        user_model = get_user_model()
        if username is None:
            username = kwargs.get(user_model.USERNAME_FIELD)
        if not username or not code:
            return
        try:
            user = user_model._default_manager.get_by_natural_key(username)
            if not self.user_can_authenticate(user):
                return
            timeout = getattr(settings, 'NOPASSWORD_LOGIN_CODE_TIMEOUT', 900)
            cutoff = timezone.now() - timedelta(seconds=timeout)
            # We don't delete the login code when authenticating, as that
            # is done during validation of the login form and validation
            # should not have side effects.  It is the responsibility of
            # the view/form to delete the token as soon as the login was
            # successful.
            for candidate in LoginCode.objects.filter(user=user, timestamp__gt=cutoff):
                if candidate.code == code:
                    user.login_code = candidate
                    return user
            return
        except (user_model.DoesNotExist, LoginCode.DoesNotExist):
            return

    def send_login_code(self, code, context, **kwargs):
        """Deliver ``code`` to its user; concrete subclasses implement this."""
        raise NotImplementedError
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_2
crossvul-python_data_bad_765_1
404: Not Found
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_1
crossvul-python_data_good_765_1
#!/usr/bin/env python
"""Command-line entry point (Django's manage.py) for the test project."""
import os
import sys

if __name__ == "__main__":
    # Point Django at the test settings before the framework loads them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_1
crossvul-python_data_bad_407_0
# -*- coding: utf-8 -*- """ requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). """ import os import sys import time from datetime import timedelta from .auth import _basic_auth_str from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook from ._internal_utils import to_native_string from .utils import to_key_val_list, default_headers from .exceptions import ( TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter from .utils import ( requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, get_auth_from_url, rewind_body ) from .status_codes import codes # formerly defined here, reexposed here for backward compatibility from .models import REDIRECT_STATI # Preferred clock, based on which one is more accurate on a given system. if sys.platform == 'win32': try: # Python 3.4+ preferred_clock = time.perf_counter except AttributeError: # Earlier than Python 3. preferred_clock = time.clock else: preferred_clock = time.time def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. 
verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. Extract keys first to avoid altering # the dictionary during iteration. none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): """Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ if session_hooks is None or session_hooks.get('response') == []: return request_hooks if request_hooks is None or request_hooks.get('response') == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin(object): def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will # be called at least once upon the original response and at least twice # on each subsequent redirect response (if any). # If a custom mixin is used to handle this logic, it may be advantageous # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: location = resp.headers['location'] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. 
if is_py3: location = location.encode('latin1') return to_native_string(location, 'utf8') return None def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): """Receives a Response. Returns a generator of Responses or Requests.""" hist = [] # keep track of history url = self.get_redirect_target(resp) previous_fragment = urlparse(req.url).fragment while url: prepared_request = req.copy() # Update history and keep track of redirects. # resp.history must ignore the original request in this loop hist.append(resp) resp.history = hist[1:] try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) # Release the connection back into the pool. resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) if parsed.fragment == '' and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. 
if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) self.rebuild_method(prepared_request, resp) # https://github.com/requests/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): # https://github.com/requests/requests/issues/3490 purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. rewindable = ( prepared_request._body_position is not None and ('Content-Length' in headers or 'Transfer-Encoding' in headers) ) # Attempt to rewind consumed file-like object. if rewindable: rewind_body(prepared_request) # Override the original request. 
req = prepared_request if yield_requests: yield req else: resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) # extract redirect url, if any, for the next loop url = self.get_redirect_target(resp) yield resp def rebuild_auth(self, prepared_request, response): """When being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. """ headers = prepared_request.headers url = prepared_request.url if 'Authorization' in headers: # If we get redirected to a new host, we should strip out any # authentication headers. original_parsed = urlparse(response.request.url) redirect_parsed = urlparse(url) if (original_parsed.hostname != redirect_parsed.hostname): del headers['Authorization'] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) return def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. 
:rtype: dict """ proxies = proxies if proxies is not None else {} headers = prepared_request.headers url = prepared_request.url scheme = urlparse(url).scheme new_proxies = proxies.copy() no_proxy = proxies.get('no_proxy') bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) if self.trust_env and not bypass_proxy: environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) proxy = environ_proxies.get(scheme, environ_proxies.get('all')) if proxy: new_proxies.setdefault(scheme, proxy) if 'Proxy-Authorization' in headers: del headers['Proxy-Authorization'] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return new_proxies def rebuild_method(self, prepared_request, response): """When being redirected we may want to change the method of the request based on certain specs or browser behavior. """ method = prepared_request.method # http://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if response.status_code == codes.found and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if response.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistence, connection-pooling, and configuration. 
Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('http://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: >>> s.get('http://httpbin.org/get') <Response [200]> """ __attrs__ = [ 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', 'max_redirects', ] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol or protocol and host to the URL of the proxy #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to #: be used on each :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. self.verify = True #: SSL client certificate default, if String, path to ssl client #: cert file (.pem). If Tuple, ('cert', 'key') pair. self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is #: 30. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Trust environment settings for proxy configuration, default #: authentication and similar. self.trust_env = True #: A CookieJar containing all currently outstanding cookies set on this #: session. By default it is a #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but #: may be any other ``cookielib.CookieJar`` compatible object. 
self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount('https://', HTTPAdapter()) self.mount('http://', HTTPAdapter()) def __enter__(self): return self def __exit__(self, *args): self.close() def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies) # Set environment's basic authentication if not explicitly set. auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. 
:param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :rtype: requests.Response """ # Create the Request. req = Request( method=method.upper(), url=url, headers=headers, files=files, data=data or {}, json=json, params=params or {}, auth=auth, cookies=cookies, hooks=hooks, ) prep = self.prepare_request(req) proxies = proxies or {} settings = self.merge_environment_settings( prep.url, proxies, stream, verify, cert ) # Send the request. send_kwargs = { 'timeout': timeout, 'allow_redirects': allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) return resp def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`Response` object. 
:param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('GET', url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('OPTIONS', url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return self.request('HEAD', url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('POST', url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('DELETE', url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. :rtype: requests.Response """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. 
if isinstance(request, Request): raise ValueError('You can only send PreparedRequests.') # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') hooks = request.hooks # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = preferred_clock() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) elapsed = preferred_clock() - start r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks r = dispatch_hook('response', hooks, r, **kwargs) # Persist cookies if r.history: # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) # Redirect resolving generator. gen = self.resolve_redirects(r, request, **kwargs) # Resolve redirects if allowed. history = [resp for resp in gen] if allow_redirects else [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) except StopIteration: pass if not stream: r.content return r def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. 
no_proxy = proxies.get('no_proxy') if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration and be compatible # with cURL. if verify is True or verify is None: verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert} def get_adapter(self, url): """ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter """ for (prefix, adapter) in self.adapters.items(): if url.lower().startswith(prefix.lower()): return adapter # Nothing matches :-/ raise InvalidSchema("No connection adapters were found for '%s'" % url) def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by prefix length. """ self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) return state def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) def session(): """ Returns a :class:`Session` for context-management. .. deprecated:: 1.0.0 This method has been deprecated since version 1.0.0 and is only kept for backwards compatibility. New code should use :class:`~requests.sessions.Session` to create a session. This may be removed at a future date. :rtype: Session """ return Session()
./CrossVul/dataset_final_sorted/CWE-522/py/bad_407_0
crossvul-python_data_bad_765_5
# -*- coding: utf-8 -*- import hashlib import os from django.conf import settings from django.db import models from django.utils import timezone from django.utils.translation import ugettext_lazy as _ class LoginCode(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='login_codes', editable=False, verbose_name=_('user'), on_delete=models.CASCADE) code = models.CharField(max_length=20, editable=False, verbose_name=_('code')) timestamp = models.DateTimeField(editable=False) next = models.TextField(editable=False, blank=True) def __str__(self): return "%s - %s" % (self.user, self.timestamp) def save(self, *args, **kwargs): self.timestamp = timezone.now() if not self.next: self.next = '/' super(LoginCode, self).save(*args, **kwargs) @classmethod def create_code_for_user(cls, user, next=None): if not user.is_active: return None code = cls.generate_code() login_code = LoginCode(user=user, code=code) if next is not None: login_code.next = next login_code.save() return login_code @classmethod def generate_code(cls): hash_algorithm = getattr(settings, 'NOPASSWORD_HASH_ALGORITHM', 'sha256') m = getattr(hashlib, hash_algorithm)() m.update(getattr(settings, 'SECRET_KEY', None).encode('utf-8')) m.update(os.urandom(16)) if getattr(settings, 'NOPASSWORD_NUMERIC_CODES', False): hashed = str(int(m.hexdigest(), 16)) else: hashed = m.hexdigest() return hashed
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_5
crossvul-python_data_good_765_7
from django.db import models try: from django.contrib.auth.models import AbstractUser, UserManager except ImportError: from django.db.models import Model as AbstractUser class CustomUser(AbstractUser): extra_field = models.CharField(max_length=2) new_username_field = models.CharField('userid', unique=True, max_length=20) USERNAME_FIELD = 'new_username_field' def save(self, *args, **kwargs): self.new_username_field = self.username super(CustomUser, self).save(*args, **kwargs) class PhoneNumberUser(CustomUser): phone_number = models.CharField(max_length=11, default="+15555555") class NoUsernameUser(models.Model): """User model without a "username" field for authentication backend testing """ pass
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_7
crossvul-python_data_good_407_0
# -*- coding: utf-8 -*- """ requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). """ import os import sys import time from datetime import timedelta from .auth import _basic_auth_str from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook from ._internal_utils import to_native_string from .utils import to_key_val_list, default_headers from .exceptions import ( TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter from .utils import ( requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, get_auth_from_url, rewind_body ) from .status_codes import codes # formerly defined here, reexposed here for backward compatibility from .models import REDIRECT_STATI # Preferred clock, based on which one is more accurate on a given system. if sys.platform == 'win32': try: # Python 3.4+ preferred_clock = time.perf_counter except AttributeError: # Earlier than Python 3. preferred_clock = time.clock else: preferred_clock = time.time def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. 
verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. Extract keys first to avoid altering # the dictionary during iteration. none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): """Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ if session_hooks is None or session_hooks.get('response') == []: return request_hooks if request_hooks is None or request_hooks.get('response') == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin(object): def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will # be called at least once upon the original response and at least twice # on each subsequent redirect response (if any). # If a custom mixin is used to handle this logic, it may be advantageous # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: location = resp.headers['location'] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. 
if is_py3: location = location.encode('latin1') return to_native_string(location, 'utf8') return None def should_strip_auth(self, old_url, new_url): """Decide whether Authorization header should be removed when redirecting""" old_parsed = urlparse(old_url) new_parsed = urlparse(new_url) if old_parsed.hostname != new_parsed.hostname: return True # Special case: allow http -> https redirect when using the standard # ports. This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): return False # Standard case: root URI must match return old_parsed.port != new_parsed.port or old_parsed.scheme != new_parsed.scheme def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): """Receives a Response. Returns a generator of Responses or Requests.""" hist = [] # keep track of history url = self.get_redirect_target(resp) previous_fragment = urlparse(req.url).fragment while url: prepared_request = req.copy() # Update history and keep track of redirects. # resp.history must ignore the original request in this loop hist.append(resp) resp.history = hist[1:] try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) # Release the connection back into the pool. 
resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) if parsed.fragment == '' and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) self.rebuild_method(prepared_request, resp) # https://github.com/requests/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): # https://github.com/requests/requests/issues/3490 purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers try: del headers['Cookie'] except KeyError: pass # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. 
rewindable = ( prepared_request._body_position is not None and ('Content-Length' in headers or 'Transfer-Encoding' in headers) ) # Attempt to rewind consumed file-like object. if rewindable: rewind_body(prepared_request) # Override the original request. req = prepared_request if yield_requests: yield req else: resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) # extract redirect url, if any, for the next loop url = self.get_redirect_target(resp) yield resp def rebuild_auth(self, prepared_request, response): """When being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. """ headers = prepared_request.headers url = prepared_request.url if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): # If we get redirected to a new host, we should strip out any # authentication headers. del headers['Authorization'] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) return def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. 
:rtype: dict """ proxies = proxies if proxies is not None else {} headers = prepared_request.headers url = prepared_request.url scheme = urlparse(url).scheme new_proxies = proxies.copy() no_proxy = proxies.get('no_proxy') bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) if self.trust_env and not bypass_proxy: environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) proxy = environ_proxies.get(scheme, environ_proxies.get('all')) if proxy: new_proxies.setdefault(scheme, proxy) if 'Proxy-Authorization' in headers: del headers['Proxy-Authorization'] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return new_proxies def rebuild_method(self, prepared_request, response): """When being redirected we may want to change the method of the request based on certain specs or browser behavior. """ method = prepared_request.method # http://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... # First, turn 302s into GETs. if response.status_code == codes.found and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if response.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistence, connection-pooling, and configuration. 
Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('http://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: >>> s.get('http://httpbin.org/get') <Response [200]> """ __attrs__ = [ 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', 'max_redirects', ] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol or protocol and host to the URL of the proxy #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to #: be used on each :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. self.verify = True #: SSL client certificate default, if String, path to ssl client #: cert file (.pem). If Tuple, ('cert', 'key') pair. self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is #: 30. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Trust environment settings for proxy configuration, default #: authentication and similar. self.trust_env = True #: A CookieJar containing all currently outstanding cookies set on this #: session. By default it is a #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but #: may be any other ``cookielib.CookieJar`` compatible object. 
self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount('https://', HTTPAdapter()) self.mount('http://', HTTPAdapter()) def __enter__(self): return self def __exit__(self, *args): self.close() def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies) # Set environment's basic authentication if not explicitly set. auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. 
:param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :rtype: requests.Response """ # Create the Request. req = Request( method=method.upper(), url=url, headers=headers, files=files, data=data or {}, json=json, params=params or {}, auth=auth, cookies=cookies, hooks=hooks, ) prep = self.prepare_request(req) proxies = proxies or {} settings = self.merge_environment_settings( prep.url, proxies, stream, verify, cert ) # Send the request. send_kwargs = { 'timeout': timeout, 'allow_redirects': allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) return resp def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`Response` object. 
:param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('GET', url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('OPTIONS', url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return self.request('HEAD', url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('POST', url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('DELETE', url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. :rtype: requests.Response """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. 
if isinstance(request, Request): raise ValueError('You can only send PreparedRequests.') # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') hooks = request.hooks # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = preferred_clock() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) elapsed = preferred_clock() - start r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks r = dispatch_hook('response', hooks, r, **kwargs) # Persist cookies if r.history: # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) # Redirect resolving generator. gen = self.resolve_redirects(r, request, **kwargs) # Resolve redirects if allowed. history = [resp for resp in gen] if allow_redirects else [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) except StopIteration: pass if not stream: r.content return r def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. 
no_proxy = proxies.get('no_proxy') if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration and be compatible # with cURL. if verify is True or verify is None: verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert} def get_adapter(self, url): """ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter """ for (prefix, adapter) in self.adapters.items(): if url.lower().startswith(prefix.lower()): return adapter # Nothing matches :-/ raise InvalidSchema("No connection adapters were found for '%s'" % url) def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by prefix length. """ self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) return state def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) def session(): """ Returns a :class:`Session` for context-management. .. deprecated:: 1.0.0 This method has been deprecated since version 1.0.0 and is only kept for backwards compatibility. New code should use :class:`~requests.sessions.Session` to create a session. This may be removed at a future date. :rtype: Session """ return Session()
./CrossVul/dataset_final_sorted/CWE-522/py/good_407_0
crossvul-python_data_bad_765_2
# -*- coding: utf-8 -*- from datetime import timedelta from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.auth.backends import ModelBackend from django.utils import timezone from nopassword.models import LoginCode class NoPasswordBackend(ModelBackend): def authenticate(self, request, username=None, code=None, **kwargs): if username is None: username = kwargs.get(get_user_model().USERNAME_FIELD) if not username or not code: return try: user = get_user_model()._default_manager.get_by_natural_key(username) if not self.user_can_authenticate(user): return timeout = getattr(settings, 'NOPASSWORD_LOGIN_CODE_TIMEOUT', 900) timestamp = timezone.now() - timedelta(seconds=timeout) # We don't delete the login code when authenticating, # as that is done during validation of the login form # and validation should not have any side effects. # It is the responsibility of the view/form to delete the token # as soon as the login was successfull. user.login_code = LoginCode.objects.get(user=user, code=code, timestamp__gt=timestamp) return user except (get_user_model().DoesNotExist, LoginCode.DoesNotExist): return def send_login_code(self, code, context, **kwargs): raise NotImplementedError
./CrossVul/dataset_final_sorted/CWE-522/py/bad_765_2
crossvul-python_data_good_765_3
# -*- coding: utf-8 -*- from django import forms from django.contrib.auth import authenticate, get_backends, get_user_model from django.contrib.sites.shortcuts import get_current_site from django.core.exceptions import ImproperlyConfigured from django.shortcuts import resolve_url from django.utils.translation import ugettext_lazy as _ from nopassword import models class LoginForm(forms.Form): error_messages = { 'invalid_username': _( "Please enter a correct %(username)s. " "Note that it is case-sensitive." ), 'inactive': _("This account is inactive."), } next = forms.CharField(max_length=200, required=False, widget=forms.HiddenInput) def __init__(self, *args, **kwargs): super(LoginForm, self).__init__(*args, **kwargs) self.username_field = get_user_model()._meta.get_field(get_user_model().USERNAME_FIELD) self.fields['username'] = self.username_field.formfield() def clean_username(self): username = self.cleaned_data['username'] try: user = get_user_model()._default_manager.get_by_natural_key(username) except get_user_model().DoesNotExist: raise forms.ValidationError( self.error_messages['invalid_username'], code='invalid_username', params={'username': self.username_field.verbose_name}, ) if not user.is_active: raise forms.ValidationError( self.error_messages['inactive'], code='inactive', ) self.cleaned_data['user'] = user return username def save(self, request, login_code_url='login_code', domain_override=None, extra_context=None): login_code = models.LoginCode.create_code_for_user( user=self.cleaned_data['user'], next=self.cleaned_data['next'], ) if not domain_override: current_site = get_current_site(request) site_name = current_site.name domain = current_site.domain else: site_name = domain = domain_override url = '{}://{}{}?user={}&code={}'.format( 'https' if request.is_secure() else 'http', domain, resolve_url(login_code_url), login_code.user.pk, login_code.code, ) context = { 'domain': domain, 'site_name': site_name, 'code': login_code.code, 'url': url, } if 
extra_context: context.update(extra_context) self.send_login_code(login_code, context) return login_code def send_login_code(self, login_code, context, **kwargs): for backend in get_backends(): if hasattr(backend, 'send_login_code'): backend.send_login_code(login_code, context, **kwargs) break else: raise ImproperlyConfigured( 'Please add a nopassword authentication backend to settings, ' 'e.g. `nopassword.backends.EmailBackend`' ) class LoginCodeForm(forms.Form): user = forms.CharField() code = forms.CharField( label=_('Login code'), error_messages={ 'invalid_choice': _('Login code is invalid. It might have expired.'), }, ) error_messages = { 'invalid_code': _("Unable to log in with provided login code."), } def __init__(self, request=None, *args, **kwargs): super(LoginCodeForm, self).__init__(*args, **kwargs) self.request = request def clean(self): user_id = self.cleaned_data.get('user', None) if user_id is None: raise forms.ValidationError( self.error_messages['invalid_code'], code='invalid_code', ) user = get_user_model().objects.get(pk=user_id) code = self.cleaned_data['code'] user = authenticate(self.request, **{ get_user_model().USERNAME_FIELD: user.username, 'code': code, }) if not user: raise forms.ValidationError( self.error_messages['invalid_code'], code='invalid_code', ) self.cleaned_data['user'] = user return self.cleaned_data def get_user(self): return self.cleaned_data.get('user') def save(self): if self.get_user().login_code: self.get_user().login_code.delete()
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_3
crossvul-python_data_good_765_5
# -*- coding: utf-8 -*- import hashlib import uuid from django.conf import settings from django.db import models from django.utils import timezone from django.utils.translation import ugettext_lazy as _ class LoginCode(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='login_codes', editable=False, verbose_name=_('user'), on_delete=models.CASCADE) timestamp = models.DateTimeField(editable=False) next = models.TextField(editable=False, blank=True) def __str__(self): return "%s - %s" % (self.user, self.timestamp) @property def code(self): hash_algorithm = getattr(settings, 'NOPASSWORD_HASH_ALGORITHM', 'sha256') m = getattr(hashlib, hash_algorithm)() m.update(getattr(settings, 'SECRET_KEY', None).encode('utf-8')) m.update(str(self.id).encode()) if getattr(settings, 'NOPASSWORD_NUMERIC_CODES', False): hashed = str(int(m.hexdigest(), 16)) else: hashed = m.hexdigest() return hashed def save(self, *args, **kwargs): self.timestamp = timezone.now() if not self.next: self.next = '/' super(LoginCode, self).save(*args, **kwargs) @classmethod def create_code_for_user(cls, user, next=None): if not user.is_active: return None login_code = LoginCode(user=user) if next is not None: login_code.next = next login_code.save() return login_code
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_5
crossvul-python_data_bad_1110_0
from functools import partial import django_otp from django.conf import settings from django.contrib.auth.views import redirect_to_login from django.urls import NoReverseMatch, reverse from django.utils.functional import SimpleLazyObject from django_otp.middleware import OTPMiddleware as _OTPMiddleware class VerifyUserMiddleware(_OTPMiddleware): _allowed_url_names = [ "wagtail_2fa_auth", "wagtail_2fa_device_list", "wagtail_2fa_device_new", "wagtail_2fa_device_qrcode", "wagtailadmin_login", "wagtailadmin_logout", ] def __call__(self, request): if hasattr(self, 'process_request'): response = self.process_request(request) if not response: response = self.get_response(request) if hasattr(self, 'process_response'): response = self.process_response(request, response) return response def process_request(self, request): if request.user: request.user = SimpleLazyObject(partial(self._verify_user, request, request.user)) user = request.user if self._require_verified_user(request): user_has_device = django_otp.user_has_device(user, confirmed=True) if user_has_device and not user.is_verified(): return redirect_to_login( request.get_full_path(), login_url=reverse("wagtail_2fa_auth") ) elif not user_has_device and settings.WAGTAIL_2FA_REQUIRED: # only allow the user to visit the admin index page and the # admin setup page return redirect_to_login( request.get_full_path(), login_url=reverse("wagtail_2fa_device_new") ) def _require_verified_user(self, request): user = request.user if not settings.WAGTAIL_2FA_REQUIRED: # If two factor authentication is disabled in the settings return False if not user.is_authenticated: return False # If the user has no access to the admin anyway then don't require a # verified user here if not ( user.is_staff or user.is_superuser or user.has_perms(["wagtailadmin.access_admin"]) ): return False # Allow the user to a fixed number of paths when not verified if request.path in self._allowed_paths: return False # For all other cases require that the user 
is verfied via otp return True @property def _allowed_paths(self): """Return the paths the user may visit when not verified via otp This result cannot be cached since we want to be compatible with the django-hosts package. Django-hosts alters the urlconf based on the hostname in the request, so the urls might exist for admin.<domain> but not for www.<domain>. """ results = [] for route_name in self._allowed_url_names: try: results.append(settings.WAGTAIL_MOUNT_PATH + reverse(route_name)) except NoReverseMatch: pass return results
./CrossVul/dataset_final_sorted/CWE-522/py/bad_1110_0
crossvul-python_data_good_1110_0
from functools import partial import django_otp from django.conf import settings from django.contrib.auth.views import redirect_to_login from django.urls import NoReverseMatch, reverse from django.utils.functional import SimpleLazyObject from django_otp.middleware import OTPMiddleware as _OTPMiddleware class VerifyUserMiddleware(_OTPMiddleware): _allowed_url_names = [ "wagtail_2fa_auth", "wagtailadmin_login", "wagtailadmin_logout", ] _allowed_url_names_no_device = [ "wagtail_2fa_auth", "wagtail_2fa_device_list", "wagtail_2fa_device_new", "wagtail_2fa_device_qrcode", "wagtailadmin_login", "wagtailadmin_logout", ] def __call__(self, request): if hasattr(self, 'process_request'): response = self.process_request(request) if not response: response = self.get_response(request) if hasattr(self, 'process_response'): response = self.process_response(request, response) return response def process_request(self, request): if request.user: request.user = SimpleLazyObject(partial(self._verify_user, request, request.user)) user = request.user if self._require_verified_user(request): user_has_device = django_otp.user_has_device(user, confirmed=True) if user_has_device and not user.is_verified(): return redirect_to_login( request.get_full_path(), login_url=reverse("wagtail_2fa_auth") ) elif not user_has_device and settings.WAGTAIL_2FA_REQUIRED: # only allow the user to visit the admin index page and the # admin setup page return redirect_to_login( request.get_full_path(), login_url=reverse("wagtail_2fa_device_new") ) def _require_verified_user(self, request): user = request.user if not settings.WAGTAIL_2FA_REQUIRED: # If two factor authentication is disabled in the settings return False if not user.is_authenticated: return False # If the user has no access to the admin anyway then don't require a # verified user here if not ( user.is_staff or user.is_superuser or user.has_perms(["wagtailadmin.access_admin"]) ): return False # Allow the user to a fixed number of paths when not 
verified user_has_device = django_otp.user_has_device(user, confirmed=True) if request.path in self._get_allowed_paths(user_has_device): return False # For all other cases require that the user is verfied via otp return True def _get_allowed_paths(self, has_device): """Return the paths the user may visit when not verified via otp. If the user already has a registered device, return a limited set of paths to prevent them from adding or listing devices to prevent them from adding or listing devices. """ allowed_url_names = self._allowed_url_names if not has_device: allowed_url_names = self._allowed_url_names_no_device results = [] for route_name in allowed_url_names: try: results.append(settings.WAGTAIL_MOUNT_PATH + reverse(route_name)) except NoReverseMatch: pass return results
./CrossVul/dataset_final_sorted/CWE-522/py/good_1110_0
crossvul-python_data_good_765_6
# -*- coding: utf-8 -*- from django.conf import settings from django.contrib.auth import login as django_login from django.contrib.auth import logout as django_logout from django.core.exceptions import ObjectDoesNotExist from django.utils.decorators import method_decorator from django.utils.translation import ugettext_lazy as _ from django.views.decorators.debug import sensitive_post_parameters from rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.generics import GenericAPIView from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.views import APIView from nopassword.rest import serializers class LoginView(GenericAPIView): serializer_class = serializers.LoginSerializer permission_classes = (AllowAny,) def post(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) serializer.save() return Response( {"detail": _("Login code has been sent.")}, status=status.HTTP_200_OK ) @method_decorator(sensitive_post_parameters('code'), 'dispatch') class LoginCodeView(GenericAPIView): permission_classes = (AllowAny,) serializer_class = serializers.LoginCodeSerializer token_serializer_class = serializers.TokenSerializer token_model = Token def process_login(self): django_login(self.request, self.user) def login(self): self.user = self.serializer.validated_data['user'] self.token, created = self.token_model.objects.get_or_create(user=self.user) if getattr(settings, 'REST_SESSION_LOGIN', True): self.process_login() def get_response(self): token_serializer = self.token_serializer_class( instance=self.token, context=self.get_serializer_context(), ) data = token_serializer.data data['next'] = self.serializer.validated_data['user'].login_code.next return Response(data, status=status.HTTP_200_OK) def post(self, request, *args, **kwargs): self.serializer = self.get_serializer(data=request.data) 
self.serializer.is_valid(raise_exception=True) self.serializer.save() self.login() return self.get_response() class LogoutView(APIView): permission_classes = (AllowAny,) def post(self, request, *args, **kwargs): return self.logout(request) def logout(self, request): try: request.user.auth_token.delete() except (AttributeError, ObjectDoesNotExist): pass django_logout(request) return Response( {"detail": _("Successfully logged out.")}, status=status.HTTP_200_OK, )
./CrossVul/dataset_final_sorted/CWE-522/py/good_765_6
crossvul-python_data_bad_3871_0
404: Not Found
./CrossVul/dataset_final_sorted/CWE-120/py/bad_3871_0
crossvul-python_data_good_3871_0
#!/usr/bin/env python # Reproductions/tests for crashes/read errors in TiffDecode.c # When run in python, all of these images should fail for # one reason or another, either as a buffer overrun, # unrecognized datastream, or truncated image file. # There shouldn't be any segfaults. # # if run like # `valgrind --tool=memcheck python check_tiff_crashes.py 2>&1 | grep TiffDecode.c` # the output should be empty. There may be python issues # in the valgrind especially if run in a debug python # version. from PIL import Image repro_read_strip = ( "images/crash_1.tif", "images/crash_2.tif", ) for path in repro_read_strip: with Image.open(path) as im: try: im.load() except Exception as msg: print(msg)
./CrossVul/dataset_final_sorted/CWE-120/py/good_3871_0
crossvul-python_data_good_4296_2
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function from cryptography import utils from cryptography.exceptions import ( InvalidSignature, UnsupportedAlgorithm, _Reasons, ) from cryptography.hazmat.backends.openssl.utils import ( _calculate_digest_and_algorithm, _check_not_prehashed, _warn_sign_verify_deprecated, ) from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric import ( AsymmetricSignatureContext, AsymmetricVerificationContext, rsa, ) from cryptography.hazmat.primitives.asymmetric.padding import ( AsymmetricPadding, MGF1, OAEP, PKCS1v15, PSS, calculate_max_pss_salt_length, ) from cryptography.hazmat.primitives.asymmetric.rsa import ( RSAPrivateKeyWithSerialization, RSAPublicKeyWithSerialization, ) def _get_rsa_pss_salt_length(pss, key, hash_algorithm): salt = pss._salt_length if salt is MGF1.MAX_LENGTH or salt is PSS.MAX_LENGTH: return calculate_max_pss_salt_length(key, hash_algorithm) else: return salt def _enc_dec_rsa(backend, key, data, padding): if not isinstance(padding, AsymmetricPadding): raise TypeError("Padding must be an instance of AsymmetricPadding.") if isinstance(padding, PKCS1v15): padding_enum = backend._lib.RSA_PKCS1_PADDING elif isinstance(padding, OAEP): padding_enum = backend._lib.RSA_PKCS1_OAEP_PADDING if not isinstance(padding._mgf, MGF1): raise UnsupportedAlgorithm( "Only MGF1 is supported by this backend.", _Reasons.UNSUPPORTED_MGF, ) if not backend.rsa_padding_supported(padding): raise UnsupportedAlgorithm( "This combination of padding and hash algorithm is not " "supported by this backend.", _Reasons.UNSUPPORTED_PADDING, ) else: raise UnsupportedAlgorithm( "{} is not supported by this backend.".format(padding.name), _Reasons.UNSUPPORTED_PADDING, ) return _enc_dec_rsa_pkey_ctx(backend, key, data, 
padding_enum, padding) def _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding): if isinstance(key, _RSAPublicKey): init = backend._lib.EVP_PKEY_encrypt_init crypt = backend._lib.EVP_PKEY_encrypt else: init = backend._lib.EVP_PKEY_decrypt_init crypt = backend._lib.EVP_PKEY_decrypt pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL) backend.openssl_assert(pkey_ctx != backend._ffi.NULL) pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) res = init(pkey_ctx) backend.openssl_assert(res == 1) res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum) backend.openssl_assert(res > 0) buf_size = backend._lib.EVP_PKEY_size(key._evp_pkey) backend.openssl_assert(buf_size > 0) if isinstance(padding, OAEP) and backend._lib.Cryptography_HAS_RSA_OAEP_MD: mgf1_md = backend._evp_md_non_null_from_algorithm( padding._mgf._algorithm ) res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) backend.openssl_assert(res > 0) oaep_md = backend._evp_md_non_null_from_algorithm(padding._algorithm) res = backend._lib.EVP_PKEY_CTX_set_rsa_oaep_md(pkey_ctx, oaep_md) backend.openssl_assert(res > 0) if ( isinstance(padding, OAEP) and padding._label is not None and len(padding._label) > 0 ): # set0_rsa_oaep_label takes ownership of the char * so we need to # copy it into some new memory labelptr = backend._lib.OPENSSL_malloc(len(padding._label)) backend.openssl_assert(labelptr != backend._ffi.NULL) backend._ffi.memmove(labelptr, padding._label, len(padding._label)) res = backend._lib.EVP_PKEY_CTX_set0_rsa_oaep_label( pkey_ctx, labelptr, len(padding._label) ) backend.openssl_assert(res == 1) outlen = backend._ffi.new("size_t *", buf_size) buf = backend._ffi.new("unsigned char[]", buf_size) # Everything from this line onwards is written with the goal of being as # constant-time as is practical given the constraints of Python and our # API. See Bleichenbacher's '98 attack on RSA, and its many many variants. 
# As such, you should not attempt to change this (particularly to "clean it # up") without understanding why it was written this way (see # Chesterton's Fence), and without measuring to verify you have not # introduced observable time differences. res = crypt(pkey_ctx, buf, outlen, data, len(data)) resbuf = backend._ffi.buffer(buf)[: outlen[0]] backend._lib.ERR_clear_error() if res <= 0: raise ValueError("Encryption/decryption failed.") return resbuf def _rsa_sig_determine_padding(backend, key, padding, algorithm): if not isinstance(padding, AsymmetricPadding): raise TypeError("Expected provider of AsymmetricPadding.") pkey_size = backend._lib.EVP_PKEY_size(key._evp_pkey) backend.openssl_assert(pkey_size > 0) if isinstance(padding, PKCS1v15): padding_enum = backend._lib.RSA_PKCS1_PADDING elif isinstance(padding, PSS): if not isinstance(padding._mgf, MGF1): raise UnsupportedAlgorithm( "Only MGF1 is supported by this backend.", _Reasons.UNSUPPORTED_MGF, ) # Size of key in bytes - 2 is the maximum # PSS signature length (salt length is checked later) if pkey_size - algorithm.digest_size - 2 < 0: raise ValueError( "Digest too large for key size. Use a larger " "key or different digest." 
) padding_enum = backend._lib.RSA_PKCS1_PSS_PADDING else: raise UnsupportedAlgorithm( "{} is not supported by this backend.".format(padding.name), _Reasons.UNSUPPORTED_PADDING, ) return padding_enum def _rsa_sig_setup(backend, padding, algorithm, key, data, init_func): padding_enum = _rsa_sig_determine_padding(backend, key, padding, algorithm) evp_md = backend._evp_md_non_null_from_algorithm(algorithm) pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL) backend.openssl_assert(pkey_ctx != backend._ffi.NULL) pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free) res = init_func(pkey_ctx) backend.openssl_assert(res == 1) res = backend._lib.EVP_PKEY_CTX_set_signature_md(pkey_ctx, evp_md) if res == 0: backend._consume_errors() raise UnsupportedAlgorithm( "{} is not supported by this backend for RSA signing.".format( algorithm.name ), _Reasons.UNSUPPORTED_HASH, ) res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum) backend.openssl_assert(res > 0) if isinstance(padding, PSS): res = backend._lib.EVP_PKEY_CTX_set_rsa_pss_saltlen( pkey_ctx, _get_rsa_pss_salt_length(padding, key, algorithm) ) backend.openssl_assert(res > 0) mgf1_md = backend._evp_md_non_null_from_algorithm( padding._mgf._algorithm ) res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md) backend.openssl_assert(res > 0) return pkey_ctx def _rsa_sig_sign(backend, padding, algorithm, private_key, data): pkey_ctx = _rsa_sig_setup( backend, padding, algorithm, private_key, data, backend._lib.EVP_PKEY_sign_init, ) buflen = backend._ffi.new("size_t *") res = backend._lib.EVP_PKEY_sign( pkey_ctx, backend._ffi.NULL, buflen, data, len(data) ) backend.openssl_assert(res == 1) buf = backend._ffi.new("unsigned char[]", buflen[0]) res = backend._lib.EVP_PKEY_sign(pkey_ctx, buf, buflen, data, len(data)) if res != 1: errors = backend._consume_errors_with_text() raise ValueError( "Digest or salt length too long for key size. 
Use a larger key " "or shorter salt length if you are specifying a PSS salt", errors, ) return backend._ffi.buffer(buf)[:] def _rsa_sig_verify(backend, padding, algorithm, public_key, signature, data): pkey_ctx = _rsa_sig_setup( backend, padding, algorithm, public_key, data, backend._lib.EVP_PKEY_verify_init, ) res = backend._lib.EVP_PKEY_verify( pkey_ctx, signature, len(signature), data, len(data) ) # The previous call can return negative numbers in the event of an # error. This is not a signature failure but we need to fail if it # occurs. backend.openssl_assert(res >= 0) if res == 0: backend._consume_errors() raise InvalidSignature @utils.register_interface(AsymmetricSignatureContext) class _RSASignatureContext(object): def __init__(self, backend, private_key, padding, algorithm): self._backend = backend self._private_key = private_key # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However # we need to make a pointless call to it here so we maintain the # API of erroring on init with this context if the values are invalid. _rsa_sig_determine_padding(backend, private_key, padding, algorithm) self._padding = padding self._algorithm = algorithm self._hash_ctx = hashes.Hash(self._algorithm, self._backend) def update(self, data): self._hash_ctx.update(data) def finalize(self): return _rsa_sig_sign( self._backend, self._padding, self._algorithm, self._private_key, self._hash_ctx.finalize(), ) @utils.register_interface(AsymmetricVerificationContext) class _RSAVerificationContext(object): def __init__(self, backend, public_key, signature, padding, algorithm): self._backend = backend self._public_key = public_key self._signature = signature self._padding = padding # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However # we need to make a pointless call to it here so we maintain the # API of erroring on init with this context if the values are invalid. 
_rsa_sig_determine_padding(backend, public_key, padding, algorithm) padding = padding self._algorithm = algorithm self._hash_ctx = hashes.Hash(self._algorithm, self._backend) def update(self, data): self._hash_ctx.update(data) def verify(self): return _rsa_sig_verify( self._backend, self._padding, self._algorithm, self._public_key, self._signature, self._hash_ctx.finalize(), ) @utils.register_interface(RSAPrivateKeyWithSerialization) class _RSAPrivateKey(object): def __init__(self, backend, rsa_cdata, evp_pkey): res = backend._lib.RSA_check_key(rsa_cdata) if res != 1: errors = backend._consume_errors_with_text() raise ValueError("Invalid private key", errors) self._backend = backend self._rsa_cdata = rsa_cdata self._evp_pkey = evp_pkey n = self._backend._ffi.new("BIGNUM **") self._backend._lib.RSA_get0_key( self._rsa_cdata, n, self._backend._ffi.NULL, self._backend._ffi.NULL, ) self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) self._key_size = self._backend._lib.BN_num_bits(n[0]) key_size = utils.read_only_property("_key_size") def signer(self, padding, algorithm): _warn_sign_verify_deprecated() _check_not_prehashed(algorithm) return _RSASignatureContext(self._backend, self, padding, algorithm) def decrypt(self, ciphertext, padding): key_size_bytes = (self.key_size + 7) // 8 if key_size_bytes != len(ciphertext): raise ValueError("Ciphertext length must be equal to key size.") return _enc_dec_rsa(self._backend, self, ciphertext, padding) def public_key(self): ctx = self._backend._lib.RSAPublicKey_dup(self._rsa_cdata) self._backend.openssl_assert(ctx != self._backend._ffi.NULL) ctx = self._backend._ffi.gc(ctx, self._backend._lib.RSA_free) res = self._backend._lib.RSA_blinding_on(ctx, self._backend._ffi.NULL) self._backend.openssl_assert(res == 1) evp_pkey = self._backend._rsa_cdata_to_evp_pkey(ctx) return _RSAPublicKey(self._backend, ctx, evp_pkey) def private_numbers(self): n = self._backend._ffi.new("BIGNUM **") e = self._backend._ffi.new("BIGNUM **") d 
= self._backend._ffi.new("BIGNUM **") p = self._backend._ffi.new("BIGNUM **") q = self._backend._ffi.new("BIGNUM **") dmp1 = self._backend._ffi.new("BIGNUM **") dmq1 = self._backend._ffi.new("BIGNUM **") iqmp = self._backend._ffi.new("BIGNUM **") self._backend._lib.RSA_get0_key(self._rsa_cdata, n, e, d) self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) self._backend.openssl_assert(d[0] != self._backend._ffi.NULL) self._backend._lib.RSA_get0_factors(self._rsa_cdata, p, q) self._backend.openssl_assert(p[0] != self._backend._ffi.NULL) self._backend.openssl_assert(q[0] != self._backend._ffi.NULL) self._backend._lib.RSA_get0_crt_params( self._rsa_cdata, dmp1, dmq1, iqmp ) self._backend.openssl_assert(dmp1[0] != self._backend._ffi.NULL) self._backend.openssl_assert(dmq1[0] != self._backend._ffi.NULL) self._backend.openssl_assert(iqmp[0] != self._backend._ffi.NULL) return rsa.RSAPrivateNumbers( p=self._backend._bn_to_int(p[0]), q=self._backend._bn_to_int(q[0]), d=self._backend._bn_to_int(d[0]), dmp1=self._backend._bn_to_int(dmp1[0]), dmq1=self._backend._bn_to_int(dmq1[0]), iqmp=self._backend._bn_to_int(iqmp[0]), public_numbers=rsa.RSAPublicNumbers( e=self._backend._bn_to_int(e[0]), n=self._backend._bn_to_int(n[0]), ), ) def private_bytes(self, encoding, format, encryption_algorithm): return self._backend._private_key_bytes( encoding, format, encryption_algorithm, self, self._evp_pkey, self._rsa_cdata, ) def sign(self, data, padding, algorithm): data, algorithm = _calculate_digest_and_algorithm( self._backend, data, algorithm ) return _rsa_sig_sign(self._backend, padding, algorithm, self, data) @utils.register_interface(RSAPublicKeyWithSerialization) class _RSAPublicKey(object): def __init__(self, backend, rsa_cdata, evp_pkey): self._backend = backend self._rsa_cdata = rsa_cdata self._evp_pkey = evp_pkey n = self._backend._ffi.new("BIGNUM **") self._backend._lib.RSA_get0_key( self._rsa_cdata, n, 
self._backend._ffi.NULL, self._backend._ffi.NULL, ) self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) self._key_size = self._backend._lib.BN_num_bits(n[0]) key_size = utils.read_only_property("_key_size") def verifier(self, signature, padding, algorithm): _warn_sign_verify_deprecated() utils._check_bytes("signature", signature) _check_not_prehashed(algorithm) return _RSAVerificationContext( self._backend, self, signature, padding, algorithm ) def encrypt(self, plaintext, padding): return _enc_dec_rsa(self._backend, self, plaintext, padding) def public_numbers(self): n = self._backend._ffi.new("BIGNUM **") e = self._backend._ffi.new("BIGNUM **") self._backend._lib.RSA_get0_key( self._rsa_cdata, n, e, self._backend._ffi.NULL ) self._backend.openssl_assert(n[0] != self._backend._ffi.NULL) self._backend.openssl_assert(e[0] != self._backend._ffi.NULL) return rsa.RSAPublicNumbers( e=self._backend._bn_to_int(e[0]), n=self._backend._bn_to_int(n[0]), ) def public_bytes(self, encoding, format): return self._backend._public_key_bytes( encoding, format, self, self._evp_pkey, self._rsa_cdata ) def verify(self, signature, data, padding, algorithm): data, algorithm = _calculate_digest_and_algorithm( self._backend, data, algorithm ) return _rsa_sig_verify( self._backend, padding, algorithm, self, signature, data )
./CrossVul/dataset_final_sorted/CWE-385/py/good_4296_2
crossvul-python_data_bad_4296_2
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""OpenSSL backend for RSA: encryption/decryption and signing/verification
implemented on top of the EVP_PKEY_* API through the backend's cffi bindings.
"""

from __future__ import absolute_import, division, print_function

from cryptography import utils
from cryptography.exceptions import (
    InvalidSignature,
    UnsupportedAlgorithm,
    _Reasons,
)
from cryptography.hazmat.backends.openssl.utils import (
    _calculate_digest_and_algorithm,
    _check_not_prehashed,
    _warn_sign_verify_deprecated,
)
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import (
    AsymmetricSignatureContext,
    AsymmetricVerificationContext,
    rsa,
)
from cryptography.hazmat.primitives.asymmetric.padding import (
    AsymmetricPadding,
    MGF1,
    OAEP,
    PKCS1v15,
    PSS,
    calculate_max_pss_salt_length,
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
    RSAPrivateKeyWithSerialization,
    RSAPublicKeyWithSerialization,
)


def _get_rsa_pss_salt_length(pss, key, hash_algorithm):
    """Resolve a PSS salt length: either the explicit value stored on the
    padding object, or the maximum salt length for this key/digest pair when
    the MAX_LENGTH sentinel was requested."""
    salt = pss._salt_length
    if salt is MGF1.MAX_LENGTH or salt is PSS.MAX_LENGTH:
        return calculate_max_pss_salt_length(key, hash_algorithm)
    else:
        return salt


def _enc_dec_rsa(backend, key, data, padding):
    """Validate the padding object and map it to the OpenSSL padding enum,
    then delegate the actual encrypt/decrypt to ``_enc_dec_rsa_pkey_ctx``.

    Raises TypeError for non-padding objects and UnsupportedAlgorithm for
    paddings/MGFs this backend cannot handle.
    """
    if not isinstance(padding, AsymmetricPadding):
        raise TypeError("Padding must be an instance of AsymmetricPadding.")

    if isinstance(padding, PKCS1v15):
        padding_enum = backend._lib.RSA_PKCS1_PADDING
    elif isinstance(padding, OAEP):
        padding_enum = backend._lib.RSA_PKCS1_OAEP_PADDING

        if not isinstance(padding._mgf, MGF1):
            raise UnsupportedAlgorithm(
                "Only MGF1 is supported by this backend.",
                _Reasons.UNSUPPORTED_MGF,
            )

        if not backend.rsa_padding_supported(padding):
            raise UnsupportedAlgorithm(
                "This combination of padding and hash algorithm is not "
                "supported by this backend.",
                _Reasons.UNSUPPORTED_PADDING,
            )

    else:
        raise UnsupportedAlgorithm(
            "{} is not supported by this backend.".format(padding.name),
            _Reasons.UNSUPPORTED_PADDING,
        )

    return _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding)


def _enc_dec_rsa_pkey_ctx(backend, key, data, padding_enum, padding):
    """Run an RSA encrypt (public key) or decrypt (private key) through an
    EVP_PKEY_CTX configured with the requested padding, and return the
    resulting bytes."""
    # The key type selects the direction: public keys encrypt, private
    # keys decrypt.
    if isinstance(key, _RSAPublicKey):
        init = backend._lib.EVP_PKEY_encrypt_init
        crypt = backend._lib.EVP_PKEY_encrypt
    else:
        init = backend._lib.EVP_PKEY_decrypt_init
        crypt = backend._lib.EVP_PKEY_decrypt

    pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL)
    backend.openssl_assert(pkey_ctx != backend._ffi.NULL)
    pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free)
    res = init(pkey_ctx)
    backend.openssl_assert(res == 1)
    res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum)
    backend.openssl_assert(res > 0)
    buf_size = backend._lib.EVP_PKEY_size(key._evp_pkey)
    backend.openssl_assert(buf_size > 0)
    if isinstance(padding, OAEP) and backend._lib.Cryptography_HAS_RSA_OAEP_MD:
        # Configure the MGF1 digest and the OAEP digest separately; older
        # OpenSSL builds without RSA_OAEP_MD support fall back to defaults.
        mgf1_md = backend._evp_md_non_null_from_algorithm(
            padding._mgf._algorithm
        )
        res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md)
        backend.openssl_assert(res > 0)
        oaep_md = backend._evp_md_non_null_from_algorithm(padding._algorithm)
        res = backend._lib.EVP_PKEY_CTX_set_rsa_oaep_md(pkey_ctx, oaep_md)
        backend.openssl_assert(res > 0)

    if (
        isinstance(padding, OAEP)
        and padding._label is not None
        and len(padding._label) > 0
    ):
        # set0_rsa_oaep_label takes ownership of the char * so we need to
        # copy it into some new memory
        labelptr = backend._lib.OPENSSL_malloc(len(padding._label))
        backend.openssl_assert(labelptr != backend._ffi.NULL)
        backend._ffi.memmove(labelptr, padding._label, len(padding._label))
        res = backend._lib.EVP_PKEY_CTX_set0_rsa_oaep_label(
            pkey_ctx, labelptr, len(padding._label)
        )
        backend.openssl_assert(res == 1)

    outlen = backend._ffi.new("size_t *", buf_size)
    buf = backend._ffi.new("unsigned char[]", buf_size)
    res = crypt(pkey_ctx, buf, outlen, data, len(data))
    if res <= 0:
        _handle_rsa_enc_dec_error(backend, key)

    return backend._ffi.buffer(buf)[: outlen[0]]


def _handle_rsa_enc_dec_error(backend, key):
    """Translate an OpenSSL encrypt/decrypt failure into a ValueError.

    NOTE(review): the decrypt branch raises immediately on padding failure.
    Error-distinguishing behavior on RSA decryption is the classic surface
    for Bleichenbacher-style padding-oracle/timing attacks — confirm whether
    callers need a constant-time failure mode here.
    """
    errors = backend._consume_errors_with_text()
    if isinstance(key, _RSAPublicKey):
        raise ValueError(
            "Data too long for key size. Encrypt less data or use a "
            "larger key size.",
            errors,
        )
    else:
        raise ValueError("Decryption failed.", errors)


def _rsa_sig_determine_padding(backend, key, padding, algorithm):
    """Validate a signature padding object against this backend and this
    key, returning the matching OpenSSL padding enum."""
    if not isinstance(padding, AsymmetricPadding):
        raise TypeError("Expected provider of AsymmetricPadding.")

    pkey_size = backend._lib.EVP_PKEY_size(key._evp_pkey)
    backend.openssl_assert(pkey_size > 0)

    if isinstance(padding, PKCS1v15):
        padding_enum = backend._lib.RSA_PKCS1_PADDING
    elif isinstance(padding, PSS):
        if not isinstance(padding._mgf, MGF1):
            raise UnsupportedAlgorithm(
                "Only MGF1 is supported by this backend.",
                _Reasons.UNSUPPORTED_MGF,
            )

        # Size of key in bytes - 2 is the maximum
        # PSS signature length (salt length is checked later)
        if pkey_size - algorithm.digest_size - 2 < 0:
            raise ValueError(
                "Digest too large for key size. Use a larger "
                "key or different digest."
            )

        padding_enum = backend._lib.RSA_PKCS1_PSS_PADDING
    else:
        raise UnsupportedAlgorithm(
            "{} is not supported by this backend.".format(padding.name),
            _Reasons.UNSUPPORTED_PADDING,
        )

    return padding_enum


def _rsa_sig_setup(backend, padding, algorithm, key, data, init_func):
    """Create and configure an EVP_PKEY_CTX for signing or verification.

    ``init_func`` is either EVP_PKEY_sign_init or EVP_PKEY_verify_init.
    ``data`` is accepted for interface symmetry but not used during setup.
    """
    padding_enum = _rsa_sig_determine_padding(backend, key, padding, algorithm)
    evp_md = backend._evp_md_non_null_from_algorithm(algorithm)
    pkey_ctx = backend._lib.EVP_PKEY_CTX_new(key._evp_pkey, backend._ffi.NULL)
    backend.openssl_assert(pkey_ctx != backend._ffi.NULL)
    pkey_ctx = backend._ffi.gc(pkey_ctx, backend._lib.EVP_PKEY_CTX_free)
    res = init_func(pkey_ctx)
    backend.openssl_assert(res == 1)
    res = backend._lib.EVP_PKEY_CTX_set_signature_md(pkey_ctx, evp_md)
    if res == 0:
        backend._consume_errors()
        raise UnsupportedAlgorithm(
            "{} is not supported by this backend for RSA signing.".format(
                algorithm.name
            ),
            _Reasons.UNSUPPORTED_HASH,
        )
    res = backend._lib.EVP_PKEY_CTX_set_rsa_padding(pkey_ctx, padding_enum)
    backend.openssl_assert(res > 0)
    if isinstance(padding, PSS):
        res = backend._lib.EVP_PKEY_CTX_set_rsa_pss_saltlen(
            pkey_ctx, _get_rsa_pss_salt_length(padding, key, algorithm)
        )
        backend.openssl_assert(res > 0)

        mgf1_md = backend._evp_md_non_null_from_algorithm(
            padding._mgf._algorithm
        )
        res = backend._lib.EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf1_md)
        backend.openssl_assert(res > 0)

    return pkey_ctx


def _rsa_sig_sign(backend, padding, algorithm, private_key, data):
    """Sign pre-hashed ``data`` with ``private_key`` and return the raw
    signature bytes.

    Raises ValueError when the digest or PSS salt does not fit the key.
    """
    pkey_ctx = _rsa_sig_setup(
        backend,
        padding,
        algorithm,
        private_key,
        data,
        backend._lib.EVP_PKEY_sign_init,
    )
    # First call with a NULL buffer queries the required signature size.
    buflen = backend._ffi.new("size_t *")
    res = backend._lib.EVP_PKEY_sign(
        pkey_ctx, backend._ffi.NULL, buflen, data, len(data)
    )
    backend.openssl_assert(res == 1)
    buf = backend._ffi.new("unsigned char[]", buflen[0])
    res = backend._lib.EVP_PKEY_sign(pkey_ctx, buf, buflen, data, len(data))
    if res != 1:
        errors = backend._consume_errors_with_text()
        raise ValueError(
            "Digest or salt length too long for key size. Use a larger key "
            "or shorter salt length if you are specifying a PSS salt",
            errors,
        )

    return backend._ffi.buffer(buf)[:]


def _rsa_sig_verify(backend, padding, algorithm, public_key, signature, data):
    """Verify ``signature`` over pre-hashed ``data``; raise InvalidSignature
    on mismatch."""
    pkey_ctx = _rsa_sig_setup(
        backend,
        padding,
        algorithm,
        public_key,
        data,
        backend._lib.EVP_PKEY_verify_init,
    )
    res = backend._lib.EVP_PKEY_verify(
        pkey_ctx, signature, len(signature), data, len(data)
    )
    # The previous call can return negative numbers in the event of an
    # error. This is not a signature failure but we need to fail if it
    # occurs.
    backend.openssl_assert(res >= 0)
    if res == 0:
        backend._consume_errors()
        raise InvalidSignature


@utils.register_interface(AsymmetricSignatureContext)
class _RSASignatureContext(object):
    """Deprecated streaming signature context backing ``signer()``."""

    def __init__(self, backend, private_key, padding, algorithm):
        self._backend = backend
        self._private_key = private_key

        # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However
        # we need to make a pointless call to it here so we maintain the
        # API of erroring on init with this context if the values are invalid.
        _rsa_sig_determine_padding(backend, private_key, padding, algorithm)
        self._padding = padding
        self._algorithm = algorithm
        self._hash_ctx = hashes.Hash(self._algorithm, self._backend)

    def update(self, data):
        self._hash_ctx.update(data)

    def finalize(self):
        return _rsa_sig_sign(
            self._backend,
            self._padding,
            self._algorithm,
            self._private_key,
            self._hash_ctx.finalize(),
        )


@utils.register_interface(AsymmetricVerificationContext)
class _RSAVerificationContext(object):
    """Deprecated streaming verification context backing ``verifier()``."""

    def __init__(self, backend, public_key, signature, padding, algorithm):
        self._backend = backend
        self._public_key = public_key
        self._signature = signature
        self._padding = padding
        # We now call _rsa_sig_determine_padding in _rsa_sig_setup. However
        # we need to make a pointless call to it here so we maintain the
        # API of erroring on init with this context if the values are invalid.
        _rsa_sig_determine_padding(backend, public_key, padding, algorithm)
        # (Removed a dead ``padding = padding`` no-op left over from an
        # earlier refactor; the attribute is already assigned above.)
        self._algorithm = algorithm
        self._hash_ctx = hashes.Hash(self._algorithm, self._backend)

    def update(self, data):
        self._hash_ctx.update(data)

    def verify(self):
        return _rsa_sig_verify(
            self._backend,
            self._padding,
            self._algorithm,
            self._public_key,
            self._signature,
            self._hash_ctx.finalize(),
        )


@utils.register_interface(RSAPrivateKeyWithSerialization)
class _RSAPrivateKey(object):
    """RSA private key wrapping an OpenSSL RSA/EVP_PKEY pair."""

    def __init__(self, backend, rsa_cdata, evp_pkey):
        # Reject mathematically inconsistent keys up front.
        res = backend._lib.RSA_check_key(rsa_cdata)
        if res != 1:
            errors = backend._consume_errors_with_text()
            raise ValueError("Invalid private key", errors)

        self._backend = backend
        self._rsa_cdata = rsa_cdata
        self._evp_pkey = evp_pkey

        # Key size in bits is the bit length of the modulus n.
        n = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(
            self._rsa_cdata,
            n,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
        )
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._key_size = self._backend._lib.BN_num_bits(n[0])

    key_size = utils.read_only_property("_key_size")

    def signer(self, padding, algorithm):
        _warn_sign_verify_deprecated()
        _check_not_prehashed(algorithm)
        return _RSASignatureContext(self._backend, self, padding, algorithm)

    def decrypt(self, ciphertext, padding):
        key_size_bytes = (self.key_size + 7) // 8
        if key_size_bytes != len(ciphertext):
            raise ValueError("Ciphertext length must be equal to key size.")

        return _enc_dec_rsa(self._backend, self, ciphertext, padding)

    def public_key(self):
        ctx = self._backend._lib.RSAPublicKey_dup(self._rsa_cdata)
        self._backend.openssl_assert(ctx != self._backend._ffi.NULL)
        ctx = self._backend._ffi.gc(ctx, self._backend._lib.RSA_free)
        res = self._backend._lib.RSA_blinding_on(ctx, self._backend._ffi.NULL)
        self._backend.openssl_assert(res == 1)
        evp_pkey = self._backend._rsa_cdata_to_evp_pkey(ctx)
        return _RSAPublicKey(self._backend, ctx, evp_pkey)

    def private_numbers(self):
        """Extract (p, q, d, dmp1, dmq1, iqmp, e, n) as Python ints."""
        n = self._backend._ffi.new("BIGNUM **")
        e = self._backend._ffi.new("BIGNUM **")
        d = self._backend._ffi.new("BIGNUM **")
        p = self._backend._ffi.new("BIGNUM **")
        q = self._backend._ffi.new("BIGNUM **")
        dmp1 = self._backend._ffi.new("BIGNUM **")
        dmq1 = self._backend._ffi.new("BIGNUM **")
        iqmp = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(self._rsa_cdata, n, e, d)
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(e[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(d[0] != self._backend._ffi.NULL)
        self._backend._lib.RSA_get0_factors(self._rsa_cdata, p, q)
        self._backend.openssl_assert(p[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(q[0] != self._backend._ffi.NULL)
        self._backend._lib.RSA_get0_crt_params(
            self._rsa_cdata, dmp1, dmq1, iqmp
        )
        self._backend.openssl_assert(dmp1[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(dmq1[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(iqmp[0] != self._backend._ffi.NULL)
        return rsa.RSAPrivateNumbers(
            p=self._backend._bn_to_int(p[0]),
            q=self._backend._bn_to_int(q[0]),
            d=self._backend._bn_to_int(d[0]),
            dmp1=self._backend._bn_to_int(dmp1[0]),
            dmq1=self._backend._bn_to_int(dmq1[0]),
            iqmp=self._backend._bn_to_int(iqmp[0]),
            public_numbers=rsa.RSAPublicNumbers(
                e=self._backend._bn_to_int(e[0]),
                n=self._backend._bn_to_int(n[0]),
            ),
        )

    def private_bytes(self, encoding, format, encryption_algorithm):
        return self._backend._private_key_bytes(
            encoding,
            format,
            encryption_algorithm,
            self,
            self._evp_pkey,
            self._rsa_cdata,
        )

    def sign(self, data, padding, algorithm):
        data, algorithm = _calculate_digest_and_algorithm(
            self._backend, data, algorithm
        )
        return _rsa_sig_sign(self._backend, padding, algorithm, self, data)


@utils.register_interface(RSAPublicKeyWithSerialization)
class _RSAPublicKey(object):
    """RSA public key wrapping an OpenSSL RSA/EVP_PKEY pair."""

    def __init__(self, backend, rsa_cdata, evp_pkey):
        self._backend = backend
        self._rsa_cdata = rsa_cdata
        self._evp_pkey = evp_pkey

        # Key size in bits is the bit length of the modulus n.
        n = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(
            self._rsa_cdata,
            n,
            self._backend._ffi.NULL,
            self._backend._ffi.NULL,
        )
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._key_size = self._backend._lib.BN_num_bits(n[0])

    key_size = utils.read_only_property("_key_size")

    def verifier(self, signature, padding, algorithm):
        _warn_sign_verify_deprecated()
        utils._check_bytes("signature", signature)
        _check_not_prehashed(algorithm)
        return _RSAVerificationContext(
            self._backend, self, signature, padding, algorithm
        )

    def encrypt(self, plaintext, padding):
        return _enc_dec_rsa(self._backend, self, plaintext, padding)

    def public_numbers(self):
        n = self._backend._ffi.new("BIGNUM **")
        e = self._backend._ffi.new("BIGNUM **")
        self._backend._lib.RSA_get0_key(
            self._rsa_cdata, n, e, self._backend._ffi.NULL
        )
        self._backend.openssl_assert(n[0] != self._backend._ffi.NULL)
        self._backend.openssl_assert(e[0] != self._backend._ffi.NULL)
        return rsa.RSAPublicNumbers(
            e=self._backend._bn_to_int(e[0]),
            n=self._backend._bn_to_int(n[0]),
        )

    def public_bytes(self, encoding, format):
        return self._backend._public_key_bytes(
            encoding, format, self, self._evp_pkey, self._rsa_cdata
        )

    def verify(self, signature, data, padding, algorithm):
        data, algorithm = _calculate_digest_and_algorithm(
            self._backend, data, algorithm
        )
        return _rsa_sig_verify(
            self._backend, padding, algorithm, self, signature, data
        )
./CrossVul/dataset_final_sorted/CWE-385/py/bad_4296_2
crossvul-python_data_bad_4650_1
"""GraphQL mutations for the checkout lifecycle: creation, line management,
address/shipping/email updates, promo codes, and completion into an order.
"""
from typing import List, Optional, Tuple

import graphene
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import transaction
from django.db.models import Prefetch

from ...account.error_codes import AccountErrorCode
from ...checkout import models
from ...checkout.error_codes import CheckoutErrorCode
from ...checkout.utils import (
    abort_order_data,
    add_promo_code_to_checkout,
    add_variant_to_checkout,
    change_billing_address_in_checkout,
    change_shipping_address_in_checkout,
    clean_checkout,
    create_order,
    get_user_checkout,
    get_valid_shipping_methods_for_checkout,
    prepare_order_data,
    recalculate_checkout_discount,
    remove_promo_code_from_checkout,
)
from ...core import analytics
from ...core.exceptions import InsufficientStock
from ...core.permissions import OrderPermissions
from ...core.taxes import TaxError
from ...core.utils.url import validate_storefront_url
from ...discount import models as voucher_model
from ...payment import PaymentError, gateway, models as payment_models
from ...payment.interface import AddressData
from ...payment.utils import store_customer_id
from ...product import models as product_models
from ...warehouse.availability import check_stock_quantity, get_available_quantity
from ..account.i18n import I18nMixin
from ..account.types import AddressInput, User
from ..core.mutations import (
    BaseMutation,
    ClearMetaBaseMutation,
    ModelMutation,
    UpdateMetaBaseMutation,
)
from ..core.types.common import CheckoutError
from ..core.utils import from_global_id_strict_type
from ..order.types import Order
from ..product.types import ProductVariant
from ..shipping.types import ShippingMethod
from .types import Checkout, CheckoutLine

ERROR_DOES_NOT_SHIP = "This checkout doesn't need shipping"


def clean_shipping_method(
    checkout: models.Checkout, method: Optional[models.ShippingMethod], discounts
) -> bool:
    """Check if current shipping method is valid."""
    if not method:
        # no shipping method was provided, it is valid
        return True

    if not checkout.is_shipping_required():
        raise ValidationError(
            ERROR_DOES_NOT_SHIP, code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED.value
        )

    if not checkout.shipping_address:
        raise ValidationError(
            "Cannot choose a shipping method for a checkout without the "
            "shipping address.",
            code=CheckoutErrorCode.SHIPPING_ADDRESS_NOT_SET.value,
        )

    valid_methods = get_valid_shipping_methods_for_checkout(checkout, discounts)
    return method in valid_methods


def update_checkout_shipping_method_if_invalid(checkout: models.Checkout, discounts):
    """Drop or replace the checkout's shipping method when it is no longer
    applicable (empty checkout, digital-only checkout, or invalid method)."""
    # remove shipping method when empty checkout
    if checkout.quantity == 0 or not checkout.is_shipping_required():
        checkout.shipping_method = None
        checkout.save(update_fields=["shipping_method", "last_change"])

    is_valid = clean_shipping_method(
        checkout=checkout, method=checkout.shipping_method, discounts=discounts
    )

    if not is_valid:
        cheapest_alternative = get_valid_shipping_methods_for_checkout(
            checkout, discounts
        ).first()
        checkout.shipping_method = cheapest_alternative
        checkout.save(update_fields=["shipping_method", "last_change"])


def check_lines_quantity(variants, quantities, country):
    """Check if stock is sufficient for each line in the list of dicts.

    Raises ValidationError for negative quantities, quantities above the
    configured per-line maximum, or insufficient stock.
    """
    for variant, quantity in zip(variants, quantities):
        if quantity < 0:
            raise ValidationError(
                {
                    "quantity": ValidationError(
                        "The quantity should be higher than zero.",
                        code=CheckoutErrorCode.ZERO_QUANTITY,
                    )
                }
            )
        if quantity > settings.MAX_CHECKOUT_LINE_QUANTITY:
            raise ValidationError(
                {
                    "quantity": ValidationError(
                        "Cannot add more than %d times this item."
                        "" % settings.MAX_CHECKOUT_LINE_QUANTITY,
                        code=CheckoutErrorCode.QUANTITY_GREATER_THAN_LIMIT,
                    )
                }
            )
        try:
            check_stock_quantity(variant, country, quantity)
        except InsufficientStock as e:
            available_quantity = get_available_quantity(e.item, country)
            message = (
                "Could not add item "
                + "%(item_name)s. Only %(remaining)d remaining in stock."
                % {
                    "remaining": available_quantity,
                    "item_name": e.item.display_product(),
                }
            )
            raise ValidationError({"quantity": ValidationError(message, code=e.code)})


class CheckoutLineInput(graphene.InputObjectType):
    quantity = graphene.Int(required=True, description="The number of items purchased.")
    variant_id = graphene.ID(required=True, description="ID of the product variant.")


class CheckoutCreateInput(graphene.InputObjectType):
    lines = graphene.List(
        CheckoutLineInput,
        description=(
            "A list of checkout lines, each containing information about "
            "an item in the checkout."
        ),
        required=True,
    )
    email = graphene.String(description="The customer's email address.")
    shipping_address = AddressInput(
        description=(
            "The mailing address to where the checkout will be shipped. "
            "Note: the address will be ignored if the checkout "
            "doesn't contain shippable items."
        )
    )
    billing_address = AddressInput(description="Billing address of the customer.")


class CheckoutCreate(ModelMutation, I18nMixin):
    created = graphene.Field(
        graphene.Boolean,
        description=(
            "Whether the checkout was created or the current active one was returned. "
            "Refer to checkoutLinesAdd and checkoutLinesUpdate to merge a cart "
            "with an active checkout."
        ),
    )

    class Arguments:
        input = CheckoutCreateInput(
            required=True, description="Fields required to create checkout."
        )

    class Meta:
        description = "Create a new checkout."
        model = models.Checkout
        return_field_name = "checkout"
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def process_checkout_lines(
        cls, lines, country
    ) -> Tuple[List[product_models.ProductVariant], List[int]]:
        """Resolve line inputs into variants and quantities, validating stock."""
        variant_ids = [line.get("variant_id") for line in lines]
        variants = cls.get_nodes_or_error(
            variant_ids,
            "variant_id",
            ProductVariant,
            qs=product_models.ProductVariant.objects.prefetch_related(
                "product__product_type"
            ),
        )
        quantities = [line.get("quantity") for line in lines]
        check_lines_quantity(variants, quantities, country)
        return variants, quantities

    @classmethod
    def retrieve_shipping_address(cls, user, data: dict) -> Optional[models.Address]:
        # Explicit input wins; otherwise fall back to the user's default.
        if "shipping_address" in data:
            return cls.validate_address(data["shipping_address"])
        if user.is_authenticated:
            return user.default_shipping_address
        return None

    @classmethod
    def retrieve_billing_address(cls, user, data: dict) -> Optional[models.Address]:
        # Explicit input wins; otherwise fall back to the user's default.
        if "billing_address" in data:
            return cls.validate_address(data["billing_address"])
        if user.is_authenticated:
            return user.default_billing_address
        return None

    @classmethod
    def clean_input(cls, info, instance: models.Checkout, data, input_cls=None):
        cleaned_input = super().clean_input(info, instance, data)
        user = info.context.user
        country = info.context.country.code

        # Resolve and process the lines, retrieving the variants and quantities
        lines = data.pop("lines", None)
        if lines:
            (
                cleaned_input["variants"],
                cleaned_input["quantities"],
            ) = cls.process_checkout_lines(lines, country)

        cleaned_input["shipping_address"] = cls.retrieve_shipping_address(user, data)
        cleaned_input["billing_address"] = cls.retrieve_billing_address(user, data)

        # Use authenticated user's email as default email
        if user.is_authenticated:
            email = data.pop("email", None)
            cleaned_input["email"] = email or user.email

        return cleaned_input

    @classmethod
    def save_addresses(cls, instance: models.Checkout, cleaned_input: dict):
        """Persist provided addresses and attach copies to the checkout."""
        shipping_address = cleaned_input.get("shipping_address")
        billing_address = cleaned_input.get("billing_address")

        updated_fields = ["last_change"]

        if shipping_address and instance.is_shipping_required():
            shipping_address.save()
            instance.shipping_address = shipping_address.get_copy()
            updated_fields.append("shipping_address")
        if billing_address:
            billing_address.save()
            instance.billing_address = billing_address.get_copy()
            updated_fields.append("billing_address")

        # Note django will simply return if the list is empty
        instance.save(update_fields=updated_fields)

    @classmethod
    @transaction.atomic()
    def save(cls, info, instance: models.Checkout, cleaned_input):
        # Create the checkout object
        instance.save()
        country = info.context.country
        instance.set_country(country.code, commit=True)

        # Retrieve the lines to create
        variants = cleaned_input.get("variants")
        quantities = cleaned_input.get("quantities")

        # Create the checkout lines
        if variants and quantities:
            for variant, quantity in zip(variants, quantities):
                try:
                    add_variant_to_checkout(instance, variant, quantity)
                except InsufficientStock as exc:
                    raise ValidationError(
                        f"Insufficient product stock: {exc.item}", code=exc.code
                    )

        # Save provided addresses and associate them to the checkout
        cls.save_addresses(instance, cleaned_input)

    @classmethod
    def perform_mutation(cls, _root, info, **data):
        user = info.context.user

        # `perform_mutation` is overridden to properly get or create a checkout
        # instance here and abort mutation if needed.
        if user.is_authenticated:
            checkout, _ = get_user_checkout(user)

            if checkout is not None:
                # If user has an active checkout, return it without any
                # modifications.
                return CheckoutCreate(checkout=checkout, created=False)

            checkout = models.Checkout(user=user)
        else:
            checkout = models.Checkout()

        cleaned_input = cls.clean_input(info, checkout, data.get("input"))
        checkout = cls.construct_instance(checkout, cleaned_input)
        cls.clean_instance(info, checkout)
        cls.save(info, checkout, cleaned_input)
        cls._save_m2m(info, checkout, cleaned_input)
        return CheckoutCreate(checkout=checkout, created=True)


class CheckoutLinesAdd(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="The ID of the checkout.", required=True)
        lines = graphene.List(
            CheckoutLineInput,
            required=True,
            description=(
                "A list of checkout lines, each containing information about "
                "an item in the checkout."
            ),
        )

    class Meta:
        description = "Adds a checkout line to the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, lines, replace=False):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )

        variant_ids = [line.get("variant_id") for line in lines]
        variants = cls.get_nodes_or_error(variant_ids, "variant_id", ProductVariant)
        quantities = [line.get("quantity") for line in lines]

        check_lines_quantity(variants, quantities, checkout.get_country())

        if variants and quantities:
            for variant, quantity in zip(variants, quantities):
                try:
                    add_variant_to_checkout(
                        checkout, variant, quantity, replace=replace
                    )
                except InsufficientStock as exc:
                    raise ValidationError(
                        f"Insufficient product stock: {exc.item}", code=exc.code
                    )

        update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
        recalculate_checkout_discount(checkout, info.context.discounts)

        return CheckoutLinesAdd(checkout=checkout)


class CheckoutLinesUpdate(CheckoutLinesAdd):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Meta:
        description = "Updates checkout line in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, root, info, checkout_id, lines):
        # Same as CheckoutLinesAdd but quantities replace existing ones.
        return super().perform_mutation(root, info, checkout_id, lines, replace=True)


class CheckoutLineDelete(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="The ID of the checkout.", required=True)
        line_id = graphene.ID(description="ID of the checkout line to delete.")

    class Meta:
        description = "Deletes a CheckoutLine."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, line_id):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        line = cls.get_node_or_error(
            info, line_id, only_type=CheckoutLine, field="line_id"
        )

        if line and line in checkout.lines.all():
            line.delete()

        update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)
        recalculate_checkout_discount(checkout, info.context.discounts)

        return CheckoutLineDelete(checkout=checkout)


class CheckoutCustomerAttach(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        customer_id = graphene.ID(required=True, description="The ID of the customer.")

    class Meta:
        description = "Sets the customer as the owner of the checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, customer_id):
        # NOTE(review): no authorization check here — any caller that can
        # resolve a customer ID can attach that customer to any checkout.
        # Confirm whether this should require the requester to be the
        # customer themselves or hold an account-management permission.
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        customer = cls.get_node_or_error(
            info, customer_id, only_type=User, field="customer_id"
        )
        checkout.user = customer
        checkout.save(update_fields=["user", "last_change"])
        return CheckoutCustomerAttach(checkout=checkout)


class CheckoutCustomerDetach(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)

    class Meta:
        description = "Removes the user assigned as the owner of the checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        checkout.user = None
        checkout.save(update_fields=["user", "last_change"])
        return CheckoutCustomerDetach(checkout=checkout)


class CheckoutShippingAddressUpdate(BaseMutation, I18nMixin):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        shipping_address = AddressInput(
            required=True,
            description="The mailing address to where the checkout will be shipped.",
        )

    class Meta:
        description = "Update shipping address in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, shipping_address):
        pk = from_global_id_strict_type(checkout_id, Checkout, field="checkout_id")

        try:
            checkout = models.Checkout.objects.prefetch_related(
                "lines__variant__product__product_type"
            ).get(pk=pk)
        except ObjectDoesNotExist:
            raise ValidationError(
                {
                    "checkout_id": ValidationError(
                        f"Couldn't resolve to a node: {checkout_id}",
                        code=CheckoutErrorCode.NOT_FOUND,
                    )
                }
            )

        if not checkout.is_shipping_required():
            raise ValidationError(
                {
                    "shipping_address": ValidationError(
                        ERROR_DOES_NOT_SHIP,
                        code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
                    )
                }
            )

        shipping_address = cls.validate_address(
            shipping_address, instance=checkout.shipping_address, info=info
        )

        update_checkout_shipping_method_if_invalid(checkout, info.context.discounts)

        with transaction.atomic():
            shipping_address.save()
            change_shipping_address_in_checkout(checkout, shipping_address)
        recalculate_checkout_discount(checkout, info.context.discounts)

        return CheckoutShippingAddressUpdate(checkout=checkout)


class CheckoutBillingAddressUpdate(CheckoutShippingAddressUpdate):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(required=True, description="ID of the checkout.")
        billing_address = AddressInput(
            required=True, description="The billing address of the checkout."
        )

    class Meta:
        description = "Update billing address in the existing checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, billing_address):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        billing_address = cls.validate_address(
            billing_address, instance=checkout.billing_address, info=info
        )
        with transaction.atomic():
            billing_address.save()
            change_billing_address_in_checkout(checkout, billing_address)
        return CheckoutBillingAddressUpdate(checkout=checkout)


class CheckoutEmailUpdate(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.")
        email = graphene.String(required=True, description="email.")

    class Meta:
        description = "Updates email address in the existing checkout object."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, email):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )

        checkout.email = email
        cls.clean_instance(info, checkout)
        checkout.save(update_fields=["email", "last_change"])
        return CheckoutEmailUpdate(checkout=checkout)


class CheckoutShippingMethodUpdate(BaseMutation):
    checkout = graphene.Field(Checkout, description="An updated checkout.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.")
        shipping_method_id = graphene.ID(required=True, description="Shipping method.")

    class Meta:
        description = "Updates the shipping address of the checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, shipping_method_id):
        pk = from_global_id_strict_type(
            checkout_id, only_type=Checkout, field="checkout_id"
        )

        try:
            checkout = models.Checkout.objects.prefetch_related(
                "lines__variant__product__collections",
                "lines__variant__product__product_type",
            ).get(pk=pk)
        except ObjectDoesNotExist:
            raise ValidationError(
                {
                    "checkout_id": ValidationError(
                        f"Couldn't resolve to a node: {checkout_id}",
                        code=CheckoutErrorCode.NOT_FOUND,
                    )
                }
            )

        if not checkout.is_shipping_required():
            raise ValidationError(
                {
                    "shipping_method": ValidationError(
                        ERROR_DOES_NOT_SHIP,
                        code=CheckoutErrorCode.SHIPPING_NOT_REQUIRED,
                    )
                }
            )

        shipping_method = cls.get_node_or_error(
            info,
            shipping_method_id,
            only_type=ShippingMethod,
            field="shipping_method_id",
        )

        shipping_method_is_valid = clean_shipping_method(
            checkout=checkout, method=shipping_method, discounts=info.context.discounts
        )
        if not shipping_method_is_valid:
            raise ValidationError(
                {
                    "shipping_method": ValidationError(
                        "This shipping method is not applicable.",
                        code=CheckoutErrorCode.SHIPPING_METHOD_NOT_APPLICABLE,
                    )
                }
            )

        checkout.shipping_method = shipping_method
        checkout.save(update_fields=["shipping_method", "last_change"])
        recalculate_checkout_discount(checkout, info.context.discounts)

        return CheckoutShippingMethodUpdate(checkout=checkout)


class CheckoutComplete(BaseMutation):
    order = graphene.Field(Order, description="Placed order.")

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        store_source = graphene.Boolean(
            default_value=False,
            description=(
                "Determines whether to store the payment source for future usage."
            ),
        )
        redirect_url = graphene.String(
            required=False,
            description=(
                "URL of a view where users should be redirected to "
                "see the order details. URL in RFC 1808 format."
            ),
        )

    class Meta:
        description = (
            "Completes the checkout. As a result a new order is created and "
            "a payment charge is made. This action requires a successful "
            "payment before it can be performed."
        )
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, store_source, **data):
        checkout = cls.get_node_or_error(
            info,
            checkout_id,
            only_type=Checkout,
            field="checkout_id",
            qs=models.Checkout.objects.prefetch_related(
                "gift_cards",
                "lines",
                Prefetch(
                    "payments",
                    queryset=payment_models.Payment.objects.prefetch_related(
                        "order", "order__lines"
                    ),
                ),
            ).select_related("shipping_method", "shipping_method__shipping_zone"),
        )

        discounts = info.context.discounts
        user = info.context.user

        clean_checkout(checkout, discounts)

        # NOTE(review): assumes clean_checkout guarantees an active payment
        # exists; payment is dereferenced below without a None check — confirm.
        payment = checkout.get_last_active_payment()

        with transaction.atomic():
            try:
                order_data = prepare_order_data(
                    checkout=checkout,
                    tracking_code=analytics.get_client_id(info.context),
                    discounts=discounts,
                )
            except InsufficientStock as e:
                raise ValidationError(
                    f"Insufficient product stock: {e.item}", code=e.code
                )
            except voucher_model.NotApplicable:
                raise ValidationError(
                    "Voucher not applicable",
                    code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE,
                )
            except TaxError as tax_error:
                # BUGFIX: this previously *returned* the ValidationError
                # instead of raising it, so the error object leaked out as
                # the mutation result instead of producing a GraphQL error.
                raise ValidationError(
                    "Unable to calculate taxes - %s" % str(tax_error),
                    code=CheckoutErrorCode.TAX_ERROR,
                )

        billing_address = order_data["billing_address"]
        shipping_address = order_data.get("shipping_address", None)

        billing_address = AddressData(**billing_address.as_data())

        if shipping_address is not None:
            shipping_address = AddressData(**shipping_address.as_data())

        try:
            txn = gateway.process_payment(
                payment=payment, token=payment.token, store_source=store_source
            )

            if not txn.is_success:
                raise PaymentError(txn.error)
        except PaymentError as e:
            # Release any resources reserved for the order before surfacing
            # the payment failure.
            abort_order_data(order_data)
            raise ValidationError(str(e), code=CheckoutErrorCode.PAYMENT_ERROR)

        if txn.customer_id and user.is_authenticated:
            store_customer_id(user, payment.gateway, txn.customer_id)

        redirect_url = data.get("redirect_url", "")
        if redirect_url:
            try:
                validate_storefront_url(redirect_url)
            except ValidationError as error:
                raise ValidationError(
                    {"redirect_url": error}, code=AccountErrorCode.INVALID
                )

        # create the order into the database
        order = create_order(
            checkout=checkout,
            order_data=order_data,
            user=user,
            redirect_url=redirect_url,
        )

        # remove checkout after order is successfully paid
        checkout.delete()

        # return the success response with the newly created order data
        return CheckoutComplete(order=order)


class CheckoutAddPromoCode(BaseMutation):
    checkout = graphene.Field(
        Checkout, description="The checkout with the added gift card or voucher."
    )

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        promo_code = graphene.String(
            description="Gift card code or voucher code.", required=True
        )

    class Meta:
        description = "Adds a gift card or a voucher to a checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, promo_code):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        add_promo_code_to_checkout(checkout, promo_code, info.context.discounts)
        return CheckoutAddPromoCode(checkout=checkout)


class CheckoutRemovePromoCode(BaseMutation):
    checkout = graphene.Field(
        Checkout, description="The checkout with the removed gift card or voucher."
    )

    class Arguments:
        checkout_id = graphene.ID(description="Checkout ID.", required=True)
        promo_code = graphene.String(
            description="Gift card code or voucher code.", required=True
        )

    class Meta:
        description = "Remove a gift card or a voucher from a checkout."
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"

    @classmethod
    def perform_mutation(cls, _root, info, checkout_id, promo_code):
        checkout = cls.get_node_or_error(
            info, checkout_id, only_type=Checkout, field="checkout_id"
        )
        remove_promo_code_from_checkout(checkout, promo_code)
        return CheckoutRemovePromoCode(checkout=checkout)


class CheckoutUpdateMeta(UpdateMetaBaseMutation):
    class Meta:
        description = "Updates metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = True
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"


class CheckoutUpdatePrivateMeta(UpdateMetaBaseMutation):
    class Meta:
        description = "Updates private metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = False
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"


class CheckoutClearMeta(ClearMetaBaseMutation):
    class Meta:
        description = "Clear metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = True
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"


class CheckoutClearPrivateMeta(ClearMetaBaseMutation):
    class Meta:
        description = "Clear private metadata for checkout."
        permissions = (OrderPermissions.MANAGE_ORDERS,)
        model = models.Checkout
        public = False
        error_type_class = CheckoutError
        error_type_field = "checkout_errors"
./CrossVul/dataset_final_sorted/CWE-200/py/bad_4650_1
crossvul-python_data_bad_5541_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Storage backend for SWIFT"""

from __future__ import absolute_import

import hashlib
import httplib
import logging
import math
import urlparse

from glance.common import cfg
from glance.common import exception
import glance.store
import glance.store.base
import glance.store.location

try:
    from swift.common import client as swift_client
except ImportError:
    pass

DEFAULT_CONTAINER = 'glance'
DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024  # 5GB
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200  # 200M
ONE_MB = 1000 * 1024

logger = logging.getLogger('glance.store.swift')


class StoreLocation(glance.store.location.StoreLocation):

    """
    Class describing a Swift URI. A Swift URI can look like any of
    the following:

        swift://user:pass@authurl.com/container/obj-id
        swift://account:user:pass@authurl.com/container/obj-id
        swift+http://user:pass@authurl.com/container/obj-id
        swift+https://user:pass@authurl.com/container/obj-id

    The swift+http:// URIs indicate there is an HTTP authentication URL.
    The default for Swift is an HTTPS authentication URL, so swift:// and
    swift+https:// are the same...
    """

    def process_specs(self):
        # Populate location fields from the supplied specs mapping;
        # missing keys become None (scheme defaults to swift+https).
        self.scheme = self.specs.get('scheme', 'swift+https')
        self.user = self.specs.get('user')
        self.key = self.specs.get('key')
        self.authurl = self.specs.get('authurl')
        self.container = self.specs.get('container')
        self.obj = self.specs.get('obj')

    def _get_credstring(self):
        # Returns 'user:key@' when a user is set, otherwise ''.
        if self.user:
            return '%s:%s@' % (self.user, self.key)
        return ''

    def get_uri(self):
        # NOTE(review): the returned URI embeds user:key credentials
        # (via _get_credstring). Anywhere this URI is returned, stored or
        # logged, the Swift credentials leak with it (CWE-200).
        authurl = self.authurl
        if authurl.startswith('http://'):
            authurl = authurl[7:]
        elif authurl.startswith('https://'):
            authurl = authurl[8:]

        credstring = self._get_credstring()
        authurl = authurl.strip('/')
        container = self.container.strip('/')
        obj = self.obj.strip('/')

        return '%s://%s%s/%s/%s' % (self.scheme, credstring, authurl,
                                    container, obj)

    def parse_uri(self, uri):
        """
        Parse URLs. This method fixes an issue where credentials specified
        in the URL are interpreted differently in Python 2.6.1+ than prior
        versions of Python. It also deals with the peculiarity that new-style
        Swift URIs have where a username can contain a ':', like so:

            swift://account:user:pass@authurl.com/container/obj
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # swift://user:pass@http://authurl.com/v1/container/obj
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = _(
                "URI cannot contain more than one occurrence of a scheme."
                "If you have specified a URI like "
                "swift://user:pass@http://authurl.com/v1/container/obj"
                ", you need to change it to use the swift+http:// scheme, "
                "like so: "
                "swift+http://user:pass@authurl.com/v1/container/obj"
            )
            # NOTE(review): the full URI (including any credentials) is
            # written to the error log here — information exposure risk.
            logger.error(_("Invalid store uri %(uri)s: %(reason)s") % locals())
            raise exception.BadStoreUri()

        pieces = urlparse.urlparse(uri)
        assert pieces.scheme in ('swift', 'swift+http', 'swift+https')
        self.scheme = pieces.scheme
        netloc = pieces.netloc
        path = pieces.path.lstrip('/')
        if netloc != '':
            # > Python 2.6.1
            if '@' in netloc:
                creds, netloc = netloc.split('@')
            else:
                creds = None
        else:
            # Python 2.6.1 compat
            # see lp659445 and Python issue7904
            if '@' in path:
                creds, path = path.split('@')
            else:
                creds = None
            netloc = path[0:path.find('/')].strip('/')
            path = path[path.find('/'):].strip('/')
        if creds:
            cred_parts = creds.split(':')

            # User can be account:user, in which case cred_parts[0:2] will be
            # the account and user. Combine them into a single username of
            # account:user
            if len(cred_parts) == 1:
                reason = (_("Badly formed credentials '%(creds)s' in Swift "
                            "URI") % locals())
                # NOTE(review): this logs the raw credential string.
                logger.error(reason)
                raise exception.BadStoreUri()
            elif len(cred_parts) == 3:
                user = ':'.join(cred_parts[0:2])
            else:
                user = cred_parts[0]
            key = cred_parts[-1]
            self.user = user
            self.key = key
        else:
            self.user = None
        path_parts = path.split('/')
        try:
            self.obj = path_parts.pop()
            self.container = path_parts.pop()
            if not netloc.startswith('http'):
                # push hostname back into the remaining to build full authurl
                path_parts.insert(0, netloc)
                self.authurl = '/'.join(path_parts)
        except IndexError:
            reason = _("Badly formed Swift URI: %s") % uri
            logger.error(reason)
            raise exception.BadStoreUri()

    @property
    def swift_auth_url(self):
        """
        Creates a fully-qualified auth url that the Swift client library can
        use. The scheme for the auth_url is determined using the scheme
        included in the `location` field.

        HTTPS is assumed, unless 'swift+http' is specified.
        """
        if self.scheme in ('swift+https', 'swift'):
            auth_scheme = 'https://'
        else:
            auth_scheme = 'http://'
        full_url = ''.join([auth_scheme, self.authurl])
        return full_url


class Store(glance.store.base.Store):
    """An implementation of the swift backend adapter."""

    EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>"

    CHUNKSIZE = 65536

    # Config options; user/key are marked secret so the config layer can
    # redact them in its own output.
    opts = [
        cfg.BoolOpt('swift_enable_snet', default=False),
        cfg.StrOpt('swift_store_auth_address'),
        cfg.StrOpt('swift_store_user', secret=True),
        cfg.StrOpt('swift_store_key', secret=True),
        cfg.StrOpt('swift_store_auth_version', default='2'),
        cfg.StrOpt('swift_store_container',
                   default=DEFAULT_CONTAINER),
        cfg.IntOpt('swift_store_large_object_size',
                   default=DEFAULT_LARGE_OBJECT_SIZE),
        cfg.IntOpt('swift_store_large_object_chunk_size',
                   default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE),
        cfg.BoolOpt('swift_store_create_container_on_put', default=False),
        ]

    def configure(self):
        # Register options and cache the values needed for every request.
        self.conf.register_opts(self.opts)
        self.snet = self.conf.swift_enable_snet
        self.auth_version = self._option_get('swift_store_auth_version')

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options
        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exception.BadStoreConfiguration`
        """
        self.auth_address = self._option_get('swift_store_auth_address')
        self.user = self._option_get('swift_store_user')
        self.key = self._option_get('swift_store_key')
        self.container = self.conf.swift_store_container
        try:
            # The config file has swift_store_large_object_*size in MB, but
            # internally we store it in bytes, since the image_size parameter
            # passed to add() is also in bytes.
            self.large_object_size = \
                self.conf.swift_store_large_object_size * ONE_MB
            self.large_object_chunk_size = \
                self.conf.swift_store_large_object_chunk_size * ONE_MB
        except cfg.ConfigFileValueError, e:
            reason = _("Error in configuration conf: %s") % e
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)

        self.scheme = 'swift+https'
        if self.auth_address.startswith('http://'):
            self.scheme = 'swift+http'
            self.full_auth_address = self.auth_address
        elif self.auth_address.startswith('https://'):
            self.full_auth_address = self.auth_address
        else:  # Defaults https
            self.full_auth_address = 'https://' + self.auth_address

    def get(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        :raises `glance.exception.NotFound` if image does not exist
        """
        loc = location.store_location
        swift_conn = self._make_swift_connection(
            auth_url=loc.swift_auth_url, user=loc.user, key=loc.key)

        try:
            # Stream the object body in CHUNKSIZE pieces.
            (resp_headers, resp_body) = swift_conn.get_object(
                container=loc.container, obj=loc.obj,
                resp_chunk_size=self.CHUNKSIZE)
        except swift_client.ClientException, e:
            if e.http_status == httplib.NOT_FOUND:
                uri = location.get_store_uri()
                raise exception.NotFound(_("Swift could not find image at "
                                         "uri %(uri)s") % locals())
            else:
                raise

        # Adapter so the chunked Swift response satisfies the Indexable
        # iterator protocol expected by callers.
        class ResponseIndexable(glance.store.Indexable):
            def another(self):
                try:
                    return self.wrapped.next()
                except StopIteration:
                    return ''

        length = resp_headers.get('content-length')
        return (ResponseIndexable(resp_body, length), length)

    def get_size(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file, and returns the image_size (or 0
        if unavailable)

        :param location `glance.store.location.Location` object, supplied
                        from glance.store.location.get_location_from_uri()
        """
        loc = location.store_location
        swift_conn = self._make_swift_connection(
            auth_url=loc.swift_auth_url, user=loc.user, key=loc.key)
        try:
            resp_headers = swift_conn.head_object(container=loc.container,
                                                  obj=loc.obj)
            return resp_headers.get('content-length', 0)
        except Exception:
            # Best-effort: size is advisory, so any failure maps to 0.
            return 0

    def _make_swift_connection(self, auth_url, user, key):
        """
        Creates a connection using the Swift client library.
        """
        snet = self.snet
        auth_version = self.auth_version
        full_auth_url = (auth_url if not auth_url or auth_url.endswith('/')
                         else auth_url + '/')
        # NOTE(review): this debug line writes the auth URL and user name
        # to the log; the key is deliberately not included.
        logger.debug(_("Creating Swift connection with "
                     "(auth_address=%(full_auth_url)s, user=%(user)s, "
                     "snet=%(snet)s, auth_version=%(auth_version)s)") %
                     locals())
        return swift_client.Connection(
            authurl=full_auth_url, user=user, key=key, snet=snet,
            auth_version=auth_version)

    def _option_get(self, param):
        # Fetch a required config option; raise BadStoreConfiguration
        # when it is unset/empty.
        result = getattr(self.conf, param)
        if not result:
            reason = (_("Could not find %(param)s in configuration "
                        "options.") % locals())
            logger.error(reason)
            raise exception.BadStoreConfiguration(store_name="swift",
                                                  reason=reason)
        return result

    def add(self, image_id, image_file, image_size):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns an `glance.store.ImageAddResult` object
        containing information about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes

        :retval `glance.store.ImageAddResult` object
        :raises `glance.common.exception.Duplicate` if the image already
                existed

        Swift writes the image data using the scheme:
            ``swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<ID>`
        where:
            <USER> = ``swift_store_user``
            <KEY> = ``swift_store_key``
            <AUTH_ADDRESS> = ``swift_store_auth_address``
            <CONTAINER> = ``swift_store_container``
            <ID> = The id of the image being added

        :note Swift auth URLs by default use HTTPS. To specify an HTTP
              auth URL, you can specify http://someurl.com for the
              swift_store_auth_address config option

        :note Swift cannot natively/transparently handle objects >5GB
              in size. So, if the image is greater than 5GB, we write
              chunks of image data to Swift and then write an manifest to
              Swift that contains information about the chunks.
              This same chunking process is used by default for images
              of an unknown size, as pushing them directly to swift would
              fail if the image turns out to be greater than 5GB.
        """
        swift_conn = self._make_swift_connection(
            auth_url=self.full_auth_address, user=self.user, key=self.key)

        create_container_if_missing(self.container, swift_conn, self.conf)

        obj_name = str(image_id)
        location = StoreLocation({'scheme': self.scheme,
                                  'container': self.container,
                                  'obj': obj_name,
                                  'authurl': self.auth_address,
                                  'user': self.user,
                                  'key': self.key})
        logger.debug(_("Adding image object '%(obj_name)s' "
                       "to Swift") % locals())
        try:
            if image_size > 0 and image_size < self.large_object_size:
                # Image size is known, and is less than large_object_size.
                # Send to Swift with regular PUT.
                obj_etag = swift_conn.put_object(self.container, obj_name,
                                                 image_file,
                                                 content_length=image_size)
            else:
                # Write the image into Swift in chunks.
                chunk_id = 1
                if image_size > 0:
                    total_chunks = str(int(
                        math.ceil(float(image_size) /
                                  float(self.large_object_chunk_size))))
                else:
                    # image_size == 0 is when we don't know the size
                    # of the image. This can occur with older clients
                    # that don't inspect the payload size.
                    logger.debug(_("Cannot determine image size. Adding as a "
                                   "segmented object to Swift."))
                    total_chunks = '?'

                checksum = hashlib.md5()
                combined_chunks_size = 0
                while True:
                    chunk_size = self.large_object_chunk_size
                    if image_size == 0:
                        content_length = None
                    else:
                        left = image_size - combined_chunks_size
                        if left == 0:
                            break
                        if chunk_size > left:
                            chunk_size = left
                        content_length = chunk_size
                    chunk_name = "%s-%05d" % (obj_name, chunk_id)
                    reader = ChunkReader(image_file, checksum, chunk_size)
                    chunk_etag = swift_conn.put_object(
                        self.container, chunk_name, reader,
                        content_length=content_length)
                    bytes_read = reader.bytes_read
                    msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/"
                            "%(total_chunks)s) of length %(bytes_read)d "
                            "to Swift returning MD5 of content: "
                            "%(chunk_etag)s")
                    logger.debug(msg % locals())

                    if bytes_read == 0:
                        # Delete the last chunk, because it's of zero size.
                        # This will happen if image_size == 0.
                        logger.debug(_("Deleting final zero-length chunk"))
                        swift_conn.delete_object(self.container, chunk_name)
                        break

                    chunk_id += 1
                    combined_chunks_size += bytes_read

                # In the case we have been given an unknown image size,
                # set the image_size to the total size of the combined chunks.
                if image_size == 0:
                    image_size = combined_chunks_size

                # Now we write the object manifest and return the
                # manifest's etag...
                manifest = "%s/%s" % (self.container, obj_name)
                headers = {'ETag': hashlib.md5("").hexdigest(),
                           'X-Object-Manifest': manifest}

                # The ETag returned for the manifest is actually the
                # MD5 hash of the concatenated checksums of the strings
                # of each chunk...so we ignore this result in favour
                # of the MD5 of the entire image file contents, so
                # that users can verify the image file contents accordingly
                swift_conn.put_object(self.container, obj_name,
                                      None, headers=headers)
                obj_etag = checksum.hexdigest()

            # NOTE: We return the user and key here! Have to because
            # location is used by the API server to return the actual
            # image data. We *really* should consider NOT returning
            # the location attribute from GET /images/<ID> and
            # GET /images/details
            # NOTE(review): location.get_uri() below embeds the Swift
            # user:key — see StoreLocation.get_uri (CWE-200 exposure).

            return (location.get_uri(), image_size, obj_etag)
        except swift_client.ClientException, e:
            if e.http_status == httplib.CONFLICT:
                # NOTE(review): this error message also carries the
                # credential-bearing URI.
                raise exception.Duplicate(_("Swift already has an image at "
                                            "location %s") %
                                          location.get_uri())
            msg = (_("Failed to add object to Swift.\n"
                     "Got error from Swift: %(e)s") % locals())
            logger.error(msg)
            raise glance.store.BackendException(msg)

    def delete(self, location):
        """
        Takes a `glance.store.location.Location` object that indicates
        where to find the image file to delete

        :location `glance.store.location.Location` object, supplied
                  from glance.store.location.get_location_from_uri()

        :raises NotFound if image does not exist
        """
        loc = location.store_location
        swift_conn = self._make_swift_connection(
            auth_url=loc.swift_auth_url, user=loc.user, key=loc.key)

        try:
            # We request the manifest for the object. If one exists,
            # that means the object was uploaded in chunks/segments,
            # and we need to delete all the chunks as well as the
            # manifest.
            manifest = None
            try:
                headers = swift_conn.head_object(loc.container, loc.obj)
                manifest = headers.get('x-object-manifest')
            except swift_client.ClientException, e:
                if e.http_status != httplib.NOT_FOUND:
                    raise

            if manifest:
                # Delete all the chunks before the object manifest itself
                obj_container, obj_prefix = manifest.split('/', 1)
                for segment in swift_conn.get_container(
                        obj_container, prefix=obj_prefix)[1]:
                    # TODO(jaypipes): This would be an easy area to
                    # parallelize since we're simply sending off
                    # parallelizable requests to Swift to delete stuff.
                    # It's not like we're going to be hogging up network
                    # or file I/O here...
                    swift_conn.delete_object(obj_container, segment['name'])

            else:
                swift_conn.delete_object(loc.container, loc.obj)

        except swift_client.ClientException, e:
            if e.http_status == httplib.NOT_FOUND:
                uri = location.get_store_uri()
                raise exception.NotFound(_("Swift could not find image at "
                                         "uri %(uri)s") % locals())
            else:
                raise


class ChunkReader(object):
    # File-like wrapper that caps reads at `total` bytes and feeds every
    # byte read into the running checksum.
    def __init__(self, fd, checksum, total):
        self.fd = fd
        self.checksum = checksum
        self.total = total
        self.bytes_read = 0

    def read(self, i):
        left = self.total - self.bytes_read
        if i > left:
            i = left
        result = self.fd.read(i)
        self.bytes_read += len(result)
        self.checksum.update(result)
        return result


def create_container_if_missing(container, swift_conn, conf):
    """
    Creates a missing container in Swift if the
    ``swift_store_create_container_on_put`` option is set.

    :param container: Name of container to create
    :param swift_conn: Connection to Swift
    :param conf: Option mapping
    """
    try:
        swift_conn.head_container(container)
    except swift_client.ClientException, e:
        if e.http_status == httplib.NOT_FOUND:
            if conf.swift_store_create_container_on_put:
                try:
                    swift_conn.put_container(container)
                except swift_client.ClientException, e:
                    msg = _("Failed to add container to Swift.\n"
                            "Got error from Swift: %(e)s") % locals()
                    raise glance.store.BackendException(msg)
            else:
                msg = (_("The container %(container)s does not exist in "
                         "Swift. Please set the "
                         "swift_store_create_container_on_put option"
                         "to add container to Swift automatically.")
                       % locals())
                raise glance.store.BackendException(msg)
        else:
            raise


glance.store.register_store(__name__, ['swift', 'swift+http', 'swift+https'])
./CrossVul/dataset_final_sorted/CWE-200/py/bad_5541_0
crossvul-python_data_bad_3325_0
# -*- coding: utf-8 -*- ''' Classes that manage file clients ''' from __future__ import absolute_import # Import python libs import contextlib import errno import logging import os import string import shutil import ftplib from tornado.httputil import parse_response_start_line, HTTPInputError # Import salt libs from salt.exceptions import ( CommandExecutionError, MinionError ) import salt.client import salt.crypt import salt.loader import salt.payload import salt.transport import salt.fileserver import salt.utils import salt.utils.files import salt.utils.templates import salt.utils.url import salt.utils.gzip_util import salt.utils.http import salt.ext.six as six from salt.utils.locales import sdecode from salt.utils.openstack.swift import SaltSwift # pylint: disable=no-name-in-module,import-error import salt.ext.six.moves.BaseHTTPServer as BaseHTTPServer from salt.ext.six.moves.urllib.error import HTTPError, URLError from salt.ext.six.moves.urllib.parse import urlparse, urlunparse # pylint: enable=no-name-in-module,import-error log = logging.getLogger(__name__) def get_file_client(opts, pillar=False): ''' Read in the ``file_client`` option and return the correct type of file server ''' client = opts.get('file_client', 'remote') if pillar and client == 'local': client = 'pillar' return { 'remote': RemoteClient, 'local': FSClient, 'pillar': LocalClient, }.get(client, RemoteClient)(opts) def decode_dict_keys_to_str(src): ''' Convert top level keys from bytes to strings if possible. This is necessary because Python 3 makes a distinction between these types. 
''' if not six.PY3 or not isinstance(src, dict): return src output = {} for key, val in six.iteritems(src): if isinstance(key, bytes): try: key = key.decode() except UnicodeError: pass output[key] = val return output class Client(object): ''' Base class for Salt file interactions ''' def __init__(self, opts): self.opts = opts self.utils = salt.loader.utils(self.opts) self.serial = salt.payload.Serial(self.opts) # Add __setstate__ and __getstate__ so that the object may be # deep copied. It normally can't be deep copied because its # constructor requires an 'opts' parameter. # The TCP transport needs to be able to deep copy this class # due to 'salt.utils.context.ContextDict.clone'. def __setstate__(self, state): # This will polymorphically call __init__ # in the derived class. self.__init__(state['opts']) def __getstate__(self): return {'opts': self.opts} def _check_proto(self, path): ''' Make sure that this path is intended for the salt master and trim it ''' if not path.startswith('salt://'): raise MinionError(u'Unsupported path: {0}'.format(path)) file_path, saltenv = salt.utils.url.parse(path) return file_path def _file_local_list(self, dest): ''' Helper util to return a list of files in a directory ''' if os.path.isdir(dest): destdir = dest else: destdir = os.path.dirname(dest) filelist = set() for root, dirs, files in os.walk(destdir, followlinks=True): for name in files: path = os.path.join(root, name) filelist.add(path) return filelist @contextlib.contextmanager def _cache_loc(self, path, saltenv='base', cachedir=None): ''' Return the local location to cache the file, cache dirs will be made ''' if cachedir is None: cachedir = self.opts['cachedir'] elif not os.path.isabs(cachedir): cachedir = os.path.join(self.opts['cachedir'], cachedir) dest = salt.utils.path_join(cachedir, 'files', saltenv, path) destdir = os.path.dirname(dest) cumask = os.umask(63) # remove destdir if it is a regular file to avoid an OSError when # running os.makedirs below if 
os.path.isfile(destdir): os.remove(destdir) # ensure destdir exists try: os.makedirs(destdir) except OSError as exc: if exc.errno != errno.EEXIST: # ignore if it was there already raise yield dest os.umask(cumask) def get_file(self, path, dest='', makedirs=False, saltenv='base', gzip=None, cachedir=None): ''' Copies a file from the local files or master depending on implementation ''' raise NotImplementedError def file_list_emptydirs(self, saltenv='base', prefix=''): ''' List the empty dirs ''' raise NotImplementedError def cache_file(self, path, saltenv='base', cachedir=None): ''' Pull a file down from the file server and store it in the minion file cache ''' return self.get_url(path, '', True, saltenv, cachedir=cachedir) def cache_files(self, paths, saltenv='base', cachedir=None): ''' Download a list of files stored on the master and put them in the minion file cache ''' ret = [] if isinstance(paths, str): paths = paths.split(',') for path in paths: ret.append(self.cache_file(path, saltenv, cachedir=cachedir)) return ret def cache_master(self, saltenv='base', cachedir=None): ''' Download and cache all files on a master in a specified environment ''' ret = [] for path in self.file_list(saltenv): ret.append( self.cache_file( salt.utils.url.create(path), saltenv, cachedir=cachedir) ) return ret def cache_dir(self, path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None, cachedir=None): ''' Download all of the files in a subdir of the master ''' ret = [] path = self._check_proto(sdecode(path)) # We want to make sure files start with this *directory*, use # '/' explicitly because the master (that's generating the # list of files) only runs on POSIX if not path.endswith('/'): path = path + '/' log.info( 'Caching directory \'{0}\' for environment \'{1}\''.format( path, saltenv ) ) # go through the list of all files finding ones that are in # the target directory and caching them for fn_ in self.file_list(saltenv): fn_ = sdecode(fn_) if fn_.strip() 
and fn_.startswith(path): if salt.utils.check_include_exclude( fn_, include_pat, exclude_pat): fn_ = self.cache_file( salt.utils.url.create(fn_), saltenv, cachedir=cachedir) if fn_: ret.append(fn_) if include_empty: # Break up the path into a list containing the bottom-level # directory (the one being recursively copied) and the directories # preceding it # separated = string.rsplit(path, '/', 1) # if len(separated) != 2: # # No slashes in path. (So all files in saltenv will be copied) # prefix = '' # else: # prefix = separated[0] if cachedir is None: cachedir = self.opts['cachedir'] elif not os.path.isabs(cachedir): cachedir = os.path.join(self.opts['cachedir'], cachedir) dest = salt.utils.path_join(cachedir, 'files', saltenv) for fn_ in self.file_list_emptydirs(saltenv): fn_ = sdecode(fn_) if fn_.startswith(path): minion_dir = '{0}/{1}'.format(dest, fn_) if not os.path.isdir(minion_dir): os.makedirs(minion_dir) ret.append(minion_dir) return ret def cache_local_file(self, path, **kwargs): ''' Cache a local file on the minion in the localfiles cache ''' dest = os.path.join(self.opts['cachedir'], 'localfiles', path.lstrip('/')) destdir = os.path.dirname(dest) if not os.path.isdir(destdir): os.makedirs(destdir) shutil.copyfile(path, dest) return dest def file_local_list(self, saltenv='base'): ''' List files in the local minion files and localfiles caches ''' filesdest = os.path.join(self.opts['cachedir'], 'files', saltenv) localfilesdest = os.path.join(self.opts['cachedir'], 'localfiles') fdest = self._file_local_list(filesdest) ldest = self._file_local_list(localfilesdest) return sorted(fdest.union(ldest)) def file_list(self, saltenv='base', prefix=''): ''' This function must be overwritten ''' return [] def dir_list(self, saltenv='base', prefix=''): ''' This function must be overwritten ''' return [] def symlink_list(self, saltenv='base', prefix=''): ''' This function must be overwritten ''' return {} def is_cached(self, path, saltenv='base', cachedir=None): ''' 
Returns the full path to a file if it is cached locally on the minion otherwise returns a blank string ''' if path.startswith('salt://'): path, senv = salt.utils.url.parse(path) if senv: saltenv = senv escaped = True if salt.utils.url.is_escaped(path) else False # also strip escape character '|' localsfilesdest = os.path.join( self.opts['cachedir'], 'localfiles', path.lstrip('|/')) filesdest = os.path.join( self.opts['cachedir'], 'files', saltenv, path.lstrip('|/')) extrndest = self._extrn_path(path, saltenv, cachedir=cachedir) if os.path.exists(filesdest): return salt.utils.url.escape(filesdest) if escaped else filesdest elif os.path.exists(localsfilesdest): return salt.utils.url.escape(localsfilesdest) \ if escaped \ else localsfilesdest elif os.path.exists(extrndest): return extrndest return '' def list_states(self, saltenv): ''' Return a list of all available sls modules on the master for a given environment ''' limit_traversal = self.opts.get('fileserver_limit_traversal', False) states = [] if limit_traversal: if saltenv not in self.opts['file_roots']: log.warning( 'During an attempt to list states for saltenv \'{0}\', ' 'the environment could not be found in the configured ' 'file roots'.format(saltenv) ) return states for path in self.opts['file_roots'][saltenv]: for root, dirs, files in os.walk(path, topdown=True): log.debug('Searching for states in dirs {0} and files ' '{1}'.format(dirs, files)) if not [filename.endswith('.sls') for filename in files]: # Use shallow copy so we don't disturb the memory used by os.walk. Otherwise this breaks! del dirs[:] else: for found_file in files: stripped_root = os.path.relpath(root, path).replace('/', '.') if salt.utils.is_windows(): stripped_root = stripped_root.replace('\\', '/') if found_file.endswith(('.sls')): if found_file.endswith('init.sls'): if stripped_root.endswith('.'): stripped_root = stripped_root.rstrip('.') states.append(stripped_root) else: if not stripped_root.endswith('.'): stripped_root += '.' 
if stripped_root.startswith('.'): stripped_root = stripped_root.lstrip('.') states.append(stripped_root + found_file[:-4]) else: for path in self.file_list(saltenv): if salt.utils.is_windows(): path = path.replace('\\', '/') if path.endswith('.sls'): # is an sls module! if path.endswith('{0}init.sls'.format('/')): states.append(path.replace('/', '.')[:-9]) else: states.append(path.replace('/', '.')[:-4]) return states def get_state(self, sls, saltenv, cachedir=None): ''' Get a state file from the master and store it in the local minion cache; return the location of the file ''' if '.' in sls: sls = sls.replace('.', '/') sls_url = salt.utils.url.create(sls + '.sls') init_url = salt.utils.url.create(sls + '/init.sls') for path in [sls_url, init_url]: dest = self.cache_file(path, saltenv, cachedir=cachedir) if dest: return {'source': path, 'dest': dest} return {} def get_dir(self, path, dest='', saltenv='base', gzip=None, cachedir=None): ''' Get a directory recursively from the salt-master ''' ret = [] # Strip trailing slash path = self._check_proto(path).rstrip('/') # Break up the path into a list containing the bottom-level directory # (the one being recursively copied) and the directories preceding it separated = path.rsplit('/', 1) if len(separated) != 2: # No slashes in path. (This means all files in saltenv will be # copied) prefix = '' else: prefix = separated[0] # Copy files from master for fn_ in self.file_list(saltenv, prefix=path): # Prevent files in "salt://foobar/" (or salt://foo.sh) from # matching a path of "salt://foo" try: if fn_[len(path)] != '/': continue except IndexError: continue # Remove the leading directories from path to derive # the relative path on the minion. 
minion_relpath = fn_[len(prefix):].lstrip('/') ret.append( self.get_file( salt.utils.url.create(fn_), '{0}/{1}'.format(dest, minion_relpath), True, saltenv, gzip ) ) # Replicate empty dirs from master try: for fn_ in self.file_list_emptydirs(saltenv, prefix=path): # Prevent an empty dir "salt://foobar/" from matching a path of # "salt://foo" try: if fn_[len(path)] != '/': continue except IndexError: continue # Remove the leading directories from path to derive # the relative path on the minion. minion_relpath = fn_[len(prefix):].lstrip('/') minion_mkdir = '{0}/{1}'.format(dest, minion_relpath) if not os.path.isdir(minion_mkdir): os.makedirs(minion_mkdir) ret.append(minion_mkdir) except TypeError: pass ret.sort() return ret def get_url(self, url, dest, makedirs=False, saltenv='base', no_cache=False, cachedir=None): ''' Get a single file from a URL. ''' url_data = urlparse(url) url_scheme = url_data.scheme url_path = os.path.join( url_data.netloc, url_data.path).rstrip(os.sep) if url_scheme and url_scheme.lower() in string.ascii_lowercase: url_path = ':'.join((url_scheme, url_path)) url_scheme = 'file' if url_scheme in ('file', ''): # Local filesystem if not os.path.isabs(url_path): raise CommandExecutionError( 'Path \'{0}\' is not absolute'.format(url_path) ) if dest is None: with salt.utils.fopen(url_path, 'r') as fp_: data = fp_.read() return data return url_path if url_scheme == 'salt': result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir) if result and dest is None: with salt.utils.fopen(result, 'r') as fp_: data = fp_.read() return data return result if dest: destdir = os.path.dirname(dest) if not os.path.isdir(destdir): if makedirs: os.makedirs(destdir) else: return '' elif not no_cache: dest = self._extrn_path(url, saltenv, cachedir=cachedir) destdir = os.path.dirname(dest) if not os.path.isdir(destdir): os.makedirs(destdir) if url_data.scheme == 's3': try: def s3_opt(key, default=None): '''Get value of s3.<key> from Minion config or from 
def get_url(self, url, dest, makedirs=False, saltenv='base', no_cache=False, cachedir=None):
    '''
    Get a single file from a URL.

    Supported schemes: local ``file://`` paths (and bare/drive-letter
    paths), ``salt://``, ``s3://``, ``ftp://``, ``swift://``, and
    ``http(s)://`` (streamed via tornado). With ``dest=None`` for local
    and salt URLs the file *contents* are returned instead of a path;
    with ``no_cache=True`` for HTTP the body is returned without being
    written to the minion cache.
    '''
    url_data = urlparse(url)
    url_scheme = url_data.scheme
    url_path = os.path.join(
        url_data.netloc, url_data.path).rstrip(os.sep)

    # A single lowercase letter "scheme" is really a Windows drive
    # letter (e.g. c:\...): rejoin it and treat the URL as a local file.
    if url_scheme and url_scheme.lower() in string.ascii_lowercase:
        url_path = ':'.join((url_scheme, url_path))
        url_scheme = 'file'

    if url_scheme in ('file', ''):
        # Local filesystem
        if not os.path.isabs(url_path):
            raise CommandExecutionError(
                'Path \'{0}\' is not absolute'.format(url_path)
            )
        if dest is None:
            with salt.utils.fopen(url_path, 'r') as fp_:
                data = fp_.read()
            return data
        return url_path

    if url_scheme == 'salt':
        result = self.get_file(url, dest, makedirs, saltenv, cachedir=cachedir)
        if result and dest is None:
            with salt.utils.fopen(result, 'r') as fp_:
                data = fp_.read()
            return data
        return result

    # Resolve the destination: honor an explicit dest (creating parents
    # only when makedirs is set), otherwise cache under extrn_files
    # unless no_cache asked us to keep everything in memory.
    if dest:
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            if makedirs:
                os.makedirs(destdir)
            else:
                return ''
    elif not no_cache:
        dest = self._extrn_path(url, saltenv, cachedir=cachedir)
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            os.makedirs(destdir)

    if url_data.scheme == 's3':
        try:
            def s3_opt(key, default=None):
                '''Get value of s3.<key> from Minion config or from Pillar'''
                if 's3.' + key in self.opts:
                    return self.opts['s3.' + key]
                try:
                    return self.opts['pillar']['s3'][key]
                except (KeyError, TypeError):
                    return default
            self.utils['s3.query'](method='GET',
                                   bucket=url_data.netloc,
                                   path=url_data.path[1:],
                                   return_bin=False,
                                   local_file=dest,
                                   action=None,
                                   key=s3_opt('key'),
                                   keyid=s3_opt('keyid'),
                                   service_url=s3_opt('service_url'),
                                   verify_ssl=s3_opt('verify_ssl', True),
                                   location=s3_opt('location'))
            return dest
        except Exception as exc:
            raise MinionError(
                'Could not fetch from {0}. Exception: {1}'.format(url, exc)
            )

    if url_data.scheme == 'ftp':
        try:
            ftp = ftplib.FTP(url_data.hostname)
            # NOTE(review): anonymous login only; credentials in the URL
            # are not forwarded here.
            ftp.login()
            with salt.utils.fopen(dest, 'wb') as fp_:
                ftp.retrbinary('RETR {0}'.format(url_data.path), fp_.write)
            return dest
        except Exception as exc:
            raise MinionError('Could not retrieve {0} from FTP server. Exception: {1}'.format(url, exc))

    if url_data.scheme == 'swift':
        try:
            def swift_opt(key, default):
                '''Get value of <key> from Minion config or from Pillar'''
                if key in self.opts:
                    return self.opts[key]
                try:
                    return self.opts['pillar'][key]
                except (KeyError, TypeError):
                    return default
            swift_conn = SaltSwift(swift_opt('keystone.user', None),
                                   swift_opt('keystone.tenant', None),
                                   swift_opt('keystone.auth_url', None),
                                   swift_opt('keystone.password', None))
            swift_conn.get_object(url_data.netloc,
                                  url_data.path[1:],
                                  dest)
            return dest
        except Exception:
            raise MinionError('Could not fetch from {0}'.format(url))

    get_kwargs = {}
    if url_data.username is not None \
            and url_data.scheme in ('http', 'https'):
        # Strip user:pass out of the netloc and pass them as an auth
        # tuple instead, so credentials never appear in the fetched URL.
        netloc = url_data.netloc
        at_sign_pos = netloc.rfind('@')
        if at_sign_pos != -1:
            netloc = netloc[at_sign_pos + 1:]
        fixed_url = urlunparse(
            (url_data.scheme, netloc, url_data.path,
             url_data.params, url_data.query, url_data.fragment))
        get_kwargs['auth'] = (url_data.username, url_data.password)
    else:
        fixed_url = url

    destfp = None
    try:
        # Tornado calls streaming_callback on redirect response bodies.
        # But we need streaming to support fetching large files (> RAM avail).
        # Here we working this around by disabling recording the body for redirections.
        # The issue is fixed in Tornado 4.3.0 so on_header callback could be removed
        # when we'll deprecate Tornado<4.3.0.
        # See #27093 and #30431 for details.

        # Use list here to make it writable inside the on_header callback. Simple bool doesn't
        # work here: on_header creates a new local variable instead. This could be avoided in
        # Py3 with 'nonlocal' statement. There is no Py2 alternative for this.
        write_body = [False]

        def on_header(hdr):
            try:
                hdr = parse_response_start_line(hdr)
            except HTTPInputError:
                # Not the first line, do nothing
                return
            write_body[0] = hdr.code not in [301, 302, 303, 307]

        if no_cache:
            result = []

            def on_chunk(chunk):
                if write_body[0]:
                    result.append(chunk)
        else:
            dest_tmp = "{0}.part".format(dest)
            # We need an open filehandle to use in the on_chunk callback,
            # that's why we're not using a with clause here.
            destfp = salt.utils.fopen(dest_tmp, 'wb')

            def on_chunk(chunk):
                if write_body[0]:
                    destfp.write(chunk)

        query = salt.utils.http.query(
            fixed_url,
            stream=True,
            streaming_callback=on_chunk,
            header_callback=on_header,
            username=url_data.username,
            password=url_data.password,
            opts=self.opts,
            **get_kwargs
        )
        if 'handle' not in query:
            raise MinionError('Error: {0} reading {1}'.format(query['error'], url))
        if no_cache:
            return ''.join(result)
        else:
            destfp.close()
            destfp = None
            # Atomic publish: the download went to "<dest>.part" and is
            # only renamed into place once it completed.
            salt.utils.files.rename(dest_tmp, dest)
            return dest
    except HTTPError as exc:
        # responses[code] is a (short, long) tuple; {3} picks the long
        # explanation from the unpacked tuple.
        raise MinionError('HTTP error {0} reading {1}: {3}'.format(
            exc.code,
            url,
            *BaseHTTPServer.BaseHTTPRequestHandler.responses[exc.code]))
    except URLError as exc:
        raise MinionError('Error reading {0}: {1}'.format(url, exc.reason))
    finally:
        if destfp is not None:
            destfp.close()
def _extrn_path(self, url, saltenv, cachedir=None):
    '''
    Map a remote URL onto its local cache path under
    ``<cachedir>/extrn_files/<saltenv>/<host>/<path>``.
    '''
    parsed = urlparse(url)

    # On Windows, characters that are legal in a URL may be illegal in
    # a path, so sanitize the host portion first.
    if salt.utils.is_windows():
        host = salt.utils.sanitize_win_path_string(parsed.netloc)
    else:
        host = parsed.netloc
    # Strip user:pass from URLs
    host = host.split('@')[-1]

    # Resolve the cache root: default to the minion cachedir, and anchor
    # a relative override underneath it.
    if cachedir is None:
        cachedir = self.opts['cachedir']
    elif not os.path.isabs(cachedir):
        cachedir = os.path.join(self.opts['cachedir'], cachedir)

    # Fold the query string into the file name so distinct queries get
    # distinct cache entries.
    file_name = parsed.path if not parsed.query \
        else '-'.join([parsed.path, parsed.query])

    return salt.utils.path_join(
        cachedir,
        'extrn_files',
        saltenv,
        host,
        file_name
    )
def get_file(self,
             path,
             dest='',
             makedirs=False,
             saltenv='base',
             gzip=None,
             cachedir=None):
    '''
    Copies a file from the local files directory into :param:`dest`
    gzip compression settings are ignored for local files

    Returns the file_roots path of the file (not a copy in ``dest``),
    or ``''`` when the file is not found in ``saltenv``.
    '''
    path = self._check_proto(path)
    fnd = self._find_file(path, saltenv)
    fnd_path = fnd.get('path')
    if not fnd_path:
        return ''

    try:
        # NOTE(review): LocalClient._find_file never populates a 'stat'
        # key, so this IndexError path always fires and fnd_mode stays
        # None — the chmod sync below looks effectively dead here;
        # confirm before relying on it.
        fnd_mode = fnd.get('stat', [])[0]
    except (IndexError, TypeError):
        fnd_mode = None

    if not salt.utils.is_windows():
        if fnd_mode is not None:
            try:
                # Mirror the source file's mode onto dest when they differ.
                if os.stat(dest).st_mode != fnd_mode:
                    try:
                        os.chmod(dest, fnd_mode)
                    except OSError as exc:
                        log.warning('Failed to chmod %s: %s', dest, exc)
            except Exception:
                pass

    return fnd_path
def hash_file(self, path, saltenv='base'):
    '''
    Return the hash of a file, to get the hash of a file in the
    file_roots prepend the path with salt://<file on server> otherwise,
    prepend the file with / for a local file.
    '''
    hashes = {}
    located = self.__get_file_path(path, saltenv)
    if located is None:
        return hashes

    # __get_file_path yields either a find_file dict or a plain local
    # path string; normalize to the filesystem path in both cases.
    try:
        target = located['path']
    except TypeError:
        target = located

    algo = self.opts.get('hash_type', 'md5')
    hashes['hsum'] = salt.utils.get_hash(target, form=algo)
    hashes['hash_type'] = algo
    return hashes
def ext_nodes(self):
    '''
    Stub kept for API compatibility.

    The external_nodes subsystem this originally queried was deprecated
    and removed in 2014.1.6 in favor of master_tops, and master_tops has
    not been extended to support a completely local environment yet
    (``salt-call --local state.show_top`` lands here). Always returns an
    empty dict.
    '''
    return {}
def get_file(self,
             path,
             dest='',
             makedirs=False,
             saltenv='base',
             gzip=None,
             cachedir=None):
    '''
    Get a single file from the salt-master
    path must be a salt server location, aka, salt://path/to/file, if
    dest is omitted, then the downloaded file will be placed in the minion
    cache

    path
        ``salt://`` URL of the file to fetch (may embed ``?saltenv=``)
    dest
        Local destination; defaults to the minion cache location
    makedirs
        Create missing parent directories of ``dest``
    gzip
        Compression level to request from the master
    cachedir
        Alternate minion cache directory

    Returns the local path of the file, or False when it does not exist
    on the master (or dest's parent is missing and makedirs is False).
    '''
    path, senv = salt.utils.url.split_env(path)
    if senv:
        saltenv = senv

    if not salt.utils.is_windows():
        hash_server, stat_server = self.hash_and_stat_file(path, saltenv)
        try:
            mode_server = stat_server[0]
        except (IndexError, TypeError):
            mode_server = None
    else:
        hash_server = self.hash_file(path, saltenv)
        mode_server = None

    # Check if file exists on server, before creating files and
    # directories
    if hash_server == '':
        log.debug(
            'Could not find file \'%s\' in saltenv \'%s\'',
            path, saltenv
        )
        return False

    # Hash compare local copy with master and skip download
    # if no difference found.
    dest2check = dest
    if not dest2check:
        rel_path = self._check_proto(path)

        log.debug(
            'In saltenv \'%s\', looking at rel_path \'%s\' to resolve '
            '\'%s\'', saltenv, rel_path, path
        )
        with self._cache_loc(
                rel_path, saltenv, cachedir=cachedir) as cache_dest:
            dest2check = cache_dest

    log.debug(
        'In saltenv \'%s\', ** considering ** path \'%s\' to resolve '
        '\'%s\'', saltenv, dest2check, path
    )

    if dest2check and os.path.isfile(dest2check):
        if not salt.utils.is_windows():
            hash_local, stat_local = \
                self.hash_and_stat_file(dest2check, saltenv)
            try:
                mode_local = stat_local[0]
            except (IndexError, TypeError):
                mode_local = None
        else:
            hash_local = self.hash_file(dest2check, saltenv)
            mode_local = None

        if hash_local == hash_server:
            if not salt.utils.is_windows():
                if mode_server is None:
                    log.debug('No file mode available for \'%s\'', path)
                elif mode_local is None:
                    log.debug(
                        'No file mode available for \'%s\'',
                        dest2check
                    )
                else:
                    if mode_server == mode_local:
                        log.info(
                            'Fetching file from saltenv \'%s\', '
                            '** skipped ** latest already in cache '
                            '\'%s\', mode up-to-date', saltenv, path
                        )
                    else:
                        try:
                            os.chmod(dest2check, mode_server)
                            log.info(
                                'Fetching file from saltenv \'%s\', '
                                '** updated ** latest already in cache, '
                                '\'%s\', mode updated from %s to %s',
                                saltenv,
                                path,
                                salt.utils.st_mode_to_octal(mode_local),
                                salt.utils.st_mode_to_octal(mode_server)
                            )
                        except OSError as exc:
                            log.warning(
                                'Failed to chmod %s: %s', dest2check, exc
                            )
                # We may not have been able to check/set the mode, but we
                # don't want to re-download the file because of a failure
                # in mode checking. Return the cached path.
                return dest2check
            else:
                log.info(
                    'Fetching file from saltenv \'%s\', ** skipped ** '
                    'latest already in cache \'%s\'', saltenv, path
                )
                return dest2check

    log.debug(
        'Fetching file from saltenv \'%s\', ** attempting ** \'%s\'',
        saltenv, path
    )
    d_tries = 0
    transport_tries = 0
    path = self._check_proto(path)
    load = {'path': path,
            'saltenv': saltenv,
            'cmd': '_serve_file'}
    if gzip:
        gzip = int(gzip)
        load['gzip'] = gzip
    fn_ = None
    if dest:
        destdir = os.path.dirname(dest)
        if not os.path.isdir(destdir):
            if makedirs:
                os.makedirs(destdir)
            else:
                return False
        # We need an open filehandle here, that's why we're not using a
        # with clause:
        fn_ = salt.utils.fopen(dest, 'wb+')
    else:
        log.debug('No dest file found')

    while True:
        # Resume from wherever the filehandle already is (chunked fetch).
        if not fn_:
            load['loc'] = 0
        else:
            load['loc'] = fn_.tell()
        data = self.channel.send(load, raw=True)
        if six.PY3:
            # Sometimes the source is local (eg when using
            # 'salt.filesystem.FSChan'), in which case the keys are
            # already strings. Sometimes the source is remote, in which
            # case the keys are bytes due to raw mode. Standardize on
            # strings for the top-level keys to simplify things.
            data = decode_dict_keys_to_str(data)
        try:
            if not data['data']:
                if not fn_ and data['dest']:
                    # This is a 0 byte file on the master
                    with self._cache_loc(
                            data['dest'],
                            saltenv,
                            cachedir=cachedir) as cache_dest:
                        dest = cache_dest
                        with salt.utils.fopen(cache_dest, 'wb+') as ofile:
                            ofile.write(data['data'])
                if 'hsum' in data and d_tries < 3:
                    # Master has prompted a file verification, if the
                    # verification fails, re-download the file. Try 3 times
                    d_tries += 1
                    hsum = salt.utils.get_hash(dest, salt.utils.to_str(data.get('hash_type', b'md5')))
                    if hsum != data['hsum']:
                        log.warning(
                            'Bad download of file %s, attempt %d of 3',
                            path, d_tries
                        )
                        continue
                break
            if not fn_:
                with self._cache_loc(
                        data['dest'],
                        saltenv,
                        cachedir=cachedir) as cache_dest:
                    dest = cache_dest
                    # If a directory was formerly cached at this path, then
                    # remove it to avoid a traceback trying to write the file
                    if os.path.isdir(dest):
                        salt.utils.rm_rf(dest)
                    fn_ = salt.utils.fopen(dest, 'wb+')
            if data.get('gzip', None):
                data = salt.utils.gzip_util.uncompress(data['data'])
            else:
                data = data['data']
            if six.PY3 and isinstance(data, str):
                data = data.encode()
            fn_.write(data)
        except (TypeError, KeyError) as exc:
            try:
                data_type = type(data).__name__
            except AttributeError:
                # Shouldn't happen, but don't let this cause a traceback.
                data_type = str(type(data))
            transport_tries += 1
            log.warning(
                'Data transport is broken, got: %s, type: %s, '
                'exception: %s, attempt %d of 3',
                data, data_type, exc, transport_tries
            )
            self._refresh_channel()
            if transport_tries > 3:
                log.error(
                    'Data transport is broken, got: %s, type: %s, '
                    'exception: %s, retry attempts exhausted',
                    data, data_type, exc
                )
                break

    if fn_:
        fn_.close()
        log.info(
            'Fetching file from saltenv \'%s\', ** done ** \'%s\'',
            saltenv, path
        )
    else:
        log.debug(
            'In saltenv \'%s\', we are ** missing ** the file \'%s\'',
            saltenv, path
        )

    if not salt.utils.is_windows():
        if mode_server is not None:
            try:
                if os.stat(dest).st_mode != mode_server:
                    try:
                        os.chmod(dest, mode_server)
                        log.info(
                            'Fetching file from saltenv \'%s\', '
                            '** done ** \'%s\', mode set to %s',
                            saltenv,
                            path,
                            salt.utils.st_mode_to_octal(mode_server)
                        )
                    # BUGFIX: was a bare "except OSError:" that then
                    # logged `exc`, an unbound name, raising NameError
                    # and masking the real chmod failure.
                    except OSError as exc:
                        log.warning('Failed to chmod %s: %s', dest, exc)
            except OSError:
                pass
    return dest
def file_list_emptydirs(self, saltenv='base', prefix=''):
    '''
    List the empty dirs on the master

    prefix
        Limit the listing to paths under this relative prefix

    Returns the list of empty directories reported by the master.
    '''
    load = {'saltenv': saltenv,
            'prefix': prefix,
            'cmd': '_file_list_emptydirs'}
    # BUGFIX: the channel response was previously discarded, so this
    # method always returned None — which silently disabled empty-dir
    # replication in Client.get_dir (masked there by "except TypeError").
    # Return the result, matching dir_list/symlink_list.
    return self.channel.send(load)
class DumbAuth(object):
    '''
    Minimal stand-in for the master auth object used by FSClient.

    Token generation is a pass-through: whatever clear token is handed
    in comes straight back out.
    '''
    def gen_token(self, clear_tok):
        # No signing, no master round-trip — the "token" is the input.
        return clear_tok
./CrossVul/dataset_final_sorted/CWE-200/py/bad_3325_0
crossvul-python_data_good_3325_2
def __virtual__():
    '''
    Load this module only on POSIX-like systems; win_file provides the
    equivalent functionality on Windows.
    '''
    # win_file takes care of windows
    if not salt.utils.is_windows():
        return True
    return (False, 'The file execution module cannot be loaded: only available on non-Windows systems - use win_file instead.')
salt.utils.files.TEMPFILE_PREFIX)): # Don't remove if it exists in file_roots (any saltenv) all_roots = itertools.chain.from_iterable( six.itervalues(__opts__['file_roots'])) in_roots = any(sfn.startswith(root) for root in all_roots) # Only clean up files that exist if os.path.exists(sfn) and not in_roots: os.remove(sfn) def _error(ret, err_msg): ''' Common function for setting error information for return dicts ''' ret['result'] = False ret['comment'] = err_msg return ret def _binary_replace(old, new): ''' This function does NOT do any diffing, it just checks the old and new files to see if either is binary, and provides an appropriate string noting the difference between the two files. If neither file is binary, an empty string is returned. This function should only be run AFTER it has been determined that the files differ. ''' old_isbin = not salt.utils.istextfile(old) new_isbin = not salt.utils.istextfile(new) if any((old_isbin, new_isbin)): if all((old_isbin, new_isbin)): return 'Replace binary file' elif old_isbin: return 'Replace binary file with text file' elif new_isbin: return 'Replace text file with binary file' return '' def _get_bkroot(): ''' Get the location of the backup dir in the minion cache ''' # Get the cachedir from the minion config return os.path.join(__salt__['config.get']('cachedir'), 'file_backup') def _splitlines_preserving_trailing_newline(str): ''' Returns a list of the lines in the string, breaking at line boundaries and preserving a trailing newline (if present). Essentially, this works like ``str.striplines(False)`` but preserves an empty line at the end. This is equivalent to the following code: .. 
def group_to_gid(group):
    '''
    Convert the group to the gid on this system

    group
        group to convert to its gid

    CLI Example:

    .. code-block:: bash

        salt '*' file.group_to_gid root
    '''
    # No group at all maps to the empty string, mirroring the lookup
    # failure case below.
    if group is None:
        return ''
    # Numeric input is assumed to already be a gid.
    if isinstance(group, int):
        return group
    try:
        return grp.getgrnam(group).gr_gid
    except KeyError:
        # Unknown group name on this system.
        return ''
def user_to_uid(user):
    '''
    Convert user name to a uid

    user
        user name to convert to its uid

    CLI Example:

    .. code-block:: bash

        salt '*' file.user_to_uid root
    '''
    # With no user given, fall back to the user Salt is running as.
    if user is None:
        user = salt.utils.get_user()
    # Numeric input is assumed to already be a uid.
    if isinstance(user, int):
        return user
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        # Unknown user name on this system.
        return ''
def chown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chown /etc/passwd root root
    '''
    path = os.path.expanduser(path)

    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ''
    # '' from the lookup means the name could not be resolved; an empty
    # user/group argument instead means "leave unchanged" (-1).
    if uid == '':
        if not user:
            uid = -1
        else:
            err += 'User does not exist\n'
    if gid == '':
        if not group:
            gid = -1
        else:
            err += 'Group does not exist\n'

    if not os.path.exists(path):
        # Broken symlinks will return false, but still need to be chowned
        try:
            return os.lchown(path, uid, gid)
        except OSError:
            pass
        err += 'File not found'
    if err:
        return err
    return os.chown(path, uid, gid)
The following checksum algorithms are supported: * md5 * sha1 * sha224 * sha256 **(default)** * sha384 * sha512 path path to the file or directory form desired sum format CLI Example: .. code-block:: bash salt '*' file.get_sum /etc/passwd sha512 ''' path = os.path.expanduser(path) if not os.path.isfile(path): return 'File not found' return salt.utils.get_hash(path, form, 4096) def get_hash(path, form='sha256', chunk_size=65536): ''' Get the hash sum of a file This is better than ``get_sum`` for the following reasons: - It does not read the entire file into memory. - It does not return a string on error. The returned value of ``get_sum`` cannot really be trusted since it is vulnerable to collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'`` path path to the file or directory form desired sum format chunk_size amount to sum at once CLI Example: .. code-block:: bash salt '*' file.get_hash /etc/shadow ''' return salt.utils.get_hash(os.path.expanduser(path), form, chunk_size) def get_source_sum(file_name='', source='', source_hash=None, source_hash_name=None, saltenv='base'): ''' .. versionadded:: 2016.11.0 Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to obtain the hash and hash type from the parameters specified below. file_name Optional file name being managed, for matching with :py:func:`file.extract_hash <salt.modules.file.extract_hash>`. .. versionadded:: 2016.11.0 source Source file, as used in :py:mod:`file <salt.states.file>` and other states. If ``source_hash`` refers to a file containing hashes, then this filename will be used to match a filename in that file. If the ``source_hash`` is a hash expression, then this argument will be ignored. source_hash Hash file/expression, as used in :py:mod:`file <salt.states.file>` and other states. If this value refers to a remote URL or absolute path to a local file, it will be cached and :py:func:`file.extract_hash <salt.modules.file.extract_hash>` will be used to obtain a hash from it. 
    source_hash_name
        Specific file name to look for when ``source_hash`` refers to a
        remote file, used to disambiguate ambiguous matches.

        .. versionadded:: 2016.11.0

    saltenv : base
        Salt fileserver environment from which to retrieve the source_hash.
        This value will only be used when ``source_hash`` refers to a file
        on the Salt fileserver (i.e. one beginning with ``salt://``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz
    '''
    def _invalid_source_hash_format():
        '''
        DRY helper for reporting invalid source_hash input
        '''
        raise CommandExecutionError(
            'Source hash {0} format is invalid. The supported formats are: '
            '1) a hash, 2) an expression in the format <hash_type>=<hash>, or '
            '3) either a path to a local file containing hashes, or a URI of '
            'a remote hash file. Supported protocols for remote hash files '
            'are: {1}. The hash may also not be of a valid length, the '
            'following are supported hash types and lengths: {2}.'.format(
                source_hash,
                ', '.join(salt.utils.files.VALID_PROTOS),
                ', '.join(
                    ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                     for x in sorted(HASHES_REVMAP)]
                ),
            )
        )

    # Figure out whether source_hash is a (local or cacheable remote) hash
    # *file*, or an inline hash expression. hash_fn stays None for the
    # inline-expression case.
    hash_fn = None
    if os.path.isabs(source_hash):
        hash_fn = source_hash
    else:
        try:
            proto = _urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                # Remote hash file: pull it down via the fileclient.
                hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
                if not hash_fn:
                    raise CommandExecutionError(
                        'Source hash file {0} not found'.format(source_hash)
                    )
            else:
                if proto != '':
                    # Some unsupported protocol (e.g. foo://) is being used.
                    # We'll get into this else block if a hash expression
                    # (like md5=<md5 checksum here>), but in those cases, the
                    # protocol will be an empty string, in which case we
                    # avoid this error condition.
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            # source_hash was not a string (e.g. None) — not a valid input.
            _invalid_source_hash_format()

    if hash_fn is not None:
        # Hash file case: scan the cached file for a matching entry.
        ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
        if ret is None:
            _invalid_source_hash_format()
        return ret
    else:
        # The source_hash is a hash expression
        ret = {}
        try:
            ret['hash_type'], ret['hsum'] = \
                [x.strip() for x in source_hash.split('=', 1)]
        except AttributeError:
            _invalid_source_hash_format()
        except ValueError:
            # No hash type, try to figure out by hash length
            if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret['hsum'] = source_hash
            source_hash_len = len(source_hash)
            if source_hash_len in HASHES_REVMAP:
                ret['hash_type'] = HASHES_REVMAP[source_hash_len]
            else:
                _invalid_source_hash_format()

        if ret['hash_type'] not in HASHES:
            raise CommandExecutionError(
                'Invalid hash type \'{0}\'. Supported hash types are: {1}. '
                'Either remove the hash type and simply use \'{2}\' as the '
                'source_hash, or change the hash type to a supported type.'
                .format(ret['hash_type'], ', '.join(HASHES), ret['hsum'])
            )
        else:
            # Hash type was given explicitly: cross-check the digest length.
            hsum_len = len(ret['hsum'])
            if hsum_len not in HASHES_REVMAP:
                _invalid_source_hash_format()
            elif hsum_len != HASHES[ret['hash_type']]:
                raise CommandExecutionError(
                    'Invalid length ({0}) for hash type \'{1}\'. Either '
                    'remove the hash type and simply use \'{2}\' as the '
                    'source_hash, or change the hash type to \'{3}\''.format(
                        hsum_len,
                        ret['hash_type'],
                        ret['hsum'],
                        HASHES_REVMAP[hsum_len],
                    )
                )
        return ret


def check_hash(path, file_hash):
    '''
    Check if a file matches the given hash string

    Returns ``True`` if the hash matches, otherwise ``False``.

    path
        Path to a file local to the minion.

    hash
        The hash to check against the file specified in the ``path`` argument.
    For versions 2016.11.4 and newer, the hash can be specified without an
    accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but
    for earlier releases it is necessary to also specify the hash type in
    the format ``<hash_type>:<hash_value>`` (e.g.
    ``md5:e138491e9d5b97023cea823fe17bac22``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
        salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22
    '''
    path = os.path.expanduser(path)

    if not isinstance(file_hash, six.string_types):
        raise SaltInvocationError('hash must be a string')

    # Accept either '<type>:<hash>' or '<type>=<hash>'; a bare hash falls
    # through to the for-else, where the type is inferred from the length.
    for sep in (':', '='):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    else:
        hash_value = file_hash
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            raise SaltInvocationError(
                'Hash {0} (length: {1}) could not be matched to a supported '
                'hash type. The supported hash types and lengths are: '
                '{2}'.format(
                    file_hash,
                    hash_len,
                    ', '.join(
                        ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                         for x in sorted(HASHES_REVMAP)]
                    ),
                )
            )

    return get_hash(path, hash_type) == hash_value


def find(path, *args, **kwargs):
    '''
    Approximate the Unix ``find(1)`` command and return a list of paths that
    meet the specified criteria.

    The options include match criteria:

    .. code-block:: text

        name    = path-glob                 # case sensitive
        iname   = path-glob                 # case insensitive
        regex   = path-regex                # case sensitive
        iregex  = path-regex                # case insensitive
        type    = file-types                # match any listed type
        user    = users                     # match any listed user
        group   = groups                    # match any listed group
        size    = [+-]number[size-unit]     # default unit = byte
        mtime   = interval                  # modified since date
        grep    = regex                     # search file contents

    and/or actions:

    .. code-block:: text

        delete [= file-types]               # default type = 'f'
        exec    = command [arg ...]         # where {} is replaced by pathname
        print  [= print-opts]

    and/or depth criteria:

    ..
code-block:: text maxdepth = maximum depth to transverse in path mindepth = minimum depth to transverse before checking files or directories The default action is ``print=path`` ``path-glob``: .. code-block:: text * = match zero or more chars ? = match any char [abc] = match a, b, or c [!abc] or [^abc] = match anything except a, b, and c [x-y] = match chars x through y [!x-y] or [^x-y] = match anything except chars x through y {a,b,c} = match a or b or c ``path-regex``: a Python Regex (regular expression) pattern to match pathnames ``file-types``: a string of one or more of the following: .. code-block:: text a: all file types b: block device c: character device d: directory p: FIFO (named pipe) f: plain file l: symlink s: socket ``users``: a space and/or comma separated list of user names and/or uids ``groups``: a space and/or comma separated list of group names and/or gids ``size-unit``: .. code-block:: text b: bytes k: kilobytes m: megabytes g: gigabytes t: terabytes interval: .. code-block:: text [<num>w] [<num>d] [<num>h] [<num>m] [<num>s] where: w: week d: day h: hour m: minute s: second print-opts: a comma and/or space separated list of one or more of the following: .. code-block:: text group: group name md5: MD5 digest of file contents mode: file permissions (as integer) mtime: last modification time (as time_t) name: file basename path: file absolute path size: file size in bytes type: file type user: user name CLI Examples: .. 
code-block:: bash salt '*' file.find / type=f name=\\*.bak size=+10m salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete ''' if 'delete' in args: kwargs['delete'] = 'f' elif 'print' in args: kwargs['print'] = 'path' try: finder = salt.utils.find.Finder(kwargs) except ValueError as ex: return 'error: {0}'.format(ex) ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i] ret.sort() return ret def _sed_esc(string, escape_all=False): ''' Escape single quotes and forward slashes ''' special_chars = "^.[$()|*+?{" string = string.replace("'", "'\"'\"'").replace("/", "\\/") if escape_all is True: for char in special_chars: string = string.replace(char, "\\" + char) return string def sed(path, before, after, limit='', backup='.bak', options='-r -e', flags='g', escape_all=False, negate_match=False): ''' .. deprecated:: 0.17.0 Use :py:func:`~salt.modules.file.replace` instead. Make a simple edit to a file Equivalent to: .. code-block:: bash sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>" path The full path to the file to be edited before A pattern to find in order to replace with ``after`` after Text that will replace ``before`` limit : ``''`` An initial pattern to search for before searching for ``before`` backup : ``.bak`` The file will be backed up before edit with this file extension; **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will overwrite this backup options : ``-r -e`` Options to pass to sed flags : ``g`` Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern matching negate_match : False Negate the search command (``!``) .. versionadded:: 0.17.0 Forward slashes and single quotes will be escaped automatically in the ``before`` and ``after`` patterns. CLI Example: .. 
code-block:: bash salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info' ''' # Largely inspired by Fabric's contrib.files.sed() # XXX:dc: Do we really want to always force escaping? # path = os.path.expanduser(path) if not os.path.exists(path): return False # Mandate that before and after are strings before = str(before) after = str(after) before = _sed_esc(before, escape_all) after = _sed_esc(after, escape_all) limit = _sed_esc(limit, escape_all) if sys.platform == 'darwin': options = options.replace('-r', '-E') cmd = ['sed'] cmd.append('-i{0}'.format(backup) if backup else '-i') cmd.extend(salt.utils.shlex_split(options)) cmd.append( r'{limit}{negate_match}s/{before}/{after}/{flags}'.format( limit='/{0}/ '.format(limit) if limit else '', negate_match='!' if negate_match else '', before=before, after=after, flags=flags ) ) cmd.append(path) return __salt__['cmd.run_all'](cmd, python_shell=False) def sed_contains(path, text, limit='', flags='g'): ''' .. deprecated:: 0.17.0 Use :func:`search` instead. Return True if the file at ``path`` contains ``text``. Utilizes sed to perform the search (line-wise search). Note: the ``p`` flag will be added to any flags you pass in. CLI Example: .. code-block:: bash salt '*' file.contains /etc/crontab 'mymaintenance.sh' ''' # Largely inspired by Fabric's contrib.files.contains() path = os.path.expanduser(path) if not os.path.exists(path): return False before = _sed_esc(str(text), False) limit = _sed_esc(str(limit), False) options = '-n -r -e' if sys.platform == 'darwin': options = options.replace('-r', '-E') cmd = ['sed'] cmd.extend(salt.utils.shlex_split(options)) cmd.append( r'{limit}s/{before}/$/{flags}'.format( limit='/{0}/ '.format(limit) if limit else '', before=before, flags='p{0}'.format(flags) ) ) cmd.append(path) result = __salt__['cmd.run'](cmd, python_shell=False) return bool(result) def psed(path, before, after, limit='', backup='.bak', flags='gMS', escape_all=False, multi=False): ''' .. 
deprecated:: 0.17.0 Use :py:func:`~salt.modules.file.replace` instead. Make a simple edit to a file (pure Python version) Equivalent to: .. code-block:: bash sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>" path The full path to the file to be edited before A pattern to find in order to replace with ``after`` after Text that will replace ``before`` limit : ``''`` An initial pattern to search for before searching for ``before`` backup : ``.bak`` The file will be backed up before edit with this file extension; **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will overwrite this backup flags : ``gMS`` Flags to modify the search. Valid values are: - ``g``: Replace all occurrences of the pattern, not just the first. - ``I``: Ignore case. - ``L``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\s`` and ``\\S`` dependent on the locale. - ``M``: Treat multiple lines as a single line. - ``S``: Make `.` match all characters, including newlines. - ``U``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\d``, ``\\D``, ``\\s`` and ``\\S`` dependent on Unicode. - ``X``: Verbose (whitespace is ignored). multi: ``False`` If True, treat the entire file as a single line Forward slashes and single quotes will be escaped automatically in the ``before`` and ``after`` patterns. CLI Example: .. code-block:: bash salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info' ''' # Largely inspired by Fabric's contrib.files.sed() # XXX:dc: Do we really want to always force escaping? # # Mandate that before and after are strings path = os.path.expanduser(path) multi = bool(multi) before = str(before) after = str(after) before = _sed_esc(before, escape_all) # The pattern to replace with does not need to be escaped!!! 
#after = _sed_esc(after, escape_all) limit = _sed_esc(limit, escape_all) shutil.copy2(path, '{0}{1}'.format(path, backup)) with salt.utils.fopen(path, 'w') as ofile: with salt.utils.fopen('{0}{1}'.format(path, backup), 'r') as ifile: if multi is True: for line in ifile.readline(): ofile.write(_psed(line, before, after, limit, flags)) else: ofile.write(_psed(ifile.read(), before, after, limit, flags)) RE_FLAG_TABLE = {'I': re.I, 'L': re.L, 'M': re.M, 'S': re.S, 'U': re.U, 'X': re.X} def _psed(text, before, after, limit, flags): ''' Does the actual work for file.psed, so that single lines can be passed in ''' atext = text if limit: limit = re.compile(limit) comps = text.split(limit) atext = ''.join(comps[1:]) count = 1 if 'g' in flags: count = 0 flags = flags.replace('g', '') aflags = 0 for flag in flags: aflags |= RE_FLAG_TABLE[flag] before = re.compile(before, flags=aflags) text = re.sub(before, after, atext, count=count) return text def uncomment(path, regex, char='#', backup='.bak'): ''' .. deprecated:: 0.17.0 Use :py:func:`~salt.modules.file.replace` instead. Uncomment specified commented lines in a file path The full path to the file to be edited regex A regular expression used to find the lines that are to be uncommented. This regex should not include the comment character. A leading ``^`` character will be stripped for convenience (for easily switching between comment() and uncomment()). char : ``#`` The character to remove in order to uncomment a line backup : ``.bak`` The file will be backed up before edit with this file extension; **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will overwrite this backup CLI Example: .. code-block:: bash salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID' ''' return comment_line(path=path, regex=regex, char=char, cmnt=False, backup=backup) def comment(path, regex, char='#', backup='.bak'): ''' .. deprecated:: 0.17.0 Use :py:func:`~salt.modules.file.replace` instead. 
    Comment out specified lines in a file

    path
        The full path to the file to be edited

    regex
        A regular expression used to find the lines that are to be
        commented; this pattern will be wrapped in parenthesis and will move
        any preceding/trailing ``^`` or ``$`` characters outside the
        parenthesis (e.g., the pattern ``^foo$`` will be rewritten as
        ``^(foo)$``)

    char : ``#``
        The character to be inserted at the beginning of a line in order to
        comment it out

    backup : ``.bak``
        The file will be backed up before edit with this file extension

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment`` /
            ``uncomment`` is called. Meaning the backup will only be useful
            after the first invocation.

    CLI Example:

    .. code-block:: bash

        salt '*' file.comment /etc/modules pcspkr
    '''
    # Thin wrapper: commenting and uncommenting share one implementation.
    return comment_line(path=path,
                        regex=regex,
                        char=char,
                        cmnt=True,
                        backup=backup)


def comment_line(path,
                 regex,
                 char='#',
                 cmnt=True,
                 backup='.bak'):
    r'''
    Comment or Uncomment a line in a text file.

    :param path: string
        The full path to the text file.

    :param regex: string
        A regex expression that begins with ``^`` that will find the line you
        wish to comment. Can be as simple as ``^color =``

    :param char: string
        The character used to comment a line in the type of file you're
        referencing. Default is ``#``

    :param cmnt: boolean
        True to comment the line. False to uncomment the line. Default is
        True.

    :param backup: string
        The file extension to give the backup file. Default is ``.bak``
        Set to False/None to not keep a backup.

    :return: boolean
        Returns True if successful, False if not

    CLI Example:

    The following example will comment out the ``pcspkr`` line in the
    ``/etc/modules`` file using the default ``#`` character and create a
    backup file named ``modules.bak``

    ..
code-block:: bash salt '*' file.comment_line '/etc/modules' '^pcspkr' CLI Example: The following example will uncomment the ``log_level`` setting in ``minion`` config file if it is set to either ``warning``, ``info``, or ``debug`` using the ``#`` character and create a backup file named ``minion.bk`` .. code-block:: bash salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk' ''' # Get the regex for comment or uncomment if cmnt: regex = '{0}({1}){2}'.format( '^' if regex.startswith('^') else '', regex.lstrip('^').rstrip('$'), '$' if regex.endswith('$') else '') else: regex = r'^{0}\s*({1}){2}'.format( char, regex.lstrip('^').rstrip('$'), '$' if regex.endswith('$') else '') # Load the real path to the file path = os.path.realpath(os.path.expanduser(path)) # Make sure the file exists if not os.path.isfile(path): raise SaltInvocationError('File not found: {0}'.format(path)) # Make sure it is a text file if not salt.utils.istextfile(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}'.format(path)) # First check the whole file, determine whether to make the replacement # Searching first avoids modifying the time stamp if there are no changes found = False # Dictionaries for comparing changes orig_file = [] new_file = [] # Buffer size for fopen bufsize = os.path.getsize(path) try: # Use a read-only handle to open the file with salt.utils.fopen(path, mode='rb', buffering=bufsize) as r_file: # Loop through each line of the file and look for a match for line in r_file: # Is it in this line if re.match(regex, line): # Load lines into dictionaries, set found to True orig_file.append(line) if cmnt: new_file.append('{0}{1}'.format(char, line)) else: new_file.append(line.lstrip(char)) found = True except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to open file '{0}'. " "Exception: {1}".format(path, exc) ) # We've searched the whole file. 
If we didn't find anything, return False if not found: return False if not salt.utils.is_windows(): pre_user = get_user(path) pre_group = get_group(path) pre_mode = salt.utils.normalize_mode(get_mode(path)) # Create a copy to read from and to use as a backup later try: temp_file = _mkstemp_copy(path=path, preserve_inode=False) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) try: # Open the file in write mode with salt.utils.fopen(path, mode='wb', buffering=bufsize) as w_file: try: # Open the temp file in read mode with salt.utils.fopen(temp_file, mode='rb', buffering=bufsize) as r_file: # Loop through each line of the file and look for a match for line in r_file: try: # Is it in this line if re.match(regex, line): # Write the new line if cmnt: w_file.write('{0}{1}'.format(char, line)) else: w_file.write(line.lstrip(char)) else: # Write the existing line (no change) w_file.write(line) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to write file '{0}'. Contents may " "be truncated. Temporary file contains copy " "at '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) if backup: # Move the backup file to the original directory backup_name = '{0}{1}'.format(path, backup) try: shutil.move(temp_file, backup_name) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to move the temp file '{0}' to the " "backup file '{1}'. 
" "Exception: {2}".format(path, temp_file, exc) ) else: os.remove(temp_file) if not salt.utils.is_windows(): check_perms(path, None, pre_user, pre_group, pre_mode) # Return a diff using the two dictionaries return ''.join(difflib.unified_diff(orig_file, new_file)) def _get_flags(flags): ''' Return an integer appropriate for use as a flag for the re module from a list of human-readable strings .. code-block:: python >>> _get_flags(['MULTILINE', 'IGNORECASE']) 10 >>> _get_flags('MULTILINE') 8 >>> _get_flags(2) 2 ''' if isinstance(flags, six.string_types): flags = [flags] if isinstance(flags, Iterable) and not isinstance(flags, Mapping): _flags_acc = [] for flag in flags: _flag = getattr(re, str(flag).upper()) if not isinstance(_flag, six.integer_types): raise SaltInvocationError( 'Invalid re flag given: {0}'.format(flag) ) _flags_acc.append(_flag) return reduce(operator.__or__, _flags_acc) elif isinstance(flags, six.integer_types): return flags else: raise SaltInvocationError( 'Invalid re flags: "{0}", must be given either as a single flag ' 'string, a list of strings, or as an integer'.format(flags) ) def _add_flags(flags, new_flags): ''' Combine ``flags`` and ``new_flags`` ''' flags = _get_flags(flags) new_flags = _get_flags(new_flags) return flags | new_flags def _mkstemp_copy(path, preserve_inode=True): ''' Create a temp file and move/copy the contents of ``path`` to the temp file. Return the path to the temp file. path The full path to the file whose contents will be moved/copied to a temp file. Whether it's moved or copied depends on the value of ``preserve_inode``. preserve_inode Preserve the inode of the file, so that any hard links continue to share the inode with the original filename. This works by *copying* the file, reading from the copy, and writing to the file at the original inode. If ``False``, the file will be *moved* rather than copied, and a new file will be written to a new inode, but using the original filename. 
Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). Default is ``True``. ''' temp_file = None # Create the temp file try: temp_file = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to create temp file. " "Exception: {0}".format(exc) ) # use `copy` to preserve the inode of the # original file, and thus preserve hardlinks # to the inode. otherwise, use `move` to # preserve prior behavior, which results in # writing the file to a new inode. if preserve_inode: try: shutil.copy2(path, temp_file) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to copy file '{0}' to the " "temp file '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) else: try: shutil.move(path, temp_file) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to move file '{0}' to the " "temp file '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) return temp_file def _starts_till(src, probe, strip_comments=True): ''' Returns True if src and probe at least begins till some point. ''' def _strip_comments(txt): ''' Strip possible comments. Usually commends are one or two symbols ''' buff = txt.split(" ", 1) return len(buff) == 2 and len(buff[0]) < 2 and buff[1] or txt def _to_words(txt): ''' Split by words ''' return txt and [w for w in txt.strip().split(" ") if w.strip()] or txt no_match = -1 equal = 0 if not src or not probe: return no_match if src == probe: return equal src = _to_words(strip_comments and _strip_comments(src) or src) probe = _to_words(strip_comments and _strip_comments(probe) or probe) a_buff, b_buff = len(src) < len(probe) and (src, probe) or (probe, src) b_buff = ' '.join(b_buff) for idx in range(len(a_buff)): prb = ' '.join(a_buff[:-(idx + 1)]) if prb and b_buff.startswith(prb): return idx return no_match def _regex_to_static(src, regex): ''' Expand regular expression to static match. 
''' if not src or not regex: return None try: src = re.search(regex, src) except Exception as ex: raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex)) return src and src.group() or regex def _assert_occurrence(src, probe, target, amount=1): ''' Raise an exception, if there are different amount of specified occurrences in src. ''' occ = src.count(probe) if occ > amount: msg = 'more than' elif occ < amount: msg = 'less than' elif not occ: msg = 'no' else: msg = None if msg: raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target)) def _get_line_indent(src, line, indent): ''' Indent the line with the source line. ''' if not indent: return line idt = [] for c in src: if c not in ['\t', ' ']: break idt.append(c) return ''.join(idt) + line.strip() def line(path, content, match=None, mode=None, location=None, before=None, after=None, show_changes=True, backup=False, quiet=False, indent=True): ''' .. versionadded:: 2015.8.0 Edit a line in the configuration file. The ``path`` and ``content`` arguments are required, as well as passing in one of the ``mode`` options. path Filesystem path to the file to be edited. content Content of the line. match Match the target line for an action by a fragment of a string or regular expression. If neither ``before`` nor ``after`` are provided, and ``match`` is also ``None``, match becomes the ``content`` value. mode Defines how to edit a line. One of the following options is required: - ensure If line does not exist, it will be added. This is based on the ``content`` argument. - replace If line already exists, it will be replaced. - delete Delete the line, once found. - insert Insert a line. .. note:: If ``mode=insert`` is used, at least one of the following options must also be defined: ``location``, ``before``, or ``after``. If ``location`` is used, it takes precedence over the other two options. location Defines where to place content in the line. 
        Note this option is only used when ``mode=insert`` is specified. If a
        location is passed in, it takes precedence over both the ``before``
        and ``after`` kwargs.

        Valid locations are:

        - start
            Place the content at the beginning of the file.
        - end
            Place the content at the end of the file.

    before
        Regular expression or an exact case-sensitive fragment of the
        string. This option is only used when either the ``ensure`` or
        ``insert`` mode is defined.

    after
        Regular expression or an exact case-sensitive fragment of the
        string. This option is only used when either the ``ensure`` or
        ``insert`` mode is defined.

    show_changes
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made. Default is ``True``

        .. note::
            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to
            generate the diff.

    backup
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".

    quiet
        Do not raise any exceptions. E.g. ignore the fact that the file that
        is tried to be edited does not exist and nothing really happened.

    indent
        Keep indentation with the previous line. This option is not
        considered when the ``delete`` mode is specified.

    CLI Example:

    .. code-block:: bash

        salt '*' file.line /etc/nsswitch.conf "networks:\tfiles dns" after="hosts:.*?" mode='ensure'

    .. note::

        If an equal sign (``=``) appears in an argument to a Salt command, it is
        interpreted as a keyword argument in the format of ``key=val``. That
        processing can be bypassed in order to pass an equal sign through to the
        remote shell command by manually specifying the kwarg:

        ..
        code-block:: bash

            salt '*' file.line /path/to/file content="CREATEMAIL_SPOOL=no" match="CREATE_MAIL_SPOOL=yes" mode="replace"
    '''
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError('File "{0}" does not exists or is not a file.'.format(path))
        return False  # No changes had happened

    mode = mode and mode.lower() or mode
    if mode not in ['insert', 'ensure', 'delete', 'replace']:
        if mode is None:
            raise CommandExecutionError('Mode was not defined. How to process the file?')
        else:
            raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))

    # Before/after has privilege. If nothing defined, match is used by content.
    if before is None and after is None and not match:
        match = content

    with salt.utils.fopen(path, mode='r') as fp_:
        body = fp_.read()
    # Hash the original content so a change can be detected at the end.
    body_before = hashlib.sha256(salt.utils.to_bytes(body)).hexdigest()
    # Resolve regex parameters to the literal text they match in the body.
    after = _regex_to_static(body, after)
    before = _regex_to_static(body, before)
    match = _regex_to_static(body, match)

    if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
        log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
        body = ''
    elif mode == 'delete':
        # Drop every line that contains the match text.
        body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
    elif mode == 'replace':
        # Swap matching lines for the (re-indented) content, leaving lines
        # that already equal the content untouched.
        body = os.linesep.join([(_get_line_indent(file_line, content, indent)
                                if (file_line.find(match) > -1 and not file_line == content) else file_line)
                                for file_line in body.split(os.linesep)])
    elif mode == 'insert':
        if not location and not before and not after:
            raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')

        if not location:
            if before and after:
                _assert_occurrence(body, before, 'before')
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    # Insert between the "after" line and the "before" line.
                    if _line.find(before) > -1 and idx <= len(lines) and lines[idx - 1].find(after) > -1:
                        out.append(_get_line_indent(_line, content, indent))
                        out.append(_line)
                    else:
                        out.append(_line)
                body = os.linesep.join(out)

            if before and not after:
                _assert_occurrence(body, before, 'before')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1:
                        cnd = _get_line_indent(_line, content, indent)
                        if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0):  # Job for replace instead
                            out.append(cnd)
                    out.append(_line)
                body = os.linesep.join(out)

            elif after and not before:
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    out.append(_line)
                    cnd = _get_line_indent(_line, content, indent)
                    if _line.find(after) > -1:
                        # No dupes or append, if "after" is the last line
                        if (idx < len(lines) and _starts_till(lines[idx + 1], cnd) < 0) or idx + 1 == len(lines):
                            out.append(cnd)
                body = os.linesep.join(out)

        else:
            if location == 'start':
                body = ''.join([content, body])
            elif location == 'end':
                body = ''.join([body, _get_line_indent(body[-1], content, indent) if body else content])

    elif mode == 'ensure':
        after = after and after.strip()
        before = before and before.strip()

        if before and after:
            _assert_occurrence(body, before, 'before')
            _assert_occurrence(body, after, 'after')

            # Locate the first "before" and "after" lines (1-based indices).
            a_idx = b_idx = -1
            idx = 0
            body = body.split(os.linesep)
            for _line in body:
                idx += 1
                if _line.find(before) > -1 and b_idx < 0:
                    b_idx = idx
                if _line.find(after) > -1 and a_idx < 0:
                    a_idx = idx

            # Add
            if not b_idx - a_idx - 1:
                body = body[:a_idx] + [content] + body[b_idx - 1:]
            elif b_idx - a_idx - 1 == 1:
                if _starts_till(body[a_idx:b_idx - 1][0], content) > -1:
                    body[a_idx] = _get_line_indent(body[a_idx - 1], content, indent)
            else:
                raise CommandExecutionError('Found more than one line between boundaries "before" and "after".')
            body = os.linesep.join(body)

        elif before and not after:
            _assert_occurrence(body, before, 'before')
            body = body.split(os.linesep)
            out = []
            for idx in range(len(body)):
                if body[idx].find(before) > -1:
                    prev = (idx > 0 and idx or 1) - 1
                    out.append(_get_line_indent(body[prev], content, indent))
                    if _starts_till(out[prev], content) > -1:
                        del out[prev]
                out.append(body[idx])
            body = os.linesep.join(out)

        elif not before and after:
            _assert_occurrence(body, after, 'after')
            body = body.split(os.linesep)
            skip = None
            out = []
            for idx in range(len(body)):
                if skip != body[idx]:
                    out.append(body[idx])

                if body[idx].find(after) > -1:
                    next_line = idx + 1 < len(body) and body[idx + 1] or None
                    if next_line is not None and _starts_till(next_line, content) > -1:
                        skip = next_line
                    out.append(_get_line_indent(body[idx], content, indent))

            body = os.linesep.join(out)

        else:
            raise CommandExecutionError("Wrong conditions? "
                                        "Unable to ensure line without knowing "
                                        "where to put it before and/or after.")

    # A change happened iff the content hash moved.
    changed = body_before != hashlib.sha256(salt.utils.to_bytes(body)).hexdigest()

    if backup and changed and __opts__['test'] is False:
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())))
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc))

    changes_diff = None

    if changed:
        if show_changes:
            with salt.utils.fopen(path, 'r') as fp_:
                path_content = _splitlines_preserving_trailing_newline(
                    fp_.read())
            changes_diff = ''.join(difflib.unified_diff(
                path_content, _splitlines_preserving_trailing_newline(body)))
        if __opts__['test'] is False:
            fh_ = None
            try:
                # Atomic write so readers never observe a half-written file.
                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
                fh_.write(body)
            finally:
                if fh_:
                    fh_.close()

    return show_changes and changes_diff or changed


def replace(path,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            dry_run=False,
            search_only=False,
            show_changes=True,
            ignore_if_missing=False,
            preserve_inode=True,
            ):
    '''
    .. versionadded:: 0.17.0

    Replace occurrences of a pattern in a file. If ``show_changes`` is
    ``True``, then a diff of what changed will be returned, otherwise a
    ``True`` will be returned when changes are made, and ``False`` when
    no changes are made.

    This is a pure Python implementation that wraps Python's
    :py:func:`~re.sub`.

    path
        Filesystem path to the file to be edited. If a symlink is specified,
        it will be resolved to its target.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text

    count : 0
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.
flags (list or int) A list of flags defined in the :ref:`re module documentation <contents-of-module-re>`. Each list item should be a string that will correlate to the human-friendly flag name. E.g., ``['IGNORECASE', 'MULTILINE']``. Optionally, ``flags`` may be an int, with a value corresponding to the XOR (``|``) of all the desired flags. Defaults to 8 (which supports 'MULTILINE'). bufsize (int or str) How much of the file to buffer into memory at once. The default value ``1`` processes one line at a time. The special value ``file`` may be specified which will read the entire file into memory before processing. append_if_not_found : False .. versionadded:: 2014.7.0 If set to ``True``, and pattern is not found, then the content will be appended to the file. prepend_if_not_found : False .. versionadded:: 2014.7.0 If set to ``True`` and pattern is not found, then the content will be prepended to the file. not_found_content .. versionadded:: 2014.7.0 Content to use for append/prepend if not found. If None (default), uses ``repl``. Useful when ``repl`` uses references to group in pattern. backup : .bak The file extension to use for a backup of the file before editing. Set to ``False`` to skip making a backup. dry_run : False If set to ``True``, no changes will be made to the file, the function will just return the changes that would have been made (or a ``True``/``False`` value if ``show_changes`` is set to ``False``). search_only : False If set to true, this no changes will be performed on the file, and this function will simply return ``True`` if the pattern was matched, and ``False`` if not. show_changes : True If ``True``, return a diff of changes made. Otherwise, return ``True`` if changes were made, and ``False`` if not. .. note:: Using this option will store two copies of the file in memory (the original version and the edited version) in order to generate the diff. This may not normally be a concern, but could impact performance if used with large files. 
ignore_if_missing : False .. versionadded:: 2015.8.0 If set to ``True``, this function will simply return ``False`` if the file doesn't exist. Otherwise, an error will be thrown. preserve_inode : True .. versionadded:: 2015.8.0 Preserve the inode of the file, so that any hard links continue to share the inode with the original filename. This works by *copying* the file, reading from the copy, and writing to the file at the original inode. If ``False``, the file will be *moved* rather than copied, and a new file will be written to a new inode, but using the original filename. Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. That processing can be bypassed in order to pass an equal sign through to the remote shell command by manually specifying the kwarg: .. code-block:: bash salt '*' file.replace /path/to/file pattern='=' repl=':' salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:' CLI Examples: .. 
code-block:: bash salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info' salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]' ''' symlink = False if is_link(path): symlink = True target_path = os.readlink(path) given_path = os.path.expanduser(path) path = os.path.realpath(os.path.expanduser(path)) if not os.path.exists(path): if ignore_if_missing: return False else: raise SaltInvocationError('File not found: {0}'.format(path)) if not salt.utils.istextfile(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) ) if search_only and (append_if_not_found or prepend_if_not_found): raise SaltInvocationError( 'search_only cannot be used with append/prepend_if_not_found' ) if append_if_not_found and prepend_if_not_found: raise SaltInvocationError( 'Only one of append and prepend_if_not_found is permitted' ) flags_num = _get_flags(flags) cpattern = re.compile(salt.utils.to_bytes(pattern), flags_num) filesize = os.path.getsize(path) if bufsize == 'file': bufsize = filesize # Search the file; track if any changes have been made for the return val has_changes = False orig_file = [] # used if show_changes new_file = [] # used if show_changes if not salt.utils.is_windows(): pre_user = get_user(path) pre_group = get_group(path) pre_mode = salt.utils.normalize_mode(get_mode(path)) # Avoid TypeErrors by forcing repl to be bytearray related to mmap # Replacement text may contains integer: 123 for example repl = salt.utils.to_bytes(str(repl)) if not_found_content: not_found_content = salt.utils.to_bytes(not_found_content) found = False temp_file = None content = salt.utils.to_str(not_found_content) if not_found_content and \ (prepend_if_not_found or append_if_not_found) \ else salt.utils.to_str(repl) try: # First check the whole file, determine whether to make the replacement # Searching first avoids modifying the time stamp if there are no changes r_data = 
None # Use a read-only handle to open the file with salt.utils.fopen(path, mode='rb', buffering=bufsize) as r_file: try: # mmap throws a ValueError if the file is empty. r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) except (ValueError, mmap.error): # size of file in /proc is 0, but contains data r_data = salt.utils.to_bytes("".join(r_file)) if search_only: # Just search; bail as early as a match is found if re.search(cpattern, r_data): return True # `with` block handles file closure else: result, nrepl = re.subn(cpattern, repl, r_data, count) # found anything? (even if no change) if nrepl > 0: found = True # Identity check the potential change has_changes = True if pattern != repl else has_changes if prepend_if_not_found or append_if_not_found: # Search for content, to avoid pre/appending the # content if it was pre/appended in a previous run. if re.search(salt.utils.to_bytes('^{0}$'.format(re.escape(content))), r_data, flags=flags_num): # Content was found, so set found. found = True # Keep track of show_changes here, in case the file isn't # modified if show_changes or append_if_not_found or \ prepend_if_not_found: orig_file = r_data.read(filesize).splitlines(True) \ if isinstance(r_data, mmap.mmap) \ else r_data.splitlines(True) new_file = result.splitlines(True) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to open file '{0}'. " "Exception: {1}".format(path, exc) ) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() if has_changes and not dry_run: # Write the replacement text in this block. 
try: # Create a copy to read from and to use as a backup later temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) r_data = None try: # Open the file in write mode with salt.utils.fopen(path, mode='w', buffering=bufsize) as w_file: try: # Open the temp file in read mode with salt.utils.fopen(temp_file, mode='r', buffering=bufsize) as r_file: r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) result, nrepl = re.subn(cpattern, repl, r_data, count) try: w_file.write(salt.utils.to_str(result)) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to write file '{0}'. Contents may " "be truncated. Temporary file contains copy " "at '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) if not found and (append_if_not_found or prepend_if_not_found): if not_found_content is None: not_found_content = repl if prepend_if_not_found: new_file.insert(0, not_found_content + b'\n') else: # append_if_not_found # Make sure we have a newline at the end of the file if 0 != len(new_file): if not new_file[-1].endswith(b'\n'): new_file[-1] += b'\n' new_file.append(not_found_content + b'\n') has_changes = True if not dry_run: try: # Create a copy to read from and for later use as a backup temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) # write new content in the file while avoiding partial reads try: fh_ = salt.utils.atomicfile.atomic_open(path, 'w') for line in new_file: fh_.write(salt.utils.to_str(line)) finally: fh_.close() if backup and has_changes and not dry_run: # keep the backup only 
if it was requested # and only if there were any changes backup_name = '{0}{1}'.format(path, backup) try: shutil.move(temp_file, backup_name) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to move the temp file '{0}' to the " "backup file '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) if symlink: symlink_backup = '{0}{1}'.format(given_path, backup) target_backup = '{0}{1}'.format(target_path, backup) # Always clobber any existing symlink backup # to match the behaviour of the 'backup' option try: os.symlink(target_backup, symlink_backup) except OSError: os.remove(symlink_backup) os.symlink(target_backup, symlink_backup) except: raise CommandExecutionError( "Unable create backup symlink '{0}'. " "Target was '{1}'. " "Exception: {2}".format(symlink_backup, target_backup, exc) ) elif temp_file: try: os.remove(temp_file) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to delete temp file '{0}'. " "Exception: {1}".format(temp_file, exc) ) if not dry_run and not salt.utils.is_windows(): check_perms(path, None, pre_user, pre_group, pre_mode) if show_changes: orig_file_as_str = ''.join([salt.utils.to_str(x) for x in orig_file]) new_file_as_str = ''.join([salt.utils.to_str(x) for x in new_file]) return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str)) return has_changes def blockreplace(path, marker_start='#-- start managed zone --', marker_end='#-- end managed zone --', content='', append_if_not_found=False, prepend_if_not_found=False, backup='.bak', dry_run=False, show_changes=True, append_newline=False, ): ''' .. versionadded:: 2014.1.0 Replace content of a text block in a file, delimited by line markers A block of content delimited by comments can help you manage several lines entries without worrying about old entries removal. .. 
note:: This function will store two copies of the file in-memory (the original version and the edited version) in order to detect changes and only edit the targeted file if necessary. path Filesystem path to the file to be edited marker_start The line content identifying a line as the start of the content block. Note that the whole line containing this marker will be considered, so whitespace or extra content before or after the marker is included in final output marker_end The line content identifying a line as the end of the content block. Note that the whole line containing this marker will be considered, so whitespace or extra content before or after the marker is included in final output content The content to be used between the two lines identified by marker_start and marker_stop. append_if_not_found : False If markers are not found and set to ``True`` then, the markers and content will be appended to the file. prepend_if_not_found : False If markers are not found and set to ``True`` then, the markers and content will be prepended to the file. backup The file extension to use for a backup of the file if any edit is made. Set to ``False`` to skip making a backup. dry_run Don't make any edits to the file. show_changes Output a unified diff of the old file and the new file. If ``False``, return a boolean if any changes were made. append_newline: Append a newline to the content block. For more information see: https://github.com/saltstack/salt/issues/33686 .. versionadded:: 2016.3.4 CLI Example: .. 
code-block:: bash salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\ '#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True ''' path = os.path.expanduser(path) if not os.path.exists(path): raise SaltInvocationError('File not found: {0}'.format(path)) if append_if_not_found and prepend_if_not_found: raise SaltInvocationError( 'Only one of append and prepend_if_not_found is permitted' ) if not salt.utils.istextfile(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) ) # Search the file; track if any changes have been made for the return val has_changes = False orig_file = [] new_file = [] in_block = False old_content = '' done = False # we do not use in_place editing to avoid file attrs modifications when # no changes are required and to avoid any file access on a partially # written file. # we could also use salt.utils.filebuffer.BufferedReader try: fi_file = fileinput.input(path, inplace=False, backup=False, bufsize=1, mode='r') for line in fi_file: result = line if marker_start in line: # managed block start found, start recording in_block = True else: if in_block: if marker_end in line: # end of block detected in_block = False # Check for multi-line '\n' terminated content as split will # introduce an unwanted additional new line. if content and content[-1] == '\n': content = content[:-1] # push new block content in file for cline in content.split('\n'): new_file.append(cline + '\n') done = True else: # remove old content, but keep a trace old_content += line result = None # else: we are not in the marked block, keep saving things orig_file.append(line) if result is not None: new_file.append(result) # end for. 
If we are here without block management we maybe have some problems, # or we need to initialise the marked block finally: fi_file.close() if in_block: # unterminated block => bad, always fail raise CommandExecutionError( 'Unterminated marked block. End of file reached before marker_end.' ) if not done: if prepend_if_not_found: # add the markers and content at the beginning of file new_file.insert(0, marker_end + '\n') if append_newline is True: new_file.insert(0, content + '\n') else: new_file.insert(0, content) new_file.insert(0, marker_start + '\n') done = True elif append_if_not_found: # Make sure we have a newline at the end of the file if 0 != len(new_file): if not new_file[-1].endswith('\n'): new_file[-1] += '\n' # add the markers and content at the end of file new_file.append(marker_start + '\n') if append_newline is True: new_file.append(content + '\n') else: new_file.append(content) new_file.append(marker_end + '\n') done = True else: raise CommandExecutionError( 'Cannot edit marked block. Markers were not found in file.' 
) if done: diff = ''.join(difflib.unified_diff(orig_file, new_file)) has_changes = diff is not '' if has_changes and not dry_run: # changes detected # backup file attrs perms = {} perms['user'] = get_user(path) perms['group'] = get_group(path) perms['mode'] = salt.utils.normalize_mode(get_mode(path)) # backup old content if backup is not False: backup_path = '{0}{1}'.format(path, backup) shutil.copy2(path, backup_path) # copy2 does not preserve ownership check_perms(backup_path, None, perms['user'], perms['group'], perms['mode']) # write new content in the file while avoiding partial reads try: fh_ = salt.utils.atomicfile.atomic_open(path, 'w') for line in new_file: fh_.write(line) finally: fh_.close() # this may have overwritten file attrs check_perms(path, None, perms['user'], perms['group'], perms['mode']) if show_changes: return diff return has_changes def search(path, pattern, flags=8, bufsize=1, ignore_if_missing=False, multiline=False ): ''' .. versionadded:: 0.17.0 Search for occurrences of a pattern in a file Except for multiline, params are identical to :py:func:`~salt.modules.file.replace`. multiline If true, inserts 'MULTILINE' into ``flags`` and sets ``bufsize`` to 'file'. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' file.search /etc/crontab 'mymaintenance.sh' ''' if multiline: flags = _add_flags(flags, 'MULTILINE') bufsize = 'file' # This function wraps file.replace on purpose in order to enforce # consistent usage, compatible regex's, expected behavior, *and* bugs. :) # Any enhancements or fixes to one should affect the other. return replace(path, pattern, '', flags=flags, bufsize=bufsize, dry_run=True, search_only=True, show_changes=False, ignore_if_missing=ignore_if_missing) def patch(originalfile, patchfile, options='', dry_run=False): ''' .. versionadded:: 0.10.4 Apply a patch to a file or directory. Equivalent to: .. code-block:: bash patch <options> -i <patchfile> <originalfile> Or, when a directory is patched: .. 
code-block:: bash patch <options> -i <patchfile> -d <originalfile> -p0 originalfile The full path to the file or directory to be patched patchfile A patch file to apply to ``originalfile`` options Options to pass to patch. CLI Example: .. code-block:: bash salt '*' file.patch /opt/file.txt /tmp/file.txt.patch ''' patchpath = salt.utils.which('patch') if not patchpath: raise CommandExecutionError( 'patch executable not found. Is the distribution\'s patch ' 'package installed?' ) cmd = [patchpath] cmd.extend(salt.utils.shlex_split(options)) if dry_run: if __grains__['kernel'] in ('FreeBSD', 'OpenBSD'): cmd.append('-C') else: cmd.append('--dry-run') # this argument prevents interactive prompts when the patch fails to apply. # the exit code will still be greater than 0 if that is the case. if '-N' not in cmd and '--forward' not in cmd: cmd.append('--forward') has_rejectfile_option = False for option in cmd: if option == '-r' or option.startswith('-r ') \ or option.startswith('--reject-file'): has_rejectfile_option = True break # by default, patch will write rejected patch files to <filename>.rej. # this option prevents that. if not has_rejectfile_option: cmd.append('--reject-file=-') cmd.extend(['-i', patchfile]) if os.path.isdir(originalfile): cmd.extend(['-d', originalfile]) has_strip_option = False for option in cmd: if option.startswith('-p') or option.startswith('--strip='): has_strip_option = True break if not has_strip_option: cmd.append('--strip=0') else: cmd.append(originalfile) return __salt__['cmd.run_all'](cmd, python_shell=False) def contains(path, text): ''' .. deprecated:: 0.17.0 Use :func:`search` instead. Return ``True`` if the file at ``path`` contains ``text`` CLI Example: .. 
code-block:: bash salt '*' file.contains /etc/crontab 'mymaintenance.sh' ''' path = os.path.expanduser(path) if not os.path.exists(path): return False stripped_text = str(text).strip() try: with salt.utils.filebuffer.BufferedReader(path) as breader: for chunk in breader: if stripped_text in chunk: return True return False except (IOError, OSError): return False def contains_regex(path, regex, lchar=''): ''' .. deprecated:: 0.17.0 Use :func:`search` instead. Return True if the given regular expression matches on any line in the text of a given file. If the lchar argument (leading char) is specified, it will strip `lchar` from the left side of each line before trying to match CLI Example: .. code-block:: bash salt '*' file.contains_regex /etc/crontab ''' path = os.path.expanduser(path) if not os.path.exists(path): return False try: with salt.utils.fopen(path, 'r') as target: for line in target: if lchar: line = line.lstrip(lchar) if re.search(regex, line): return True return False except (IOError, OSError): return False def contains_glob(path, glob_expr): ''' .. deprecated:: 0.17.0 Use :func:`search` instead. Return ``True`` if the given glob matches a string in the named file CLI Example: .. code-block:: bash salt '*' file.contains_glob /etc/foobar '*cheese*' ''' path = os.path.expanduser(path) if not os.path.exists(path): return False try: with salt.utils.filebuffer.BufferedReader(path) as breader: for chunk in breader: if fnmatch.fnmatch(chunk, glob_expr): return True return False except (IOError, OSError): return False def append(path, *args, **kwargs): ''' .. versionadded:: 0.9.5 Append text to the end of a file path path to file `*args` strings to append to file CLI Example: .. code-block:: bash salt '*' file.append /etc/motd \\ "With all thine offerings thou shalt offer salt." \\ "Salt is what makes things taste bad when it isn't in them." .. 
admonition:: Attention If you need to pass a string to append and that string contains an equal sign, you **must** include the argument name, args. For example: .. code-block:: bash salt '*' file.append /etc/motd args='cheese=spam' salt '*' file.append /etc/motd args="['cheese=spam','spam=cheese']" ''' path = os.path.expanduser(path) # Largely inspired by Fabric's contrib.files.append() if 'args' in kwargs: if isinstance(kwargs['args'], list): args = kwargs['args'] else: args = [kwargs['args']] # Make sure we have a newline at the end of the file. Do this in binary # mode so SEEK_END with nonzero offset will work. with salt.utils.fopen(path, 'rb+') as ofile: linesep = salt.utils.to_bytes(os.linesep) try: ofile.seek(-len(linesep), os.SEEK_END) except IOError as exc: if exc.errno in (errno.EINVAL, errno.ESPIPE): # Empty file, simply append lines at the beginning of the file pass else: raise else: if ofile.read(len(linesep)) != linesep: ofile.seek(0, os.SEEK_END) ofile.write(linesep) # Append lines in text mode with salt.utils.fopen(path, 'a') as ofile: for new_line in args: ofile.write('{0}{1}'.format(new_line, os.linesep)) return 'Wrote {0} lines to "{1}"'.format(len(args), path) def prepend(path, *args, **kwargs): ''' .. versionadded:: 2014.7.0 Prepend text to the beginning of a file path path to file `*args` strings to prepend to the file CLI Example: .. code-block:: bash salt '*' file.prepend /etc/motd \\ "With all thine offerings thou shalt offer salt." \\ "Salt is what makes things taste bad when it isn't in them." .. admonition:: Attention If you need to pass a string to append and that string contains an equal sign, you **must** include the argument name, args. For example: .. 
code-block:: bash salt '*' file.prepend /etc/motd args='cheese=spam' salt '*' file.prepend /etc/motd args="['cheese=spam','spam=cheese']" ''' path = os.path.expanduser(path) if 'args' in kwargs: if isinstance(kwargs['args'], list): args = kwargs['args'] else: args = [kwargs['args']] try: with salt.utils.fopen(path) as fhr: contents = fhr.readlines() except IOError: contents = [] preface = [] for line in args: preface.append('{0}\n'.format(line)) with salt.utils.fopen(path, "w") as ofile: contents = preface + contents ofile.write(''.join(contents)) return 'Prepended {0} lines to "{1}"'.format(len(args), path) def write(path, *args, **kwargs): ''' .. versionadded:: 2014.7.0 Write text to a file, overwriting any existing contents. path path to file `*args` strings to write to the file CLI Example: .. code-block:: bash salt '*' file.write /etc/motd \\ "With all thine offerings thou shalt offer salt." .. admonition:: Attention If you need to pass a string to append and that string contains an equal sign, you **must** include the argument name, args. For example: .. code-block:: bash salt '*' file.write /etc/motd args='cheese=spam' salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']" ''' path = os.path.expanduser(path) if 'args' in kwargs: if isinstance(kwargs['args'], list): args = kwargs['args'] else: args = [kwargs['args']] contents = [] for line in args: contents.append('{0}\n'.format(line)) with salt.utils.fopen(path, "w") as ofile: ofile.write(''.join(contents)) return 'Wrote {0} lines to "{1}"'.format(len(contents), path) def touch(name, atime=None, mtime=None): ''' .. versionadded:: 0.9.5 Just like the ``touch`` command, create a file if it doesn't exist or simply update the atime and mtime if it already does. atime: Access time in Unix epoch time mtime: Last modification in Unix epoch time CLI Example: .. 
code-block:: bash salt '*' file.touch /var/log/emptyfile ''' name = os.path.expanduser(name) if atime and atime.isdigit(): atime = int(atime) if mtime and mtime.isdigit(): mtime = int(mtime) try: if not os.path.exists(name): with salt.utils.fopen(name, 'a') as fhw: fhw.write('') if not atime and not mtime: times = None elif not mtime and atime: times = (atime, time.time()) elif not atime and mtime: times = (time.time(), mtime) else: times = (atime, mtime) os.utime(name, times) except TypeError: raise SaltInvocationError('atime and mtime must be integers') except (IOError, OSError) as exc: raise CommandExecutionError(exc.strerror) return os.path.exists(name) def seek_read(path, size, offset): ''' .. versionadded:: 2014.1.0 Seek to a position on a file and read it path path to file seek amount to read at once offset offset to start into the file CLI Example: .. code-block:: bash salt '*' file.seek_read /path/to/file 4096 0 ''' path = os.path.expanduser(path) try: seek_fh = os.open(path, os.O_RDONLY) os.lseek(seek_fh, int(offset), 0) data = os.read(seek_fh, int(size)) finally: os.close(seek_fh) return data def seek_write(path, data, offset): ''' .. versionadded:: 2014.1.0 Seek to a position on a file and write to it path path to file data data to write to file offset position in file to start writing CLI Example: .. code-block:: bash salt '*' file.seek_write /path/to/file 'some data' 4096 ''' path = os.path.expanduser(path) try: seek_fh = os.open(path, os.O_WRONLY) os.lseek(seek_fh, int(offset), 0) ret = os.write(seek_fh, data) os.fsync(seek_fh) finally: os.close(seek_fh) return ret def truncate(path, length): ''' .. versionadded:: 2014.1.0 Seek to a position on a file and delete everything after that point path path to file length offset into file to truncate CLI Example: .. 
code-block:: bash salt '*' file.truncate /path/to/file 512 ''' path = os.path.expanduser(path) with salt.utils.fopen(path, 'rb+') as seek_fh: seek_fh.truncate(int(length)) def link(src, path): ''' .. versionadded:: 2014.1.0 Create a hard link to a file CLI Example: .. code-block:: bash salt '*' file.link /path/to/file /path/to/link ''' src = os.path.expanduser(src) if not os.path.isabs(src): raise SaltInvocationError('File path must be absolute.') try: os.link(src, path) return True except (OSError, IOError): raise CommandExecutionError('Could not create \'{0}\''.format(path)) return False def is_link(path): ''' Check if the path is a symbolic link CLI Example: .. code-block:: bash salt '*' file.is_link /path/to/link ''' # This function exists because os.path.islink does not support Windows, # therefore a custom function will need to be called. This function # therefore helps API consistency by providing a single function to call for # both operating systems. return os.path.islink(os.path.expanduser(path)) def symlink(src, path): ''' Create a symbolic link (symlink, soft link) to a file CLI Example: .. code-block:: bash salt '*' file.symlink /path/to/file /path/to/link ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute.') try: os.symlink(src, path) return True except (OSError, IOError): raise CommandExecutionError('Could not create \'{0}\''.format(path)) return False def rename(src, dst): ''' Rename a file or directory CLI Example: .. 
code-block:: bash salt '*' file.rename /path/to/src /path/to/dst ''' src = os.path.expanduser(src) dst = os.path.expanduser(dst) if not os.path.isabs(src): raise SaltInvocationError('File path must be absolute.') try: os.rename(src, dst) return True except OSError: raise CommandExecutionError( 'Could not rename \'{0}\' to \'{1}\''.format(src, dst) ) return False def copy(src, dst, recurse=False, remove_existing=False): ''' Copy a file or directory from source to dst In order to copy a directory, the recurse flag is required, and will by default overwrite files in the destination with the same path, and retain all other existing files. (similar to cp -r on unix) remove_existing will remove all files in the target directory, and then copy files from the source. .. note:: The copy function accepts paths that are local to the Salt minion. This function does not support salt://, http://, or the other additional file paths that are supported by :mod:`states.file.managed <salt.states.file.managed>` and :mod:`states.file.recurse <salt.states.file.recurse>`. CLI Example: .. 
code-block:: bash salt '*' file.copy /path/to/src /path/to/dst salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True ''' src = os.path.expanduser(src) dst = os.path.expanduser(dst) if not os.path.isabs(src): raise SaltInvocationError('File path must be absolute.') if not os.path.exists(src): raise CommandExecutionError('No such file or directory \'{0}\''.format(src)) if not salt.utils.is_windows(): pre_user = get_user(src) pre_group = get_group(src) pre_mode = salt.utils.normalize_mode(get_mode(src)) try: if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src): if not recurse: raise SaltInvocationError( "Cannot copy overwriting a directory without recurse flag set to true!") if remove_existing: if os.path.exists(dst): shutil.rmtree(dst) shutil.copytree(src, dst) else: salt.utils.files.recursive_copy(src, dst) else: shutil.copyfile(src, dst) except OSError: raise CommandExecutionError( 'Could not copy \'{0}\' to \'{1}\''.format(src, dst) ) if not salt.utils.is_windows(): check_perms(dst, None, pre_user, pre_group, pre_mode) return True def lstat(path): ''' .. versionadded:: 2014.1.0 Returns the lstat attributes for the given file or dir. Does not support symbolic links. CLI Example: .. code-block:: bash salt '*' file.lstat /path/to/file ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Path to file must be absolute.') try: lst = os.lstat(path) return dict((key, getattr(lst, key)) for key in ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid')) except Exception: return {} def access(path, mode): ''' .. versionadded:: 2014.1.0 Test whether the Salt process has the specified access to the file. One of the following modes must be specified: .. 
code-block::text f: Test the existence of the path r: Test the readability of the path w: Test the writability of the path x: Test whether the path can be executed CLI Example: .. code-block:: bash salt '*' file.access /path/to/file f salt '*' file.access /path/to/file x ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Path to link must be absolute.') modes = {'f': os.F_OK, 'r': os.R_OK, 'w': os.W_OK, 'x': os.X_OK} if mode in modes: return os.access(path, modes[mode]) elif mode in six.itervalues(modes): return os.access(path, mode) else: raise SaltInvocationError('Invalid mode specified.') def readlink(path, canonicalize=False): ''' .. versionadded:: 2014.1.0 Return the path that a symlink points to If canonicalize is set to True, then it return the final target CLI Example: .. code-block:: bash salt '*' file.readlink /path/to/link ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Path to link must be absolute.') if not os.path.islink(path): raise SaltInvocationError('A valid link was not specified.') if canonicalize: return os.path.realpath(path) else: return os.readlink(path) def readdir(path): ''' .. versionadded:: 2014.1.0 Return a list containing the contents of a directory CLI Example: .. code-block:: bash salt '*' file.readdir /path/to/dir/ ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Dir path must be absolute.') if not os.path.isdir(path): raise SaltInvocationError('A valid directory was not specified.') dirents = ['.', '..'] dirents.extend(os.listdir(path)) return dirents def statvfs(path): ''' .. versionadded:: 2014.1.0 Perform a statvfs call against the filesystem that the file resides on CLI Example: .. 
code-block:: bash salt '*' file.statvfs /path/to/file ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute.') try: stv = os.statvfs(path) return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')) except (OSError, IOError): raise CommandExecutionError('Could not statvfs \'{0}\''.format(path)) return False def stats(path, hash_type=None, follow_symlinks=True): ''' Return a dict containing the stats for a given file CLI Example: .. code-block:: bash salt '*' file.stats /etc/passwd ''' path = os.path.expanduser(path) ret = {} if not os.path.exists(path): try: # Broken symlinks will return False for os.path.exists(), but still # have a uid and gid pstat = os.lstat(path) except OSError: # Not a broken symlink, just a nonexistent path return ret else: if follow_symlinks: pstat = os.stat(path) else: pstat = os.lstat(path) ret['inode'] = pstat.st_ino ret['uid'] = pstat.st_uid ret['gid'] = pstat.st_gid ret['group'] = gid_to_group(pstat.st_gid) ret['user'] = uid_to_user(pstat.st_uid) ret['atime'] = pstat.st_atime ret['mtime'] = pstat.st_mtime ret['ctime'] = pstat.st_ctime ret['size'] = pstat.st_size ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode))) if hash_type: ret['sum'] = get_hash(path, hash_type) ret['type'] = 'file' if stat.S_ISDIR(pstat.st_mode): ret['type'] = 'dir' if stat.S_ISCHR(pstat.st_mode): ret['type'] = 'char' if stat.S_ISBLK(pstat.st_mode): ret['type'] = 'block' if stat.S_ISREG(pstat.st_mode): ret['type'] = 'file' if stat.S_ISLNK(pstat.st_mode): ret['type'] = 'link' if stat.S_ISFIFO(pstat.st_mode): ret['type'] = 'pipe' if stat.S_ISSOCK(pstat.st_mode): ret['type'] = 'socket' ret['target'] = os.path.realpath(path) return ret def rmdir(path): ''' .. versionadded:: 2014.1.0 Remove the specified directory. Fails if a directory is not empty. CLI Example: .. 
code-block:: bash salt '*' file.rmdir /tmp/foo/ ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute.') if not os.path.isdir(path): raise SaltInvocationError('A valid directory was not specified.') try: os.rmdir(path) return True except OSError as exc: return exc.strerror def remove(path): ''' Remove the named file. If a directory is supplied, it will be recursively deleted. CLI Example: .. code-block:: bash salt '*' file.remove /tmp/foo ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute: {0}'.format(path)) try: if os.path.isfile(path) or os.path.islink(path): os.remove(path) return True elif os.path.isdir(path): shutil.rmtree(path) return True except (OSError, IOError) as exc: raise CommandExecutionError( 'Could not remove \'{0}\': {1}'.format(path, exc) ) return False def directory_exists(path): ''' Tests to see if path is a valid directory. Returns True/False. CLI Example: .. code-block:: bash salt '*' file.directory_exists /etc ''' return os.path.isdir(os.path.expanduser(path)) def file_exists(path): ''' Tests to see if path is a valid file. Returns True/False. CLI Example: .. code-block:: bash salt '*' file.file_exists /etc/passwd ''' return os.path.isfile(os.path.expanduser(path)) def path_exists_glob(path): ''' Tests to see if path after expansion is a valid path (file or directory). Expansion allows usage of ? * and character ranges []. Tilde expansion is not supported. Returns True/False. .. versionadded:: Hellium CLI Example: .. code-block:: bash salt '*' file.path_exists_glob /etc/pam*/pass* ''' return True if glob.glob(os.path.expanduser(path)) else False def restorecon(path, recursive=False): ''' Reset the SELinux context on a given path CLI Example: .. 
code-block:: bash salt '*' file.restorecon /home/user/.ssh/authorized_keys ''' if recursive: cmd = ['restorecon', '-FR', path] else: cmd = ['restorecon', '-F', path] return not __salt__['cmd.retcode'](cmd, python_shell=False) def get_selinux_context(path): ''' Get an SELinux context from a given path CLI Example: .. code-block:: bash salt '*' file.get_selinux_context /etc/hosts ''' out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False) try: ret = re.search(r'\w+:\w+:\w+:\w+', out).group(0) except AttributeError: ret = ( 'No selinux context information is available for {0}'.format(path) ) return ret def set_selinux_context(path, user=None, role=None, type=None, # pylint: disable=W0622 range=None): # pylint: disable=W0622 ''' Set a specific SELinux label on a given path CLI Example: .. code-block:: bash salt '*' file.set_selinux_context path <role> <type> <range> ''' if not any((user, role, type, range)): return False cmd = ['chcon'] if user: cmd.extend(['-u', user]) if role: cmd.extend(['-r', role]) if type: cmd.extend(['-t', type]) if range: cmd.extend(['-l', range]) cmd.append(path) ret = not __salt__['cmd.retcode'](cmd, python_shell=False) if ret: return get_selinux_context(path) else: return ret def source_list(source, source_hash, saltenv): ''' Check the source list and return the source to use CLI Example: .. 
    code-block:: bash

        salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
    '''
    # Memoize per (source, source_hash, saltenv) for the life of this run.
    contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
    if contextkey in __context__:
        return __context__[contextkey]

    # get the master file list
    if isinstance(source, list):
        mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
        # Pull in file lists for any saltenvs referenced via ?saltenv= queries.
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))

            path, senv = salt.utils.url.parse(single)
            if senv:
                mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
                mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]

        ret = None
        for single in source:
            if isinstance(single, dict):
                # check the proto, if it is http or ftp then download the file
                # to check, if it is salt then check the master list
                # if it is a local file, check if the file exists
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = _urlparse(single_src)
                proto = urlparsed_single_src.scheme
                if proto == 'salt':
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    # NOTE(review): checks (path, saltenv) even though senv was
                    # just resolved above; the string branch below uses
                    # (path, senv). Looks inconsistent -- confirm intent.
                    if (path, saltenv) in mfiles or (path, saltenv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith('http') or proto == 'ftp':
                    try:
                        if __salt__['cp.cache_file'](single_src):
                            ret = (single_src, single_hash)
                            break
                    except MinionError as exc:
                        # Error downloading file. Log the caught exception and
                        # continue on to the next source.
                        log.exception(exc)
                elif proto == 'file' and os.path.exists(urlparsed_single_src.path):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith('/') and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, six.string_types):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
                urlparsed_src = _urlparse(single)
                proto = urlparsed_src.scheme
                if proto == 'file' and os.path.exists(urlparsed_src.path):
                    ret = (single, source_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
                    if __salt__['cp.cache_file'](single):
                        ret = (single, source_hash)
                        break
                elif single.startswith('/') and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the list items matched
            raise CommandExecutionError(
                'none of the specified sources were found'
            )
    else:
        # Single source: pass it straight through with its hash.
        ret = (source, source_hash)

    __context__[contextkey] = ret
    return ret


def apply_template_on_contents(
        contents,
        template,
        context,
        defaults,
        saltenv):
    '''
    Return the contents after applying the templating engine

    contents
        template string

    template
        template format

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    CLI Example:

    .. code-block:: bash

        salt '*' file.apply_template_on_contents \\
            contents='This is a {{ template }} string.'
 \\
            template=jinja \\
            "context={}" "defaults={'template': 'cool'}" \\
            saltenv=base
    '''
    if template in salt.utils.templates.TEMPLATE_REGISTRY:
        context_dict = defaults if defaults else {}
        if context:
            context_dict.update(context)
        # Apply templating
        contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
            contents,
            from_str=True,
            to_str=True,
            context=context_dict,
            saltenv=saltenv,
            grains=__opts__['grains'],
            pillar=__pillar__,
            salt=__salt__,
            opts=__opts__)['data']
        if six.PY2:
            contents = contents.encode('utf-8')
    else:
        # Unknown engine: return a state-style error dict instead of contents.
        ret = {}
        ret['result'] = False
        ret['comment'] = ('Specified template format {0} is not supported'
                          ).format(template)
        return ret
    return contents


def get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        saltenv,
        context,
        defaults,
        skip_verify=False,
        **kwargs):
    '''
    Return the managed file data for file.managed

    Returns a 3-tuple of (sfn, source_sum, comment): the cached/rendered
    source path, its checksum dict, and an error comment ('' on success).

    name
        location where the file lives on the server

    template
        template format

    source
        managed source file

    source_hash
        hash of the source file

    source_hash_name
        When ``source_hash`` refers to a remote file, this specifies the
        filename to look for in that file.

        .. versionadded:: 2016.3.5

    user
        Owner of file

    group
        Group owner of file

    mode
        Permissions of file

    context
        Variables to add to the template context

    defaults
        Default values of for context_dict

    skip_verify
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
    '''
    # Copy the file to the minion and templatize it
    sfn = ''
    source_sum = {}

    def _get_local_file_source_sum(path):
        '''
        DRY helper for getting the source_sum value from a locally cached path.
        '''
        return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}

    # If we have a source defined, let's figure out what the hash is
    if source:
        urlparsed_source = _urlparse(source)
        parsed_scheme = urlparsed_source.scheme
        parsed_path = os.path.join(
                urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)

        # A single-letter scheme is really a Windows drive letter (C:\...),
        # so treat the whole thing as a local file path.
        if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
            parsed_path = ':'.join([parsed_scheme, parsed_path])
            parsed_scheme = 'file'

        if parsed_scheme == 'salt':
            source_sum = __salt__['cp.hash_file'](source, saltenv)
            if not source_sum:
                return '', {}, 'Source file {0} not found'.format(source)
        elif not source_hash and parsed_scheme == 'file':
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            source_sum = _get_local_file_source_sum(source)
        else:
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(name,
                                                    source,
                                                    source_hash,
                                                    source_hash_name,
                                                    saltenv)
                    except CommandExecutionError as exc:
                        return '', {}, exc.strerror
                else:
                    # Remote (non-salt) source with no hash and verification
                    # enabled: refuse rather than manage unverified content.
                    msg = (
                        'Unable to verify upstream hash of source file {0}, '
                        'please set source_hash or set skip_verify to True'
                        .format(source)
                    )
                    return '', {}, msg

    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__['cp.is_cached'](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get('hash_type', 'sha256')
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                sfn = cached_dest
                source_sum = {'hsum': cached_sum, 'hash_type': htype}
            elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
                cache_refetch = True

        # If we didn't have the template or remote file, let's get it
        # Similarly when the file has been updated and the cache has to be refreshed
        if not sfn or cache_refetch:
            try:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            except Exception as exc:
                # A 404 or other error code may raise an exception, catch it
                # and return a comment that will fail the calling state.
                return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)

        # If cache failed, sfn will be False, so do a truth check on sfn first
        # as invoking os.path.exists() on a bool raises a TypeError.
        if not sfn or not os.path.exists(sfn):
            return sfn, {}, 'Source file \'{0}\' not found'.format(source)
        if sfn == name:
            raise SaltInvocationError(
                'Source file cannot be the same as destination'
            )

        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__['grains'],
                    opts=__opts__,
                    **kwargs)
            else:
                return sfn, {}, ('Specified template format {0} is not supported'
                                 ).format(template)

            if data['result']:
                # Rendering succeeded: the rendered temp file replaces sfn and
                # its hash replaces the upstream source hash.
                sfn = data['data']
                hsum = get_hash(sfn, form='sha256')
                source_sum = {'hash_type': 'sha256',
                              'hsum': hsum}
            else:
                __clean_tmp(sfn)
                return sfn, {}, data['data']

    return sfn, source_sum, ''


def extract_hash(hash_fn,
                 hash_type='sha256',
                 file_name='',
                 source='',
                 source_hash_name=None):
    '''
    .. versionchanged:: 2016.3.5
        Prior to this version, only the ``file_name`` argument was considered
        for filename matches in the hash file. This would be problematic for
        cases in which the user was relying on a remote checksum file that
        they do not control, and they wished to use a different name for that
        file on the minion from the filename on the remote server (and in the
        checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the
        remote file was at ``https://mydomain.tld/different_name.tar.gz``.
        The :py:func:`file.managed <salt.states.file.managed>` state now also
        passes this function the source URI as well as the ``source_hash_name``
        (if specified). In cases where ``source_hash_name`` is specified, it
        takes precedence over both the ``file_name`` and ``source``. When it
        is not specified, ``file_name`` takes precedence over ``source``. This
        allows for better capability for matching hashes.

    .. versionchanged:: 2016.11.0
        File name and source URI matches are no longer disregarded when
        ``source_hash_name`` is specified. They will be used as fallback
        matches if there is no match to the ``source_hash_name`` value.

    This routine is called from the :mod:`file.managed
    <salt.states.file.managed>` state to pull a hash from a remote file.
    Regular expressions are used line by line on the ``source_hash`` file, to
    find a potential candidate of the indicated hash type. This avoids many
    problems of arbitrary file layout rules. It specifically permits pulling
    hash codes from debian ``*.dsc`` files.

    If no exact match of a hash and filename are found, then the first hash
    found (if any) will be returned. If no hashes at all are found, then
    ``None`` will be returned.

    For example:

    .. code-block:: yaml

        openerp_7.0-latest-1.tar.gz:
          file.managed:
            - name: /tmp/openerp_7.0-20121227-075624-1_all.deb
            - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz
            - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc

    CLI Example:

    .. code-block:: bash

        salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
    '''
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                'file.extract_hash: Unsupported hash_type \'%s\', falling '
                'back to matching any supported hash_type', hash_type
            )
            hash_type = ''
        # Match any supported hash length (regex {min,max} quantifier).
        hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = str(hash_len)

    filename_separators = string.whitespace + r'\/'

    # Pre-compute negative indexes pointing at the character just before each
    # candidate filename, so a line ending in the name can be validated.
    if source_hash_name:
        if not isinstance(source_hash_name, six.string_types):
            source_hash_name = str(source_hash_name)
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            'file.extract_hash: Extracting %s hash for file matching '
            'source_hash_name \'%s\'',
            'any supported' if not hash_type else hash_type,
            source_hash_name
        )
    if file_name:
        if not isinstance(file_name, six.string_types):
            file_name = str(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, six.string_types):
            source = str(source)
        urlparsed_source = _urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1

    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            'file.extract_hash: %s %s hash for file matching%s: %s',
            'If no source_hash_name match found, will extract'
                if source_hash_name
                else 'Extracting',
            'any supported' if not hash_type else hash_type,
            '' if len(basename_searches) == 1 else ' either of the following',
            ', '.join(basename_searches)
        )

    # partial = first hash seen anywhere (fallback); found = hashes grouped by
    # which search criterion their line matched.
    partial = None
    found = {}

    with salt.utils.fopen(hash_fn, 'r') as fp_:
        for line in fp_:
            line = line.strip()
            hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
            hash_match = re.search(hash_re, line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # There was a match, but it's not of the correct length
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {'hsum': matched_hsum,
                                   'hash_type': matched_type}

            if matched is None:
                log.debug(
                    'file.extract_hash: In line \'%s\', no %shash found',
                    line,
                    '' if not hash_type else hash_type + ' '
                )
                continue

            if partial is None:
                partial = matched

            def _add_to_matches(found, line, match_type, value, matched):
                # Record a criterion-specific hash match.
                log.debug(
                    'file.extract_hash: Line \'%s\' matches %s \'%s\'',
                    line, match_type, value
                )
                found.setdefault(match_type, []).append(matched)

            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(found, line, 'source_hash_name',
                                            source_hash_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(source_hash_name.replace('.', r'\.') + r'\s+',
                              line):
                    _add_to_matches(found, line, 'source_hash_name',
                                    source_hash_name, matched)
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(found, line, 'file_name',
                                            file_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(file_name.replace('.', r'\.') + r'\s+', line):
                    _add_to_matches(found, line, 'file_name',
                                    file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, 'source',
                                            source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(source.replace('.', r'\.') + r'\s+', line):
                    _add_to_matches(found, line, 'source',
                                    source, matched)
                    hash_matched = True

            if not hash_matched:
                log.debug(
                    'file.extract_hash: Line \'%s\' contains %s hash '
                    '\'%s\', but line did not meet the search criteria',
                    line, matched['hash_type'], matched['hsum']
                )

    # Return the best match in priority order: source_hash_name, then
    # file_name, then source; first hash wins within each criterion.
    for found_type, found_str in (('source_hash_name', source_hash_name),
                                  ('file_name', file_name),
                                  ('source', source)):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    'file.extract_hash: Multiple %s matches for %s: %s',
                    found_type,
                    found_str,
                    ', '.join(
                        ['{0} ({1})'.format(x['hsum'], x['hash_type'])
                         for x in found[found_type]]
                    )
                )
            ret = found[found_type][0]
            log.debug(
                'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
                ret['hash_type'], ret['hsum'], found_str
            )
            return ret

    if partial:
        # No criterion matched; fall back to the first hash seen in the file.
        log.debug(
            'file.extract_hash: Returning the partially identified %s hash '
            '\'%s\'', partial['hash_type'], partial['hsum']
        )
        return partial

    log.debug('file.extract_hash: No matches, returning None')
    return None


def check_perms(name, ret, user, group, mode, follow_symlinks=False):
    '''
    Check the permissions on files and chown if needed

    Returns a 2-tuple of (state-return dict, perms dict).

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_perms /etc/sudoers '{}' root root 400

    .. versionchanged:: 2014.1.3
        ``follow_symlinks`` option added
    '''
    name = os.path.expanduser(name)

    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': [],
               'result': True}
        orig_comment = ''
    else:
        orig_comment = ret['comment']
        ret['comment'] = []

    # Check permissions
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    if not cur:
        # NOTE: The file.directory state checks the content of the error
        # message in this exception. Any changes made to the message for this
        # exception will reflect the file.directory state as well, and will
        # likely require changes there.
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = salt.utils.normalize_mode(cur['mode'])

    # Mode changes if needed
    if mode is not None:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.normalize_mode(mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    if mode != salt.utils.normalize_mode(get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
                        )
                    else:
                        ret['changes']['mode'] = mode

    # user/group changes if needed, then check if it worked
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.is_windows() and
                user_to_uid(user) != user_to_uid(perms['luser'])
            ) or (
            not salt.utils.is_windows() and user != perms['luser']
        ):
            perms['cuser'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.is_windows() and
                group_to_gid(group) != group_to_gid(perms['lgroup'])
            ) or (
                not salt.utils.is_windows() and group != perms['lgroup']
        ):
            perms['cgroup'] = group

    if 'cuser' in perms or 'cgroup' in perms:
        if not __opts__['test']:
            # lchown operates on the link itself when not following symlinks.
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms['luser']
            if group is None:
                group = perms['lgroup']
            try:
                chown_func(name, user, group)
            except OSError:
                ret['result'] = False

    # Verify the ownership changes actually took effect.
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.is_windows() and
                user_to_uid(user) != user_to_uid(
                    get_user(name, follow_symlinks=follow_symlinks)) and
                user != ''
            ) or (
            not salt.utils.is_windows() and
                user != get_user(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['user'] = user
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change user to {0}'
                                      .format(user))
        elif 'cuser' in perms and user != '':
            ret['changes']['user'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        # NOTE(review): this block tests `user != ''` rather than
        # `group != ''` -- looks like a copy/paste slip from the user block
        # above; confirm before changing, callers may depend on it.
        if (salt.utils.is_windows() and
                group_to_gid(group) != group_to_gid(
                    get_group(name, follow_symlinks=follow_symlinks)) and
                user != '') or (
            not salt.utils.is_windows() and
                group != get_group(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['group'] = group
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change group to {0}'
                                      .format(group))
        elif 'cgroup' in perms and user != '':
            ret['changes']['group'] = group

    if isinstance(orig_comment, six.string_types):
        if orig_comment:
            ret['comment'].insert(0, orig_comment)
        ret['comment'] = '; '.join(ret['comment'])
    if __opts__['test'] is True and ret['changes']:
        ret['result'] = None

    return ret, perms


def check_managed(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        **kwargs):
    '''
    Check to see what changes need to be made for a file

    CLI Example:

    ..
    code-block:: bash

        salt '*' file.check_managed /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,           # pylint: disable=W0633
                                      source_hash,
                                      saltenv)

    sfn = ''
    source_sum = None

    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
    changes = check_file_meta(name, sfn, source, source_sum,
                              user, group, mode, saltenv, contents)
    # Ignore permission for files written temporary directories
    # Files in any path will still be set correctly using get_managed()
    if name.startswith(tempfile.gettempdir()):
        for key in ['user', 'group', 'mode']:
            changes.pop(key, None)
    __clean_tmp(sfn)
    if changes:
        log.info(changes)
        comments = ['The following values are set to be changed:\n']
        comments.extend('{0}: {1}\n'.format(key, val)
                        for key, val in six.iteritems(changes))
        return None, ''.join(comments)
    return True, 'The file {0} is in the correct state'.format(name)


def check_managed_changes(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        **kwargs):
    '''
    Return a dictionary of what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,           # pylint: disable=W0633
                                      source_hash,
                                      saltenv)

    sfn = ''
    source_sum = None

    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
        if sfn and source and keep_mode:
            # keep_mode: mirror the mode of salt:// or local sources onto the
            # destination; a failed stat is logged, not fatal.
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
                except Exception as exc:
                    log.warning('Unable to stat %s: %s', sfn, exc)
    changes = check_file_meta(name, sfn, source, source_sum,
                              user, group, mode, saltenv, contents)
    __clean_tmp(sfn)
    return changes


def check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        saltenv,
        contents=None):
    '''
    Check for the changes in the file metadata.

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_file_meta /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' base

    .. note::

        Supported hash types include sha512, sha384, sha256, sha224, sha1, and
        md5.

    name
        Path to file destination

    sfn
        Template-processed source file contents

    source
        URL to file source

    source_sum
        File checksum information as a dictionary

    ..
    code-block:: yaml

        {hash_type: md5, hsum: <md5sum>}

    user
        Destination file user owner

    group
        Destination file group owner

    mode
        Destination file permissions mode

    saltenv
        Salt environment used to resolve source files

    contents
        File contents
    '''
    changes = {}
    if not source_sum:
        source_sum = dict()
    lstats = stats(name, hash_type=source_sum.get('hash_type', None),
                   follow_symlinks=False)
    if not lstats:
        # Destination does not exist yet: the only "change" is creating it.
        changes['newfile'] = name
        return changes

    if 'hsum' in source_sum:
        if source_sum['hsum'] != lstats['sum']:
            if not sfn and source:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if sfn:
                if __salt__['config.option']('obfuscate_templates'):
                    changes['diff'] = '<Obfuscated Template>'
                else:
                    # Check to see if the files are bins
                    bdiff = _binary_replace(name, sfn)
                    if bdiff:
                        changes['diff'] = bdiff
                    else:
                        with salt.utils.fopen(sfn, 'r') as src:
                            slines = src.readlines()
                        with salt.utils.fopen(name, 'r') as name_:
                            nlines = name_.readlines()
                        changes['diff'] = \
                            ''.join(difflib.unified_diff(nlines, slines))
            else:
                changes['sum'] = 'Checksum differs'

    if contents is not None:
        # Write a tempfile with the static contents
        tmp = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                 text=True)
        if salt.utils.is_windows():
            contents = os.linesep.join(
                _splitlines_preserving_trailing_newline(contents))
        with salt.utils.fopen(tmp, 'w') as tmp_:
            tmp_.write(str(contents))
        # Compare the static contents with the named file
        with salt.utils.fopen(tmp, 'r') as src:
            slines = src.readlines()
        with salt.utils.fopen(name, 'r') as name_:
            nlines = name_.readlines()
        __clean_tmp(tmp)
        if ''.join(nlines) != ''.join(slines):
            if __salt__['config.option']('obfuscate_templates'):
                changes['diff'] = '<Obfuscated Template>'
            else:
                if salt.utils.istextfile(name):
                    changes['diff'] = \
                        ''.join(difflib.unified_diff(nlines, slines))
                else:
                    changes['diff'] = 'Replace binary file with text file'

    # user/group may be given as either names or numeric ids; compare both.
    if (user is not None
            and user != lstats['user']
            and user != lstats['uid']):
        changes['user'] = user
    if (group is not None
            and group != lstats['group']
            and group != lstats['gid']):
        changes['group'] = group
    # Normalize the file mode
    smode = salt.utils.normalize_mode(lstats['mode'])
    mode = salt.utils.normalize_mode(mode)
    if mode is not None and mode != smode:
        changes['mode'] = mode
    return changes


def get_diff(
        minionfile,
        masterfile,
        saltenv='base'):
    '''
    Return unified diff of file compared to file on master

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
    '''
    minionfile = os.path.expanduser(minionfile)

    ret = ''

    if not os.path.exists(minionfile):
        ret = 'File {0} does not exist on the minion'.format(minionfile)
        return ret

    sfn = __salt__['cp.cache_file'](masterfile, saltenv)
    if sfn:
        with salt.utils.fopen(sfn, 'r') as src:
            slines = src.readlines()
        with salt.utils.fopen(minionfile, 'r') as name_:
            nlines = name_.readlines()
        if ''.join(nlines) != ''.join(slines):
            # Binary files get a placeholder message instead of a text diff.
            bdiff = _binary_replace(minionfile, sfn)
            if bdiff:
                ret += bdiff
            else:
                ret += ''.join(difflib.unified_diff(nlines, slines,
                                                    minionfile, masterfile))
    else:
        ret = 'Failed to copy file from master'

    return ret


def manage_file(name,
                sfn,
                ret,
                source,
                source_sum,
                user,
                group,
                mode,
                saltenv,
                backup,
                makedirs=False,
                template=None,   # pylint: disable=W0613
                show_changes=True,
                contents=None,
                dir_mode=None,
                follow_symlinks=True,
                skip_verify=False,
                keep_mode=False,
                **kwargs):
    '''
    Checks the destination against what was retrieved with get_managed and
    makes the appropriate modifications (if necessary)

    name
        location to place the file

    sfn
        location of cached file on the minion

        This is the path to the file stored on the minion. This file is
        placed on the minion using cp.cache_file.  If the hash sum of that
        file matches the source_sum, we do not transfer the file to the
        minion again.

        This file is then grabbed and if it has template set, it renders the
        file to be placed into the correct place on the system using
        salt.files.utils.copyfile()

    ret
        The initial state return data structure.
Pass in ``None`` to use the default structure. source file reference on the master source_hash sum hash for source user user owner group group owner backup backup_mode makedirs make directories if they do not exist template format of templating show_changes Include diff in state return contents: contents to be placed in the file dir_mode mode for directories created with makedirs skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 keep_mode : False If ``True``, and the ``source`` is a file from the Salt fileserver (or a local file on the minion), the mode of the destination file will be set to the mode of the source file. .. note:: keep_mode does not work with salt-ssh. As a consequence of how the files are transfered to the minion, and the inability to connect back to the master with salt-ssh, salt is unable to stat the file as it exists on the fileserver and thus cannot mirror the mode on the salt-ssh minion CLI Example: .. code-block:: bash salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base '' .. 
versionchanged:: 2014.7.0 ``follow_symlinks`` option added ''' name = os.path.expanduser(name) if not ret: ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} if source and not sfn: # File is not present, cache it sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) htype = source_sum.get('hash_type', __opts__['hash_type']) # Recalculate source sum now that file has been cached source_sum = { 'hash_type': htype, 'hsum': get_hash(sfn, form=htype) } if keep_mode: if _urlparse(source).scheme in ('salt', 'file') \ or source.startswith('/'): try: mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True) except Exception as exc: log.warning('Unable to stat %s: %s', sfn, exc) # Check changes if the target file exists if os.path.isfile(name) or os.path.islink(name): if os.path.islink(name) and follow_symlinks: real_name = os.path.realpath(name) else: real_name = name # Only test the checksums on files with managed contents if source and not (not follow_symlinks and os.path.islink(real_name)): name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type'])) else: name_sum = None # Check if file needs to be replaced if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum): if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server or local # source, and we are not skipping checksum verification, then # verify that it matches the specified checksum. if not skip_verify \ and _urlparse(source).scheme not in ('salt', ''): dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3}). 
If the \'source_hash\' value ' 'refers to a remote file with multiple possible ' 'matches, then it may be necessary to set ' '\'source_hash_name\'.'.format( source_sum['hash_type'], source, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret # Print a diff equivalent to diff -u old new if __salt__['config.option']('obfuscate_templates'): ret['changes']['diff'] = '<Obfuscated Template>' elif not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: # Check to see if the files are bins bdiff = _binary_replace(real_name, sfn) if bdiff: ret['changes']['diff'] = bdiff else: with salt.utils.fopen(sfn, 'r') as src: slines = src.readlines() with salt.utils.fopen(real_name, 'r') as name_: nlines = name_.readlines() sndiff = ''.join(difflib.unified_diff(nlines, slines)) if sndiff: ret['changes']['diff'] = sndiff # Pre requisites are met, and the file needs to be replaced, do it try: salt.utils.files.copyfile(sfn, real_name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(sfn) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) if contents is not None: # Write the static contents to a temporary file tmp = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) if salt.utils.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.fopen(tmp, 'w') as tmp_: tmp_.write(str(contents)) # Compare contents of files to know if we need to replace with salt.utils.fopen(tmp, 'r') as src: slines = src.readlines() with salt.utils.fopen(real_name, 'r') as name_: nlines = name_.readlines() different = ''.join(slines) != ''.join(nlines) if different: if __salt__['config.option']('obfuscate_templates'): ret['changes']['diff'] = '<Obfuscated Template>' elif not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: if salt.utils.istextfile(real_name): ret['changes']['diff'] = \ ''.join(difflib.unified_diff(nlines, 
slines)) else: ret['changes']['diff'] = \ 'Replace binary file with text file' # Pre requisites are met, the file needs to be replaced, do it try: salt.utils.files.copyfile(tmp, real_name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(tmp) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) __clean_tmp(tmp) # Check for changing symlink to regular file here if os.path.islink(name) and not follow_symlinks: if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server source verify # that it matches the intended sum value if not skip_verify and _urlparse(source).scheme != 'salt': dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3})'.format( source_sum['hash_type'], name, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret try: salt.utils.files.copyfile(sfn, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(sfn) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) ret['changes']['diff'] = \ 'Replace symbolic link with regular file' ret, _ = check_perms(name, ret, user, group, mode, follow_symlinks) if ret['changes']: ret['comment'] = 'File {0} updated'.format(name) elif not ret['changes'] and ret['result']: ret['comment'] = u'File {0} is in the correct state'.format( salt.utils.locales.sdecode(name) ) if sfn: __clean_tmp(sfn) return ret else: # target file does not exist contain_dir = os.path.dirname(name) def _set_mode_and_make_dirs(name, dir_mode, mode, user, group): # check for existence of windows drive letter if salt.utils.is_windows(): drive, _ = os.path.splitdrive(name) if drive and not os.path.exists(drive): __clean_tmp(sfn) return _error(ret, '{0} 
drive not present'.format(drive)) if dir_mode is None and mode is not None: # Add execute bit to each nonzero digit in the mode, if # dir_mode was not specified. Otherwise, any # directories created with makedirs_() below can't be # listed via a shell. mode_list = [x for x in str(mode)][-3:] for idx in range(len(mode_list)): if mode_list[idx] != '0': mode_list[idx] = str(int(mode_list[idx]) | 1) dir_mode = ''.join(mode_list) makedirs_(name, user=user, group=group, mode=dir_mode) if source: # It is a new file, set the diff accordingly ret['changes']['diff'] = 'New file' # Apply the new file if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server source verify # that it matches the intended sum value if not skip_verify \ and _urlparse(source).scheme != 'salt': dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3})'.format( source_sum['hash_type'], name, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret if not os.path.isdir(contain_dir): if makedirs: _set_mode_and_make_dirs(name, dir_mode, mode, user, group) else: __clean_tmp(sfn) # No changes actually made ret['changes'].pop('diff', None) return _error(ret, 'Parent directory not present') else: # source != True if not os.path.isdir(contain_dir): if makedirs: _set_mode_and_make_dirs(name, dir_mode, mode, user, group) else: __clean_tmp(sfn) # No changes actually made ret['changes'].pop('diff', None) return _error(ret, 'Parent directory not present') # Create the file, user rw-only if mode will be set to prevent # a small security race problem before the permissions are set if mode: current_umask = os.umask(0o77) # Create a new file when test is False and source is None if contents is None: if not __opts__['test']: if touch(name): 
ret['changes']['new'] = 'file {0} created'.format(name) ret['comment'] = 'Empty file' else: return _error( ret, 'Empty file {0} not created'.format(name) ) else: if not __opts__['test']: if touch(name): ret['changes']['diff'] = 'New file' else: return _error( ret, 'File {0} not created'.format(name) ) if mode: os.umask(current_umask) if contents is not None: # Write the static contents to a temporary file tmp = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) if salt.utils.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.fopen(tmp, 'w') as tmp_: tmp_.write(str(contents)) # Copy into place salt.utils.files.copyfile(tmp, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) __clean_tmp(tmp) # Now copy the file contents if there is a source file elif sfn: salt.utils.files.copyfile(sfn, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) __clean_tmp(sfn) # This is a new file, if no mode specified, use the umask to figure # out what mode to use for the new file. if mode is None and not salt.utils.is_windows(): # Get current umask mask = os.umask(0) os.umask(mask) # Calculate the mode value that results from the umask mode = oct((0o777 ^ mask) & 0o666) ret, _ = check_perms(name, ret, user, group, mode) if not ret['comment']: ret['comment'] = 'File ' + name + ' updated' if __opts__['test']: ret['comment'] = 'File ' + name + ' not updated' elif not ret['changes'] and ret['result']: ret['comment'] = 'File ' + name + ' is in the correct state' if sfn: __clean_tmp(sfn) return ret def mkdir(dir_path, user=None, group=None, mode=None): ''' Ensure that a directory is available. CLI Example: .. 
 code-block:: bash

        salt '*' file.mkdir /opt/jetty/context
    '''
    dir_path = os.path.expanduser(dir_path)

    directory = os.path.normpath(dir_path)

    if not os.path.isdir(directory):
        # If a caller such as managed() is invoked with makedirs=True, make
        # sure that any created dirs are created with the same user and group
        # to follow the principle of least surprise.
        makedirs_perms(directory, user, group, mode)

    return True


def makedirs_(path,
              user=None,
              group=None,
              mode=None):
    '''
    Ensure that the directory containing this path is available.

    .. note::

        The path must end with a trailing slash otherwise the directory/directories
        will be created up to the parent directory. For example if path is
        ``/opt/code``, then it would be treated as ``/opt/`` but if the path
        ends with a trailing slash like ``/opt/code/``, then it would be
        treated as ``/opt/code/``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs /opt/code/
    '''
    path = os.path.expanduser(path)

    # walk up the directory structure until we find the first existing
    # directory
    dirname = os.path.normpath(os.path.dirname(path))

    if os.path.isdir(dirname):
        # There's nothing for us to do
        msg = 'Directory \'{0}\' already exists'.format(dirname)
        log.debug(msg)
        return msg

    if os.path.exists(dirname):
        msg = 'The path \'{0}\' already exists and is not a directory'.format(
            dirname
        )
        log.debug(msg)
        return msg

    directories_to_create = []
    while True:
        if os.path.isdir(dirname):
            break

        directories_to_create.append(dirname)
        current_dirname = dirname
        dirname = os.path.dirname(dirname)

        if current_dirname == dirname:
            # NOTE(review): at this point `dirname` has already been
            # reassigned, so the message reports the parent rather than the
            # offending path — consider formatting `current_dirname` instead.
            raise SaltInvocationError(
                'Recursive creation for path \'{0}\' would result in an '
                'infinite loop. Please use an absolute path.'.format(dirname)
            )

    # create parent directories from the topmost to the most deeply nested one
    directories_to_create.reverse()
    for directory_to_create in directories_to_create:
        # all directories have the user, group and mode set!!
        log.debug('Creating directory: %s', directory_to_create)
        mkdir(directory_to_create, user=user, group=group, mode=mode)


def makedirs_perms(name,
                   user=None,
                   group=None,
                   mode='0755'):
    '''
    Taken and modified from os.makedirs to set user, group and mode for each
    directory created.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs_perms /opt/code
    '''
    name = os.path.expanduser(name)

    path = os.path
    head, tail = path.split(name)
    if not tail:
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        # Recurse to create missing ancestors first.
        try:
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # be happy if someone already created the path
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:  # xxx/newdir/. exists if xxx/newdir exists
            return
    os.mkdir(name)
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)


def get_devmm(name):
    '''
    Get major/minor info from a device

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_devmm /dev/chr
    '''
    name = os.path.expanduser(name)

    if is_chrdev(name) or is_blkdev(name):
        stat_structure = os.stat(name)
        return (
            os.major(stat_structure.st_rdev),
            os.minor(stat_structure.st_rdev))
    else:
        # Not a device node — return a neutral (0, 0) pair.
        return (0, 0)


def is_chrdev(name):
    '''
    Check if a file exists and is a character device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_chrdev /dev/chr
    '''
    name = os.path.expanduser(name)

    stat_structure = None
    try:
        stat_structure = os.stat(name)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # If the character device does not exist in the first place
            return False
        else:
            raise
    return stat.S_ISCHR(stat_structure.st_mode)


def mknod_chrdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a character device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_chrdev /dev/chr 180 31
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating character device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Mode string like '0660' is parsed as octal for os.mknod.
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created character device
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret


def is_blkdev(name):
    '''
    Check if a file exists and is a block device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_blkdev /dev/blk
    '''
    name = os.path.expanduser(name)

    stat_structure = None
    try:
        stat_structure = os.stat(name)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # If the block device does not exist in the first place
            return False
        else:
            raise
    return stat.S_ISBLK(stat_structure.st_mode)


def mknod_blkdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a block device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_blkdev /dev/blk 8 999
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating block device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Mode string like '0660' is parsed as octal for os.mknod.
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created block device
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret


def is_fifo(name):
    '''
    Check if a file exists and is a FIFO.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)

    stat_structure = None
    try:
        stat_structure = os.stat(name)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # If the fifo does not exist in the first place
            return False
        else:
            raise
    return stat.S_ISFIFO(stat_structure.st_mode)


def mknod_fifo(name,
               user=None,
               group=None,
               mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a FIFO pipe.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating FIFO name: {0}'.format(name))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
            ret['result'] = None
        else:
            if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
                ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created fifo
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret


def mknod(name,
          ntype,
          major=0,
          minor=0,
          user=None,
          group=None,
          mode='0600'):
    '''
    .. versionadded:: 0.17.0

    Create a block device, character device, or fifo pipe.
    Identical to the gnu mknod.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.mknod /dev/chr c 180 31
        salt '*' file.mknod /dev/blk b 8 999
        salt '*' file.mknod /dev/fifo p
    '''
    ret = False
    makedirs_(name, user, group)
    # Dispatch on the node type character, mirroring GNU mknod's c/b/p.
    if ntype == 'c':
        ret = mknod_chrdev(name, major, minor, user, group, mode)
    elif ntype == 'b':
        ret = mknod_blkdev(name, major, minor, user, group, mode)
    elif ntype == 'p':
        ret = mknod_fifo(name, user, group, mode)
    else:
        raise SaltInvocationError(
            'Node type unavailable: \'{0}\'. Available node types are '
            'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype)
        )
    return ret


def list_backups(path, limit=None):
    '''
    .. versionadded:: 0.17.0

    Lists the previous versions of a file backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups

    CLI Example:

    ..
 code-block:: bash

        salt '*' file.list_backups /foo/bar/baz.txt
    '''
    path = os.path.expanduser(path)

    try:
        limit = int(limit)
    except TypeError:
        # limit=None means "no limit"; a None slice bound keeps everything.
        pass
    except ValueError:
        log.error('file.list_backups: \'limit\' value must be numeric')
        limit = None

    bkroot = _get_bkroot()
    # NOTE(review): `basename` shadows the module-level basename() helper for
    # the rest of this function body — harmless here, but fragile.
    parent_dir, basename = os.path.split(path)
    if salt.utils.is_windows():
        # ':' is an illegal filesystem path character on Windows
        src_dir = parent_dir.replace(':', '_')
    else:
        src_dir = parent_dir[1:]
    # Figure out full path of location of backup file in minion cache
    bkdir = os.path.join(bkroot, src_dir)

    if not os.path.isdir(bkdir):
        return {}
    files = {}
    for fname in [x for x in os.listdir(bkdir)
                  if os.path.isfile(os.path.join(bkdir, x))]:
        if salt.utils.is_windows():
            # ':' is an illegal filesystem path character on Windows
            strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
        else:
            strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # File didn't match the strp format string, so it's not a backup
            # for this file. Move on to the next one.
            continue
        if salt.utils.is_windows():
            str_format = '%a %b %d %Y %H-%M-%S.%f'
        else:
            str_format = '%a %b %d %Y %H:%M:%S.%f'
        files.setdefault(timestamp, {})['Backup Time'] = \
            timestamp.strftime(str_format)
        location = os.path.join(bkdir, fname)
        files[timestamp]['Size'] = os.stat(location).st_size
        files[timestamp]['Location'] = location
    # Newest backup first, re-keyed by sequential integer id (0 = newest).
    return dict(list(zip(
        list(range(len(files))),
        [files[x] for x in sorted(files, reverse=True)[:limit]]
    )))

list_backup = salt.utils.alias_function(list_backups, 'list_backup')


def list_backups_dir(path, limit=None):
    '''
    Lists the previous versions of a directory backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The directory on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups

    CLI Example:

    .. code-block:: bash

        salt '*' file.list_backups_dir /foo/bar/baz/
    '''
    path = os.path.expanduser(path)

    try:
        limit = int(limit)
    except TypeError:
        pass
    except ValueError:
        log.error('file.list_backups_dir: \'limit\' value must be numeric')
        limit = None

    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    # Figure out full path of location of backup folder in minion cache
    bkdir = os.path.join(bkroot, parent_dir[1:])

    if not os.path.isdir(bkdir):
        return {}
    files = {}
    # Map each backed-up entry name prefix to the number of its backups.
    f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])])
    ff = os.listdir(bkdir)
    for i, n in six.iteritems(f):
        ssfile = {}
        for x in sorted(ff):
            basename = x.split('_')[0]
            if i == basename:
                strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
                try:
                    timestamp = datetime.datetime.strptime(x, strpfmt)
                except ValueError:
                    # Folder didn't match the strp format string, so it's not a backup
                    # for this folder. Move on to the next one.
                    continue
                ssfile.setdefault(timestamp, {})['Backup Time'] = \
                    timestamp.strftime('%a %b %d %Y %H:%M:%S.%f')
                location = os.path.join(bkdir, x)
                ssfile[timestamp]['Size'] = os.stat(location).st_size
                ssfile[timestamp]['Location'] = location

        sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]])))
        sefiles = {i: sfiles}
        files.update(sefiles)
    return files


def restore_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0

    Restore a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to restore, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.restore_backup /foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)

    # Note: This only supports minion backups, so this function will need to
    # be modified if/when master backups are implemented.
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        # Reject ids like '1.5' whose int() round-trip changes the length.
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret

    # Back up the current file before overwriting it with the restored copy.
    salt.utils.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(backup['Location'], path)
    except IOError as exc:
        ret['comment'] = \
            'Unable to restore {0} to {1}: ' \
            '{2}'.format(backup['Location'], path, exc)
        return ret
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully restored {0} to ' \
                         '{1}'.format(backup['Location'], path)

    # Try to set proper ownership
    if not salt.utils.is_windows():
        try:
            fstat = os.stat(path)
        except (OSError, IOError):
            ret['comment'] += ', but was unable to set ownership'
        else:
            os.chown(path, fstat.st_uid, fstat.st_gid)

    return ret


def delete_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0

    Delete a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to delete, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.delete_backup /var/cache/salt/minion/file_backup/home/foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)

    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret

    try:
        os.remove(backup['Location'])
    except IOError as exc:
        ret['comment'] = 'Unable to remove {0}: {1}'.format(backup['Location'], exc)
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully removed {0}'.format(backup['Location'])

    return ret

remove_backup = salt.utils.alias_function(delete_backup, 'remove_backup')


def grep(path,
         pattern,
         *opts):
    '''
    Grep for a string in the specified file

    .. note::
        This function's return value is slated for refinement in future
        versions of Salt

    path
        Path to the file to be searched

        .. note::
            Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
            is being used then the path should be quoted to keep the shell from
            attempting to expand the glob expression.

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    opts
        Additional command-line flags to pass to the grep command. For example:
        ``-v``, or ``-i -B2``

        .. note::
            The options should come after a double-dash (as shown in the
            examples below) to keep Salt's own argument parser from
            interpreting them.

    CLI Example:

    .. code-block:: bash

        salt '*' file.grep /etc/passwd nobody
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
        salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
    '''
    path = os.path.expanduser(path)

    # Each opt must be a single flag; multi-token strings are rejected so the
    # command list cannot be smuggled through a single argument.
    split_opts = []
    for opt in opts:
        try:
            split = salt.utils.shlex_split(opt)
        except AttributeError:
            split = salt.utils.shlex_split(str(opt))
        if len(split) > 1:
            raise SaltInvocationError(
                'Passing multiple command line arguments in a single string '
                'is not supported, please pass the following arguments '
                'separately: {0}'.format(opt)
            )
        split_opts.extend(split)

    # python_shell=False + argument list: no shell interpolation of inputs.
    cmd = ['grep'] + split_opts + [pattern, path]
    try:
        ret = __salt__['cmd.run_all'](cmd, python_shell=False)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)

    return ret


def open_files(by_pid=False):
    '''
    Return a list of all physical open files on the system.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.open_files
        salt '*' file.open_files by_pid=True
    '''
    # First we collect valid PIDs
    pids = {}
    procfs = os.listdir('/proc/')
    for pfile in procfs:
        try:
            pids[int(pfile)] = []
        except ValueError:
            # Not a valid PID, move on
            pass

    # Then we look at the open files for each PID
    files = {}
    for pid in pids:
        ppath = '/proc/{0}'.format(pid)
        try:
            tids = os.listdir('{0}/task'.format(ppath))
        except OSError:
            continue

        # Collect the names of all of the file descriptors
        fd_ = []

        #try:
        #    fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid)))
        #except:
        #    pass

        for fpath in os.listdir('{0}/fd'.format(ppath)):
            fd_.append('{0}/fd/{1}'.format(ppath, fpath))

        for tid in tids:
            try:
                fd_.append(
                    os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
                )
            except OSError:
                continue

            for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
                fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))

        fd_ = sorted(set(fd_))

        # Loop through file descriptors and return useful data for each file
        for fdpath in fd_:
            # Sometimes PIDs and TIDs disappear before we can query them
            try:
                name = os.path.realpath(fdpath)
                # Running stat on the file cuts out all of the sockets and
                # deleted files from the list
                os.stat(name)
            except OSError:
                continue

            if name not in files:
                files[name] = [pid]
            else:
                # We still want to know which PIDs are using each file
                files[name].append(pid)
                files[name] = sorted(set(files[name]))

            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))

    if by_pid:
        return pids
    return files


def pardir():
    '''
    Return the relative parent directory path symbol for underlying OS

    .. versionadded:: 2014.7.0

    This can be useful when constructing Salt Formulas.

    .. code-block:: jinja

        {% set pardir = salt['file.pardir']() %}
        {% set final_path = salt['file.join']('subdir', pardir, 'confdir') %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.pardir
    '''
    return os.path.pardir


def normpath(path):
    '''
    Returns Normalize path, eliminating double slashes, etc.

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.normpath'](tpldir + '/../vars.jinja') import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.normpath 'a/b/c/..'
    '''
    return os.path.normpath(path)


def basename(path):
    '''
    Returns the final component of a pathname

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- set filename = salt['file.basename'](source_file) %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.basename 'test/test.config'
    '''
    return os.path.basename(path)


def dirname(path):
    '''
    Returns the directory component of a pathname

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.dirname'](tpldir) + '/vars.jinja' import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.dirname 'test/path/filename.config'
    '''
    return os.path.dirname(path)


def join(*args):
    '''
    Return a normalized file system path for the underlying OS

    .. versionadded:: 2014.7.0

    This can be useful at the CLI but is frequently useful when scripting
    combining path variables:

    .. code-block:: jinja

        {% set www_root = '/var' %}
        {% set app_dir = 'myapp' %}

        myapp_config:
          file:
            - managed
            - name: {{ salt['file.join'](www_root, app_dir, 'config.yaml') }}

    CLI Example:

    .. code-block:: bash

        salt '*' file.join '/' 'usr' 'local' 'bin'
    '''
    return os.path.join(*args)


def move(src, dst):
    '''
    Move a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.move /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('Source path must be absolute.')

    if not os.path.isabs(dst):
        raise SaltInvocationError('Destination path must be absolute.')

    ret = {
        'result': True,
        'comment': "'{0}' moved to '{1}'".format(src, dst),
    }

    try:
        shutil.move(src, dst)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to move '{0}' to '{1}': {2}".format(src, dst, exc)
        )

    return ret


def diskusage(path):
    '''
    Recursively calculate disk usage of path and return it in bytes

    CLI Example:

    .. code-block:: bash

        salt '*' file.diskusage /path/to/check
    '''
    total_size = 0
    seen = set()
    if os.path.isfile(path):
        # A plain file: its size is the whole answer.
        stat_structure = os.stat(path)
        ret = stat_structure.st_size
        return ret

    for dirpath, dirnames, filenames in os.walk(path):
        for f in filenames:
            fp = os.path.join(dirpath, f)

            # Files may vanish mid-walk (e.g. under /proc); skip them.
            try:
                stat_structure = os.stat(fp)
            except OSError:
                continue

            # Count hard links only once.
            # NOTE(review): dedup is by st_ino alone; inode numbers are only
            # unique per device, so a multi-device tree could collide — a
            # (st_dev, st_ino) pair would be safer. Confirm before changing.
            if stat_structure.st_ino in seen:
                continue

            seen.add(stat_structure.st_ino)

            total_size += stat_structure.st_size

    ret = total_size
    return ret
./CrossVul/dataset_final_sorted/CWE-200/py/good_3325_2
crossvul-python_data_good_3325_3
# -*- coding: utf-8 -*- ''' Operations on regular files, special files, directories, and symlinks ===================================================================== Salt States can aggressively manipulate files on a system. There are a number of ways in which files can be managed. Regular files can be enforced with the :mod:`file.managed <salt.states.file.managed>` state. This state downloads files from the salt master and places them on the target system. Managed files can be rendered as a jinja, mako, or wempy template, adding a dynamic component to file management. An example of :mod:`file.managed <salt.states.file.managed>` which makes use of the jinja templating system would look like this: .. code-block:: yaml /etc/http/conf/http.conf: file.managed: - source: salt://apache/http.conf - user: root - group: root - mode: 644 - template: jinja - defaults: custom_var: "default value" other_var: 123 {% if grains['os'] == 'Ubuntu' %} - context: custom_var: "override" {% endif %} It is also possible to use the :mod:`py renderer <salt.renderers.py>` as a templating option. The template would be a Python script which would need to contain a function called ``run()``, which returns a string. All arguments to the state will be made available to the Python script as globals. The returned string will be the contents of the managed file. For example: .. code-block:: python def run(): lines = ['foo', 'bar', 'baz'] lines.extend([source, name, user, context]) # Arguments as globals return '\\n\\n'.join(lines) .. note:: The ``defaults`` and ``context`` arguments require extra indentation (four spaces instead of the normal two) in order to create a nested dictionary. :ref:`More information <nested-dict-indentation>`. If using a template, any user-defined template variables in the file defined in ``source`` must be passed in using the ``defaults`` and/or ``context`` arguments. 
The general best practice is to place default values in ``defaults``, with conditional overrides going into ``context``, as seen above. The template will receive a variable ``custom_var``, which would be accessed in the template using ``{{ custom_var }}``. If the operating system is Ubuntu, the value of the variable ``custom_var`` would be *override*, otherwise it is the default *default value* The ``source`` parameter can be specified as a list. If this is done, then the first file to be matched will be the one that is used. This allows you to have a default file on which to fall back if the desired file does not exist on the salt fileserver. Here's an example: .. code-block:: yaml /etc/foo.conf: file.managed: - source: - salt://foo.conf.{{ grains['fqdn'] }} - salt://foo.conf.fallback - user: foo - group: users - mode: 644 - backup: minion .. note:: Salt supports backing up managed files via the backup option. For more details on this functionality please review the :ref:`backup_mode documentation <file-state-backups>`. The ``source`` parameter can also specify a file in another Salt environment. In this example ``foo.conf`` in the ``dev`` environment will be used instead. .. code-block:: yaml /etc/foo.conf: file.managed: - source: - salt://foo.conf?saltenv=dev - user: foo - group: users - mode: '0644' .. warning:: When using a mode that includes a leading zero you must wrap the value in single quotes. If the value is not wrapped in quotes it will be read by YAML as an integer and evaluated as an octal. The ``names`` parameter, which is part of the state compiler, can be used to expand the contents of a single state declaration into multiple, single state declarations. Each item in the ``names`` list receives its own individual state ``name`` and is converted into its own low-data structure. This is a convenient way to manage several files with similar attributes. 
There is more documentation about this feature in the :ref:`Names declaration<names-declaration>` section of the :ref:`Highstate docs<states-highstate>`. Special files can be managed via the ``mknod`` function. This function will create and enforce the permissions on a special file. The function supports the creation of character devices, block devices, and FIFO pipes. The function will create the directory structure up to the special file if it is needed on the minion. The function will not overwrite or operate on (change major/minor numbers) existing special files with the exception of user, group, and permissions. In most cases the creation of some special files require root permissions on the minion. This would require that the minion to be run as the root user. Here is an example of a character device: .. code-block:: yaml /var/named/chroot/dev/random: file.mknod: - ntype: c - major: 1 - minor: 8 - user: named - group: named - mode: 660 Here is an example of a block device: .. code-block:: yaml /var/named/chroot/dev/loop0: file.mknod: - ntype: b - major: 7 - minor: 0 - user: named - group: named - mode: 660 Here is an example of a fifo pipe: .. code-block:: yaml /var/named/chroot/var/log/logfifo: file.mknod: - ntype: p - user: named - group: named - mode: 660 Directories can be managed via the ``directory`` function. This function can create and enforce the permissions on a directory. A directory statement will look like this: .. code-block:: yaml /srv/stuff/substuf: file.directory: - user: fred - group: users - mode: 755 - makedirs: True If you need to enforce user and/or group ownership or permissions recursively on the directory's contents, you can do so by adding a ``recurse`` directive: .. 
code-block:: yaml /srv/stuff/substuf: file.directory: - user: fred - group: users - mode: 755 - makedirs: True - recurse: - user - group - mode As a default, ``mode`` will resolve to ``dir_mode`` and ``file_mode``, to specify both directory and file permissions, use this form: .. code-block:: yaml /srv/stuff/substuf: file.directory: - user: fred - group: users - file_mode: 744 - dir_mode: 755 - makedirs: True - recurse: - user - group - mode Symlinks can be easily created; the symlink function is very simple and only takes a few arguments: .. code-block:: yaml /etc/grub.conf: file.symlink: - target: /boot/grub/grub.conf Recursive directory management can also be set via the ``recurse`` function. Recursive directory management allows for a directory on the salt master to be recursively copied down to the minion. This is a great tool for deploying large code and configuration systems. A state using ``recurse`` would look something like this: .. code-block:: yaml /opt/code/flask: file.recurse: - source: salt://code/flask - include_empty: True A more complex ``recurse`` example: .. code-block:: yaml {% set site_user = 'testuser' %} {% set site_name = 'test_site' %} {% set project_name = 'test_proj' %} {% set sites_dir = 'test_dir' %} django-project: file.recurse: - name: {{ sites_dir }}/{{ site_name }}/{{ project_name }} - user: {{ site_user }} - dir_mode: 2775 - file_mode: '0644' - template: jinja - source: salt://project/templates_dir - include_empty: True Retention scheduling can be applied to manage contents of backup directories. For example: .. 
code-block:: yaml /var/backups/example_directory: file.retention_schedule: - strptime_format: example_name_%Y%m%dT%H%M%S.tar.bz2 - retain: most_recent: 5 first_of_hour: 4 first_of_day: 14 first_of_week: 6 first_of_month: 6 first_of_year: all ''' # Import python libs from __future__ import absolute_import import difflib import itertools import logging import os import shutil import sys import traceback from collections import Iterable, Mapping, defaultdict from datetime import datetime # python3 problem in the making? # Import salt libs import salt.loader import salt.payload import salt.utils import salt.utils.dictupdate import salt.utils.templates import salt.utils.url from salt.utils.locales import sdecode from salt.exceptions import CommandExecutionError, SaltInvocationError # Import 3rd-party libs import salt.ext.six as six from salt.ext.six.moves import zip_longest log = logging.getLogger(__name__) COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?' __NOT_FOUND = object() def _get_accumulator_filepath(): ''' Return accumulator data path. 
''' return os.path.join(salt.utils.get_accumulator_dir(__opts__['cachedir']), __instance_id__) def _load_accumulators(): def _deserialize(path): serial = salt.payload.Serial(__opts__) ret = {'accumulators': {}, 'accumulators_deps': {}} try: with salt.utils.fopen(path, 'rb') as f: loaded = serial.load(f) return loaded if loaded else ret except (IOError, NameError): # NameError is a msgpack error from salt-ssh return ret loaded = _deserialize(_get_accumulator_filepath()) return loaded['accumulators'], loaded['accumulators_deps'] def _persist_accummulators(accumulators, accumulators_deps): accumm_data = {'accumulators': accumulators, 'accumulators_deps': accumulators_deps} serial = salt.payload.Serial(__opts__) try: with salt.utils.fopen(_get_accumulator_filepath(), 'w+b') as f: serial.dump(accumm_data, f) except NameError: # msgpack error from salt-ssh pass def _check_user(user, group): ''' Checks if the named user and group are present on the minion ''' err = '' if user: uid = __salt__['file.user_to_uid'](user) if uid == '': err += 'User {0} is not available '.format(user) if group: gid = __salt__['file.group_to_gid'](group) if gid == '': err += 'Group {0} is not available'.format(group) return err def _gen_keep_files(name, require, walk_d=None): ''' Generate the list of files that need to be kept when a dir based function like directory or recurse has a clean. 
''' def _is_child(path, directory): ''' Check whether ``path`` is child of ``directory`` ''' path = os.path.abspath(path) directory = os.path.abspath(directory) relative = os.path.relpath(path, directory) return not relative.startswith(os.pardir) def _add_current_path(path): _ret = set() if os.path.isdir(path): dirs, files = walk_d.get(path, ((), ())) _ret.add(path) for _name in files: _ret.add(os.path.join(path, _name)) for _name in dirs: _ret.add(os.path.join(path, _name)) return _ret def _process_by_walk_d(name, ret): if os.path.isdir(name): walk_ret.update(_add_current_path(name)) dirs, _ = walk_d.get(name, ((), ())) for _d in dirs: p = os.path.join(name, _d) walk_ret.update(_add_current_path(p)) _process_by_walk_d(p, ret) def _process(name): ret = set() if os.path.isdir(name): for root, dirs, files in os.walk(name): ret.add(name) for name in files: ret.add(os.path.join(root, name)) for name in dirs: ret.add(os.path.join(root, name)) return ret keep = set() if isinstance(require, list): required_files = [comp for comp in require if 'file' in comp] for comp in required_files: for low in __lowstate__: # A requirement should match either the ID and the name of # another state. 
if low['name'] == comp['file'] or low['__id__'] == comp['file']: fn = low['name'] if os.path.isdir(fn): if _is_child(fn, name): if walk_d: walk_ret = set() _process_by_walk_d(fn, walk_ret) keep.update(walk_ret) else: keep.update(_process(fn)) else: keep.add(fn) return list(keep) def _check_file(name): ret = True msg = '' if not os.path.isabs(name): ret = False msg = 'Specified file {0} is not an absolute path'.format(name) elif not os.path.exists(name): ret = False msg = '{0}: file not found'.format(name) return ret, msg def _clean_dir(root, keep, exclude_pat): ''' Clean out all of the files and directories in a directory (root) while preserving the files in a list (keep) and part of exclude_pat ''' removed = set() real_keep = set() real_keep.add(root) if isinstance(keep, list): for fn_ in keep: if not os.path.isabs(fn_): continue real_keep.add(fn_) while True: fn_ = os.path.dirname(fn_) real_keep.add(fn_) if fn_ in ['/', ''.join([os.path.splitdrive(fn_)[0], '\\\\'])]: break def _delete_not_kept(nfn): if nfn not in real_keep: # -- check if this is a part of exclude_pat(only). 
No need to # check include_pat if not salt.utils.check_include_exclude( os.path.relpath(nfn, root), None, exclude_pat): return removed.add(nfn) if not __opts__['test']: try: os.remove(nfn) except OSError: __salt__['file.remove'](nfn) for roots, dirs, files in os.walk(root): for name in itertools.chain(dirs, files): _delete_not_kept(os.path.join(roots, name)) return list(removed) def _error(ret, err_msg): ret['result'] = False ret['comment'] = err_msg return ret def _check_directory(name, user, group, recurse, mode, clean, require, exclude_pat, max_depth=None): ''' Check what changes need to be made on a directory ''' changes = {} if recurse or clean: assert max_depth is None or not clean # walk path only once and store the result walk_l = list(_depth_limited_walk(name, max_depth)) # root: (dirs, files) structure, compatible for python2.6 walk_d = {} for i in walk_l: walk_d[i[0]] = (i[1], i[2]) if recurse: try: recurse_set = _get_recurse_set(recurse) except (TypeError, ValueError) as exc: return False, '{0}'.format(exc), changes if 'user' not in recurse_set: user = None if 'group' not in recurse_set: group = None if 'mode' not in recurse_set: mode = None check_files = 'ignore_files' not in recurse_set check_dirs = 'ignore_dirs' not in recurse_set for root, dirs, files in walk_l: if check_files: for fname in files: fchange = {} path = os.path.join(root, fname) stats = __salt__['file.stats']( path, None, follow_symlinks=False ) if user is not None and user != stats.get('user'): fchange['user'] = user if group is not None and group != stats.get('group'): fchange['group'] = group if fchange: changes[path] = fchange if check_dirs: for name_ in dirs: path = os.path.join(root, name_) fchange = _check_dir_meta(path, user, group, mode) if fchange: changes[path] = fchange # Recurse skips root (we always do dirs, not root), so always check root: fchange = _check_dir_meta(name, user, group, mode) if fchange: changes[name] = fchange if clean: keep = _gen_keep_files(name, 
require, walk_d) def _check_changes(fname): path = os.path.join(root, fname) if path in keep: return {} else: if not salt.utils.check_include_exclude( os.path.relpath(path, name), None, exclude_pat): return {} else: return {path: {'removed': 'Removed due to clean'}} for root, dirs, files in walk_l: for fname in files: changes.update(_check_changes(fname)) for name_ in dirs: changes.update(_check_changes(name_)) if not os.path.isdir(name): changes[name] = {'directory': 'new'} if changes: comments = ['The following files will be changed:\n'] for fn_ in changes: for key, val in six.iteritems(changes[fn_]): comments.append('{0}: {1} - {2}\n'.format(fn_, key, val)) return None, ''.join(comments), changes return True, 'The directory {0} is in the correct state'.format(name), changes def _check_dir_meta(name, user, group, mode): ''' Check the changes in directory metadata ''' stats = __salt__['file.stats'](name, follow_symlinks=False) changes = {} if not stats: changes['directory'] = 'new' return changes if (user is not None and user != stats['user'] and user != stats.get('uid')): changes['user'] = user if (group is not None and group != stats['group'] and group != stats.get('gid')): changes['group'] = group # Normalize the dir mode smode = salt.utils.normalize_mode(stats['mode']) mode = salt.utils.normalize_mode(mode) if mode is not None and mode != smode: changes['mode'] = mode return changes def _check_touch(name, atime, mtime): ''' Check to see if a file needs to be updated or created ''' if not os.path.exists(name): return None, 'File {0} is set to be created'.format(name) stats = __salt__['file.stats'](name, follow_symlinks=False) if atime is not None: if str(atime) != str(stats['atime']): return None, 'Times set to be updated on file {0}'.format(name) if mtime is not None: if str(mtime) != str(stats['mtime']): return None, 'Times set to be updated on file {0}'.format(name) return True, 'File {0} exists and has the correct times'.format(name) def 
_get_symlink_ownership(path): return ( __salt__['file.get_user'](path, follow_symlinks=False), __salt__['file.get_group'](path, follow_symlinks=False) ) def _check_symlink_ownership(path, user, group): ''' Check if the symlink ownership matches the specified user and group ''' cur_user, cur_group = _get_symlink_ownership(path) return (cur_user == user) and (cur_group == group) def _set_symlink_ownership(path, user, group): ''' Set the ownership of a symlink and return a boolean indicating success/failure ''' try: __salt__['file.lchown'](path, user, group) except OSError: pass return _check_symlink_ownership(path, user, group) def _symlink_check(name, target, force, user, group): ''' Check the symlink function ''' pchanges = {} if not os.path.exists(name) and not __salt__['file.is_link'](name): pchanges['new'] = name return None, 'Symlink {0} to {1} is set for creation'.format( name, target ), pchanges if __salt__['file.is_link'](name): if __salt__['file.readlink'](name) != target: pchanges['change'] = name return None, 'Link {0} target is set to be changed to {1}'.format( name, target ), pchanges else: result = True msg = 'The symlink {0} is present'.format(name) if not _check_symlink_ownership(name, user, group): result = None pchanges['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name)) msg += ( ', but the ownership of the symlink would be changed ' 'from {2}:{3} to {0}:{1}' ).format(user, group, *_get_symlink_ownership(name)) return result, msg, pchanges else: if force: return None, ('The file or directory {0} is set for removal to ' 'make way for a new symlink targeting {1}' .format(name, target)), pchanges return False, ('File or directory exists where the symlink {0} ' 'should be. Did you mean to use force?'.format(name)), pchanges def _test_owner(kwargs, user=None): ''' Convert owner to user, since other config management tools use owner, no need to punish people coming from other systems. PLEASE DO NOT DOCUMENT THIS! WE USE USER, NOT OWNER!!!! 
''' if user: return user if 'owner' in kwargs: log.warning( 'Use of argument owner found, "owner" is invalid, please ' 'use "user"' ) return kwargs['owner'] return user def _unify_sources_and_hashes(source=None, source_hash=None, sources=None, source_hashes=None): ''' Silly little function to give us a standard tuple list for sources and source_hashes ''' if sources is None: sources = [] if source_hashes is None: source_hashes = [] if source and sources: return (False, "source and sources are mutually exclusive", []) if source_hash and source_hashes: return (False, "source_hash and source_hashes are mutually exclusive", []) if source: return (True, '', [(source, source_hash)]) # Make a nice neat list of tuples exactly len(sources) long.. return True, '', list(zip_longest(sources, source_hashes[:len(sources)])) def _get_template_texts(source_list=None, template='jinja', defaults=None, context=None, **kwargs): ''' Iterate a list of sources and process them as templates. Returns a list of 'chunks' containing the rendered templates. 
''' ret = {'name': '_get_template_texts', 'changes': {}, 'result': True, 'comment': '', 'data': []} if source_list is None: return _error(ret, '_get_template_texts called with empty source_list') txtl = [] for (source, source_hash) in source_list: tmpctx = defaults if defaults else {} if context: tmpctx.update(context) rndrd_templ_fn = __salt__['cp.get_template']( source, '', template=template, saltenv=__env__, context=tmpctx, **kwargs ) msg = 'cp.get_template returned {0} (Called with: {1})' log.debug(msg.format(rndrd_templ_fn, source)) if rndrd_templ_fn: tmplines = None with salt.utils.fopen(rndrd_templ_fn, 'rb') as fp_: tmplines = fp_.readlines() if not tmplines: msg = 'Failed to read rendered template file {0} ({1})' log.debug(msg.format(rndrd_templ_fn, source)) ret['name'] = source return _error(ret, msg.format(rndrd_templ_fn, source)) txtl.append(''.join(tmplines)) else: msg = 'Failed to load template file {0}'.format(source) log.debug(msg) ret['name'] = source return _error(ret, msg) ret['data'] = txtl return ret def _validate_str_list(arg): ''' ensure ``arg`` is a list of strings ''' if isinstance(arg, six.string_types): ret = [arg] elif isinstance(arg, Iterable) and not isinstance(arg, Mapping): ret = [] for item in arg: if isinstance(item, six.string_types): ret.append(item) else: ret.append(str(item)) else: ret = [str(arg)] return ret def symlink( name, target, force=False, backupname=None, makedirs=False, user=None, group=None, mode=None, **kwargs): ''' Create a symbolic link (symlink, soft link) If the file already exists and is a symlink pointing to any location other than the specified target, the symlink will be replaced. If the symlink is a regular file or directory then the state will return False. If the regular file or directory is desired to be replaced with a symlink pass force: True, if it is to be renamed, pass a backupname. 
name The location of the symlink to create target The location that the symlink points to force If the name of the symlink exists and is not a symlink and force is set to False, the state will fail. If force is set to True, the file or directory in the way of the symlink file will be deleted to make room for the symlink, unless backupname is set, when it will be renamed backupname If the name of the symlink exists and is not a symlink, it will be renamed to the backupname. If the backupname already exists and force is False, the state will fail. Otherwise, the backupname will be removed first. makedirs If the location of the symlink does not already have a parent directory then the state will fail, setting makedirs to True will allow Salt to create the parent directory user The user to own the file, this defaults to the user salt is running as on the minion group The group ownership set for the file, this defaults to the group salt is running as on the minion. On Windows, this is ignored mode The permissions to set on this file, aka 644, 0775, 4664. Not supported on Windows. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. 
''' name = os.path.expanduser(name) # Make sure that leading zeros stripped by YAML loader are added back mode = salt.utils.normalize_mode(mode) user = _test_owner(kwargs, user=user) ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.symlink') if user is None: user = __opts__['user'] if salt.utils.is_windows(): # Make sure the user exists in Windows # Salt default is 'root' if not __salt__['user.info'](user): # User not found, use the account salt is running under # If username not found, use System user = __salt__['user.current']() if not user: user = 'SYSTEM' if group is not None: log.warning( 'The group argument for {0} has been ignored as this ' 'is a Windows system.'.format(name) ) group = user if group is None: group = __salt__['file.gid_to_group']( __salt__['user.info'](user).get('gid', 0) ) preflight_errors = [] uid = __salt__['file.user_to_uid'](user) gid = __salt__['file.group_to_gid'](group) if uid == '': preflight_errors.append('User {0} does not exist'.format(user)) if gid == '': preflight_errors.append('Group {0} does not exist'.format(group)) if not os.path.isabs(name): preflight_errors.append( 'Specified file {0} is not an absolute path'.format(name) ) if preflight_errors: msg = '. '.join(preflight_errors) if len(preflight_errors) > 1: msg += '.' 
return _error(ret, msg) presult, pcomment, ret['pchanges'] = _symlink_check(name, target, force, user, group) if __opts__['test']: ret['result'] = presult ret['comment'] = pcomment return ret if not os.path.isdir(os.path.dirname(name)): if makedirs: __salt__['file.makedirs']( name, user=user, group=group, mode=mode) else: return _error( ret, 'Directory {0} for symlink is not present'.format( os.path.dirname(name) ) ) if __salt__['file.is_link'](name): # The link exists, verify that it matches the target if os.path.normpath(__salt__['file.readlink'](name)) != os.path.normpath(target): # The target is wrong, delete the link os.remove(name) else: if _check_symlink_ownership(name, user, group): # The link looks good! ret['comment'] = ('Symlink {0} is present and owned by ' '{1}:{2}'.format(name, user, group)) else: if _set_symlink_ownership(name, user, group): ret['comment'] = ('Set ownership of symlink {0} to ' '{1}:{2}'.format(name, user, group)) ret['changes']['ownership'] = '{0}:{1}'.format(user, group) else: ret['result'] = False ret['comment'] += ( 'Failed to set ownership of symlink {0} to ' '{1}:{2}'.format(name, user, group) ) return ret elif os.path.isfile(name) or os.path.isdir(name): # It is not a link, but a file or dir if backupname is not None: # Make a backup first if os.path.lexists(backupname): if not force: return _error(ret, (( 'File exists where the backup target {0} should go' ).format(backupname))) else: __salt__['file.remove'](backupname) os.rename(name, backupname) elif force: # Remove whatever is in the way if __salt__['file.is_link'](name): __salt__['file.remove'](name) ret['changes']['forced'] = 'Symlink was forcibly replaced' else: __salt__['file.remove'](name) else: # Otherwise throw an error if os.path.isfile(name): return _error(ret, ('File exists where the symlink {0} should be' .format(name))) else: return _error(ret, (( 'Directory exists where the symlink {0} should be' ).format(name))) if not os.path.exists(name): # The link is not 
present, make it try: __salt__['file.symlink'](target, name) except OSError as exc: ret['result'] = False ret['comment'] = ('Unable to create new symlink {0} -> ' '{1}: {2}'.format(name, target, exc)) return ret else: ret['comment'] = ('Created new symlink {0} -> ' '{1}'.format(name, target)) ret['changes']['new'] = name if not _check_symlink_ownership(name, user, group): if not _set_symlink_ownership(name, user, group): ret['result'] = False ret['comment'] += (', but was unable to set ownership to ' '{0}:{1}'.format(user, group)) return ret def absent(name): ''' Make sure that the named file or directory is absent. If it exists, it will be deleted. This will work to reverse any of the functions in the file state module. If a directory is supplied, it will be recursively deleted. name The path which should be deleted ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': True, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.absent') if not os.path.isabs(name): return _error( ret, 'Specified file {0} is not an absolute path'.format(name) ) if name == '/': return _error(ret, 'Refusing to make "/" absent') if os.path.isfile(name) or os.path.islink(name): ret['pchanges']['removed'] = name if __opts__['test']: ret['result'] = None ret['comment'] = 'File {0} is set for removal'.format(name) return ret try: __salt__['file.remove'](name) ret['comment'] = 'Removed file {0}'.format(name) ret['changes']['removed'] = name return ret except CommandExecutionError as exc: return _error(ret, '{0}'.format(exc)) elif os.path.isdir(name): ret['pchanges']['removed'] = name if __opts__['test']: ret['result'] = None ret['comment'] = 'Directory {0} is set for removal'.format(name) return ret try: __salt__['file.remove'](name) ret['comment'] = 'Removed directory {0}'.format(name) ret['changes']['removed'] = name return ret except (OSError, IOError): return _error(ret, 'Failed to remove directory {0}'.format(name)) 
ret['comment'] = 'File {0} is not present'.format(name) return ret def exists(name): ''' Verify that the named file or directory is present or exists. Ensures pre-requisites outside of Salt's purview (e.g., keytabs, private keys, etc.) have been previously satisfied before deployment. name Absolute path which must exist ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': True, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.exists') if not os.path.exists(name): return _error(ret, 'Specified path {0} does not exist'.format(name)) ret['comment'] = 'Path {0} exists'.format(name) return ret def missing(name): ''' Verify that the named file or directory is missing, this returns True only if the named file is missing but does not remove the file if it is present. name Absolute path which must NOT exist ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': True, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.missing') if os.path.exists(name): return _error(ret, 'Specified path {0} exists'.format(name)) ret['comment'] = 'Path {0} is missing'.format(name) return ret def managed(name, source=None, source_hash='', source_hash_name=None, user=None, group=None, mode=None, template=None, makedirs=False, dir_mode=None, context=None, replace=True, defaults=None, backup='', show_changes=True, create=True, contents=None, tmp_ext='', contents_pillar=None, contents_grains=None, contents_newline=True, contents_delimiter=':', allow_empty=True, follow_symlinks=True, check_cmd=None, skip_verify=False, **kwargs): ''' Manage a given file, this function allows for a file to be downloaded from the salt master and potentially run through a templating system. 
name The location of the file to manage source The source file to download to the minion, this source file can be hosted on either the salt master server (``salt://``), the salt minion local file system (``/``), or on an HTTP or FTP server (``http(s)://``, ``ftp://``). Both HTTPS and HTTP are supported as well as downloading directly from Amazon S3 compatible URLs with both pre-configured and automatic IAM credentials. (see s3.get state documentation) File retrieval from Openstack Swift object storage is supported via swift://container/object_path URLs, see swift.get documentation. For files hosted on the salt file server, if the file is located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs. If source is left blank or None (use ~ in YAML), the file will be created as an empty file and the content will not be managed. This is also the case when a file already exists and the source is undefined; the contents of the file will not be changed or managed. If the file is hosted on a HTTP or FTP server then the source_hash argument is also required. A list of sources can also be passed in to provide a default source and a set of fallbacks. The first source in the list that is found to exist will be used and subsequent entries in the list will be ignored. Source list functionality only supports local files and remote files hosted on the salt master server or retrievable via HTTP, HTTPS, or FTP. .. code-block:: yaml file_override_example: file.managed: - source: - salt://file_that_does_not_exist - salt://file_that_exists source_hash This can be one of the following: 1. a source hash string 2. the URI of a file that contains source hash strings The function accepts the first encountered long unbroken alphanumeric string of correct length as a valid hash, in order from most secure to least secure: .. 
code-block:: text Type Length ====== ====== sha512 128 sha384 96 sha256 64 sha224 56 sha1 40 md5 32 **Using a Source Hash File** The file can contain several checksums for several files. Each line must contain both the file name and the hash. If no file name is matched, the first hash encountered will be used, otherwise the most secure hash with the correct source file name will be used. When using a source hash file the source_hash argument needs to be a url, the standard download urls are supported, ftp, http, salt etc: Example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash The following lines are all supported formats: .. code-block:: text /etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27 sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf ead48423703509d37c4a90e6a0d53e143b6fc268 Debian file type ``*.dsc`` files are also supported. **Inserting the Source Hash in the SLS Data** The source_hash can be specified as a simple checksum, like so: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: 79eef25f9b0b2c642c62b7f737d4f53f .. note:: Releases prior to 2016.11.0 must also include the hash type, like in the below example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f Known issues: If the remote server URL has the hash file as an apparent sub-directory of the source file, the module will discover that it has already cached a directory where a file should be cached. 
For example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5 source_hash_name When ``source_hash`` refers to a hash file, Salt will try to find the correct hash by matching the filename/URI associated with that hash. By default, Salt will look for the filename being managed. When managing a file at path ``/tmp/foo.txt``, then the following line in a hash file would match: .. code-block:: text acbd18db4cc2f85cedef654fccc4a4d8 foo.txt However, sometimes a hash file will include multiple similar paths: .. code-block:: text 37b51d194a7513e45b56f6524f2d51f2 ./dir1/foo.txt acbd18db4cc2f85cedef654fccc4a4d8 ./dir2/foo.txt 73feffa4b7f6bb68e44cf984c85f6e88 ./dir3/foo.txt In cases like this, Salt may match the incorrect hash. This argument can be used to tell Salt which filename to match, to ensure that the correct hash is identified. For example: .. code-block:: yaml /tmp/foo.txt: file.managed: - source: https://mydomain.tld/dir2/foo.txt - source_hash: https://mydomain.tld/hashes - source_hash_name: ./dir2/foo.txt .. note:: This argument must contain the full filename entry from the checksum file, as this argument is meant to disambiguate matches for multiple files that have the same basename. So, in the example above, simply using ``foo.txt`` would not match. .. versionadded:: 2016.3.5 user The user to own the file, this defaults to the user salt is running as on the minion group The group ownership set for the file, this defaults to the group salt is running as on the minion On Windows, this is ignored mode The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. .. 
note:: This option is **not** supported on Windows. .. versionchanged:: 2016.11.0 This option can be set to ``keep``, and Salt will keep the mode from the Salt fileserver. This is only supported when the ``source`` URL begins with ``salt://``, or for files local to the minion. Because the ``source`` option cannot be used with any of the ``contents`` options, setting the ``mode`` to ``keep`` is also incompatible with the ``contents`` options. .. note:: keep does not work with salt-ssh. As a consequence of how the files are transfered to the minion, and the inability to connect back to the master with salt-ssh, salt is unable to stat the file as it exists on the fileserver and thus cannot mirror the mode on the salt-ssh minion template If this setting is applied, the named templating engine will be used to render the downloaded file. The following templates are supported: - :mod:`cheetah<salt.renderers.cheetah>` - :mod:`genshi<salt.renderers.genshi>` - :mod:`jinja<salt.renderers.jinja>` - :mod:`mako<salt.renderers.mako>` - :mod:`py<salt.renderers.py>` - :mod:`wempy<salt.renderers.wempy>` makedirs : False If set to ``True``, then the parent directories will be created to facilitate the creation of the named file. If ``False``, and the parent directory of the destination file doesn't exist, the state will fail. dir_mode If directories are to be created, passing this option specifies the permissions for those directories. If this is not set, directories will be assigned permissions by adding the execute bit to the mode of the files. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. replace : True If set to ``False`` and the file already exists, the file will not be modified even if changes would otherwise be made. Permissions and ownership will still be enforced, however. context Overrides default context variables passed to the template. 
defaults Default context passed to the template. backup Overrides the default backup mode for this specific file. See :ref:`backup_mode documentation <file-state-backups>` for more details. show_changes Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made. create : True If set to ``False``, then the file will only be managed if the file already exists on the system. contents Specify the contents of the file. Cannot be used in combination with ``source``. Ignores hashes and does not use a templating engine. This value can be either a single string, a multiline YAML string or a list of strings. If a list of strings, then the strings will be joined together with newlines in the resulting file. For example, the below two example states would result in identical file contents: .. code-block:: yaml /path/to/file1: file.managed: - contents: - This is line 1 - This is line 2 /path/to/file2: file.managed: - contents: | This is line 1 This is line 2 contents_pillar .. versionadded:: 0.17.0 .. versionchanged: 2016.11.0 contents_pillar can also be a list, and the pillars will be concatinated together to form one file. Operates like ``contents``, but draws from a value stored in pillar, using the pillar path syntax used in :mod:`pillar.get <salt.modules.pillar.get>`. This is useful when the pillar value contains newlines, as referencing a pillar variable using a jinja/mako template can result in YAML formatting issues due to the newlines causing indentation mismatches. For example, the following could be used to deploy an SSH private key: .. code-block:: yaml /home/deployer/.ssh/id_rsa: file.managed: - user: deployer - group: deployer - mode: 600 - contents_pillar: userdata:deployer:id_rsa This would populate ``/home/deployer/.ssh/id_rsa`` with the contents of ``pillar['userdata']['deployer']['id_rsa']``. An example of this pillar setup would be like so: .. 
code-block:: yaml userdata: deployer: id_rsa: | -----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAoQiwO3JhBquPAalQF9qP1lLZNXVjYMIswrMe2HcWUVBgh+vY U7sCwx/dH6+VvNwmCoqmNnP+8gTPKGl1vgAObJAnMT623dMXjVKwnEagZPRJIxDy B/HaAre9euNiY3LvIzBTWRSeMfT+rWvIKVBpvwlgGrfgz70m0pqxu+UyFbAGLin+ GpxzZAMaFpZw4sSbIlRuissXZj/sHpQb8p9M5IeO4Z3rjkCP1cxI -----END RSA PRIVATE KEY----- .. note:: The private key above is shortened to keep the example brief, but shows how to do multiline string in YAML. The key is followed by a pipe character, and the mutliline string is indented two more spaces. To avoid the hassle of creating an indented multiline YAML string, the :mod:`file_tree external pillar <salt.pillar.file_tree>` can be used instead. However, this will not work for binary files in Salt releases before 2015.8.4. contents_grains .. versionadded:: 2014.7.0 Operates like ``contents``, but draws from a value stored in grains, using the grains path syntax used in :mod:`grains.get <salt.modules.grains.get>`. This functionality works similarly to ``contents_pillar``, but with grains. For example, the following could be used to deploy a "message of the day" file: .. code-block:: yaml write_motd: file.managed: - name: /etc/motd - contents_grains: motd This would populate ``/etc/motd`` file with the contents of the ``motd`` grain. The ``motd`` grain is not a default grain, and would need to be set prior to running the state: .. code-block:: bash salt '*' grains.set motd 'Welcome! This system is managed by Salt.' contents_newline : True .. versionadded:: 2014.7.0 .. versionchanged:: 2015.8.4 This option is now ignored if the contents being deployed contain binary data. If ``True``, files managed using ``contents``, ``contents_pillar``, or ``contents_grains`` will have a newline added to the end of the file if one is not present. Setting this option to ``False`` will omit this final newline. contents_delimiter .. 
versionadded:: 2015.8.4 Can be used to specify an alternate delimiter for ``contents_pillar`` or ``contents_grains``. This delimiter will be passed through to :py:func:`pillar.get <salt.modules.pillar.get>` or :py:func:`grains.get <salt.modules.grains.get>` when retrieving the contents. allow_empty : True .. versionadded:: 2015.8.4 If set to ``False``, then the state will fail if the contents specified by ``contents_pillar`` or ``contents_grains`` are empty. follow_symlinks : True .. versionadded:: 2014.7.0 If the desired path is a symlink follow it and make changes to the file to which the symlink points. check_cmd .. versionadded:: 2014.7.0 The specified command will be run with an appended argument of a *temporary* file containing the new managed contents. If the command exits with a zero status the new managed contents will be written to the managed destination. If the command exits with a nonzero exit code, the state will fail and no changes will be made to the file. For example, the following could be used to verify sudoers before making changes: .. code-block:: yaml /etc/sudoers: file.managed: - user: root - group: root - mode: 0440 - source: salt://sudoers/files/sudoers.jinja - template: jinja - check_cmd: /usr/sbin/visudo -c -f **NOTE**: This ``check_cmd`` functions differently than the requisite ``check_cmd``. tmp_ext Suffix for temp file created by ``check_cmd``. Useful for checkers dependant on config file extension (e.g. the init-checkconf upstart config checker). .. code-block:: yaml /etc/init/test.conf: file.managed: - user: root - group: root - mode: 0440 - tmp_ext: '.conf' - contents: - 'description "Salt Minion"'' - 'start on started mountall' - 'stop on shutdown' - 'respawn' - 'exec salt-minion' - check_cmd: init-checkconf -f skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. 
versionadded:: 2016.3.0 ''' if 'env' in kwargs: salt.utils.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' ) kwargs.pop('env') name = os.path.expanduser(name) ret = {'changes': {}, 'pchanges': {}, 'comment': '', 'name': name, 'result': True} if mode is not None and salt.utils.is_windows(): return _error(ret, 'The \'mode\' option is not supported on Windows') try: keep_mode = mode.lower() == 'keep' if keep_mode: # We're not hard-coding the mode, so set it to None mode = None except AttributeError: keep_mode = False # Make sure that any leading zeros stripped by YAML loader are added back mode = salt.utils.normalize_mode(mode) contents_count = len( [x for x in (contents, contents_pillar, contents_grains) if x is not None] ) if source and contents_count > 0: return _error( ret, '\'source\' cannot be used in combination with \'contents\', ' '\'contents_pillar\', or \'contents_grains\'' ) elif keep_mode and contents_count > 0: return _error( ret, 'Mode preservation cannot be used in combination with \'contents\', ' '\'contents_pillar\', or \'contents_grains\'' ) elif contents_count > 1: return _error( ret, 'Only one of \'contents\', \'contents_pillar\', and ' '\'contents_grains\' is permitted' ) # If no source is specified, set replace to False, as there is nothing # with which to replace the file. if not source and contents_count == 0 and replace: replace = False log.warning( 'State for file: {0} - Neither \'source\' nor \'contents\' nor ' '\'contents_pillar\' nor \'contents_grains\' was defined, yet ' '\'replace\' was set to \'True\'. 
As there is no source to ' 'replace the file with, \'replace\' has been set to \'False\' to ' 'avoid reading the file unnecessarily.'.format(name) ) # Use this below to avoid multiple '\0' checks and save some CPU cycles if contents_pillar is not None: if isinstance(contents_pillar, list): list_contents = [] for nextp in contents_pillar: nextc = __salt__['pillar.get'](nextp, __NOT_FOUND, delimiter=contents_delimiter) if nextc is __NOT_FOUND: return _error( ret, 'Pillar {0} does not exist'.format(nextp) ) list_contents.append(nextc) use_contents = os.linesep.join(list_contents) else: use_contents = __salt__['pillar.get'](contents_pillar, __NOT_FOUND, delimiter=contents_delimiter) if use_contents is __NOT_FOUND: return _error( ret, 'Pillar {0} does not exist'.format(contents_pillar) ) elif contents_grains is not None: if isinstance(contents_grains, list): list_contents = [] for nextg in contents_grains: nextc = __salt__['grains.get'](nextg, __NOT_FOUND, delimiter=contents_delimiter) if nextc is __NOT_FOUND: return _error( ret, 'Grain {0} does not exist'.format(nextc) ) list_contents.append(nextc) use_contents = os.linesep.join(list_contents) else: use_contents = __salt__['grains.get'](contents_grains, __NOT_FOUND, delimiter=contents_delimiter) if use_contents is __NOT_FOUND: return _error( ret, 'Grain {0} does not exist'.format(contents_grains) ) elif contents is not None: use_contents = contents else: use_contents = None if use_contents is not None: if not allow_empty and not use_contents: if contents_pillar: contents_id = 'contents_pillar {0}'.format(contents_pillar) elif contents_grains: contents_id = 'contents_grains {0}'.format(contents_grains) else: contents_id = '\'contents\'' return _error( ret, '{0} value would result in empty contents. Set allow_empty ' 'to True to allow the managed file to be empty.' 
.format(contents_id) ) contents_are_binary = \ isinstance(use_contents, six.string_types) and '\0' in use_contents if contents_are_binary: contents = use_contents else: validated_contents = _validate_str_list(use_contents) if not validated_contents: return _error( ret, 'Contents specified by contents/contents_pillar/' 'contents_grains is not a string or list of strings, and ' 'is not binary data. SLS is likely malformed.' ) contents = os.linesep.join(validated_contents) if contents_newline and not contents.endswith(os.linesep): contents += os.linesep if template: contents = __salt__['file.apply_template_on_contents']( contents, template=template, context=context, defaults=defaults, saltenv=__env__) if not isinstance(contents, six.string_types): if 'result' in contents: ret['result'] = contents['result'] else: ret['result'] = False if 'comment' in contents: ret['comment'] = contents['comment'] else: ret['comment'] = 'Error while applying template on contents' return ret if not name: return _error(ret, 'Must provide name to file.exists') user = _test_owner(kwargs, user=user) if salt.utils.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this ' 'is a Windows system.'.format(name) ) group = user if not create: if not os.path.isfile(name): # Don't create a file that is not already present ret['comment'] = ('File {0} is not present and is not set for ' 'creation').format(name) return ret u_check = _check_user(user, group) if u_check: # The specified user or group do not exist return _error(ret, u_check) if not os.path.isabs(name): return _error( ret, 'Specified file {0} is not an absolute path'.format(name)) if os.path.isdir(name): ret['comment'] = 'Specified target {0} is a directory'.format(name) ret['result'] = False return ret if context is None: context = {} elif not isinstance(context, dict): return _error( ret, 'Context must be formed as a dict') if defaults and not isinstance(defaults, dict): return _error( ret, 
'Defaults must be formed as a dict') if not replace and os.path.exists(name): # Check and set the permissions if necessary ret, _ = __salt__['file.check_perms'](name, ret, user, group, mode, follow_symlinks) if __opts__['test']: ret['comment'] = 'File {0} not updated'.format(name) elif not ret['changes'] and ret['result']: ret['comment'] = ('File {0} exists with proper permissions. ' 'No changes made.'.format(name)) return ret accum_data, _ = _load_accumulators() if name in accum_data: if not context: context = {} context['accumulator'] = accum_data[name] try: if __opts__['test']: if 'file.check_managed_changes' in __salt__: ret['pchanges'] = __salt__['file.check_managed_changes']( name, source, source_hash, source_hash_name, user, group, mode, template, context, defaults, __env__, contents, skip_verify, **kwargs ) if isinstance(ret['pchanges'], tuple): ret['result'], ret['comment'] = ret['pchanges'] elif ret['pchanges']: ret['result'] = None ret['comment'] = 'The file {0} is set to be changed'.format(name) if show_changes and 'diff' in ret['pchanges']: ret['changes']['diff'] = ret['pchanges']['diff'] if not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: ret['result'] = True ret['comment'] = 'The file {0} is in the correct state'.format(name) return ret # If the source is a list then find which file exists source, source_hash = __salt__['file.source_list']( source, source_hash, __env__ ) except CommandExecutionError as exc: ret['result'] = False ret['comment'] = 'Unable to manage file: {0}'.format(exc) return ret # Gather the source file from the server try: sfn, source_sum, comment_ = __salt__['file.get_managed']( name, template, source, source_hash, source_hash_name, user, group, mode, __env__, context, defaults, skip_verify, **kwargs ) except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) return _error(ret, 'Unable to manage file: {0}'.format(exc)) tmp_filename = None if check_cmd: tmp_filename = 
salt.utils.mkstemp(suffix=tmp_ext) # if exists copy existing file to tmp to compare if __salt__['file.file_exists'](name): try: __salt__['file.copy'](name, tmp_filename) except Exception as exc: return _error( ret, 'Unable to copy file {0} to {1}: {2}'.format( name, tmp_filename, exc ) ) try: ret = __salt__['file.manage_file']( tmp_filename, sfn, ret, source, source_sum, user, group, mode, __env__, backup, makedirs, template, show_changes, contents, dir_mode, follow_symlinks, skip_verify, keep_mode, **kwargs) except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) if os.path.isfile(tmp_filename): os.remove(tmp_filename) return _error(ret, 'Unable to check_cmd file: {0}'.format(exc)) # file being updated to verify using check_cmd if ret['changes']: # Reset ret ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} check_cmd_opts = {} if 'shell' in __grains__: check_cmd_opts['shell'] = __grains__['shell'] cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts) if isinstance(cret, dict): ret.update(cret) if os.path.isfile(tmp_filename): os.remove(tmp_filename) return ret # Since we generated a new tempfile and we are not returning here # lets change the original sfn to the new tempfile or else we will # get file not found sfn = tmp_filename else: ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if comment_ and contents is None: return _error(ret, comment_) else: try: return __salt__['file.manage_file']( name, sfn, ret, source, source_sum, user, group, mode, __env__, backup, makedirs, template, show_changes, contents, dir_mode, follow_symlinks, skip_verify, keep_mode, **kwargs) except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) return _error(ret, 'Unable to manage file: {0}'.format(exc)) finally: if tmp_filename and os.path.isfile(tmp_filename): os.remove(tmp_filename) _RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs'] def _get_recurse_set(recurse): ''' 
Convert *recurse* definition to a set of strings.
If ``ignore_files`` or ``ignore_dirs`` is included, files or directories will be left unchanged respectively. Example: .. code-block:: yaml /var/log/httpd: file.directory: - user: root - group: root - dir_mode: 755 - file_mode: 644 - recurse: - user - group - mode Leave files or directories unchanged: .. code-block:: yaml /var/log/httpd: file.directory: - user: root - group: root - dir_mode: 755 - file_mode: 644 - recurse: - user - group - mode - ignore_dirs .. versionadded:: 2015.5.0 max_depth Limit the recursion depth. The default is no limit=None. 'max_depth' and 'clean' are mutually exclusive. .. versionadded:: 2016.11.0 dir_mode / mode The permissions mode to set any directories created. Not supported on Windows. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. file_mode The permissions mode to set any files created if 'mode' is run in 'recurse'. This defaults to dir_mode. Not supported on Windows. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. makedirs If the directory is located in a path without a parent directory, then the state will fail. If makedirs is set to True, then the parent directories will be created to facilitate the creation of the named file. clean Make sure that only files that are set up by salt and required by this function are kept. If this option is set then everything in this directory will be deleted unless it is required. 'clean' and 'max_depth' are mutually exclusive. require Require other resources such as packages or files exclude_pat When 'clean' is set to True, exclude this pattern from removal list and preserve in the destination. follow_symlinks : False If the desired path is a symlink (or ``recurse`` is defined and a symlink is encountered while recursing), follow it and check the permissions of the directory/file to which the symlink points. .. 
versionadded:: 2014.1.4 force If the name of the directory exists and is not a directory and force is set to False, the state will fail. If force is set to True, the file in the way of the directory will be deleted to make room for the directory, unless backupname is set, then it will be renamed. .. versionadded:: 2014.7.0 backupname If the name of the directory exists and is not a directory, it will be renamed to the backupname. If the backupname already exists and force is False, the state will fail. Otherwise, the backupname will be removed first. .. versionadded:: 2014.7.0 allow_symlink : True If allow_symlink is True and the specified path is a symlink, it will be allowed to remain if it points to a directory. If allow_symlink is False then the state will fail, unless force is also set to True, in which case it will be removed or renamed, depending on the value of the backupname argument. .. versionadded:: 2014.7.0 children_only : False If children_only is True the base of a path is excluded when performing a recursive operation. In case of /path/to/base, base will be ignored while all of /path/to/base/* are still operated on. 
''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': True, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.directory') # Remove trailing slash, if present and we're not working on "/" itself if name[-1] == '/' and name != '/': name = name[:-1] if max_depth is not None and clean: return _error(ret, 'Cannot specify both max_depth and clean') user = _test_owner(kwargs, user=user) if salt.utils.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this is ' 'a Windows system.'.format(name) ) group = user if 'mode' in kwargs and not dir_mode: dir_mode = kwargs.get('mode', []) if not file_mode: file_mode = dir_mode # Make sure that leading zeros stripped by YAML loader are added back dir_mode = salt.utils.normalize_mode(dir_mode) file_mode = salt.utils.normalize_mode(file_mode) u_check = _check_user(user, group) if u_check: # The specified user or group do not exist return _error(ret, u_check) if not os.path.isabs(name): return _error( ret, 'Specified file {0} is not an absolute path'.format(name)) # Check for existing file or symlink if os.path.isfile(name) or (not allow_symlink and os.path.islink(name)) \ or (force and os.path.islink(name)): # Was a backupname specified if backupname is not None: # Make a backup first if os.path.lexists(backupname): if not force: return _error(ret, (( 'File exists where the backup target {0} should go' ).format(backupname))) else: __salt__['file.remove'](backupname) os.rename(name, backupname) elif force: # Remove whatever is in the way if os.path.isfile(name): os.remove(name) ret['changes']['forced'] = 'File was forcibly replaced' elif __salt__['file.is_link'](name): __salt__['file.remove'](name) ret['changes']['forced'] = 'Symlink was forcibly replaced' else: __salt__['file.remove'](name) else: if os.path.isfile(name): return _error( ret, 'Specified location {0} exists and is a file'.format(name)) elif 
os.path.islink(name): return _error( ret, 'Specified location {0} exists and is a symlink'.format(name)) presult, pcomment, ret['pchanges'] = _check_directory( name, user, group, recurse or [], dir_mode, clean, require, exclude_pat, max_depth) if __opts__['test']: ret['result'] = presult ret['comment'] = pcomment return ret if not os.path.isdir(name): # The dir does not exist, make it if not os.path.isdir(os.path.dirname(name)): # The parent directory does not exist, create them if makedirs: # Make sure the drive is mapped before trying to create the # path in windows if salt.utils.is_windows(): drive, path = os.path.splitdrive(name) if not os.path.isdir(drive): return _error( ret, 'Drive {0} is not mapped'.format(drive)) # Everything's good, create the path __salt__['file.makedirs']( name, user=user, group=group, mode=dir_mode ) else: return _error( ret, 'No directory to create {0} in'.format(name)) __salt__['file.mkdir']( name, user=user, group=group, mode=dir_mode ) ret['changes'][name] = 'New Dir' if not os.path.isdir(name): return _error(ret, 'Failed to create directory {0}'.format(name)) # issue 32707: skip this __salt__['file.check_perms'] call if children_only == True # Check permissions if not children_only: ret, perms = __salt__['file.check_perms'](name, ret, user, group, dir_mode, follow_symlinks) errors = [] if recurse or clean: # walk path only once and store the result walk_l = list(_depth_limited_walk(name, max_depth)) # root: (dirs, files) structure, compatible for python2.6 walk_d = {} for i in walk_l: walk_d[i[0]] = (i[1], i[2]) recurse_set = None if recurse: try: recurse_set = _get_recurse_set(recurse) except (TypeError, ValueError) as exc: ret['result'] = False ret['comment'] = '{0}'.format(exc) # NOTE: Should this be enough to stop the whole check altogether? if recurse_set: if 'user' in recurse_set: if user: uid = __salt__['file.user_to_uid'](user) # file.user_to_uid returns '' if user does not exist. 
Above # check for user is not fatal, so we need to be sure user # exists. if isinstance(uid, six.string_types): ret['result'] = False ret['comment'] = 'Failed to enforce ownership for ' \ 'user {0} (user does not ' \ 'exist)'.format(user) else: ret['result'] = False ret['comment'] = 'user not specified, but configured as ' \ 'a target for recursive ownership ' \ 'management' else: user = None if 'group' in recurse_set: if group: gid = __salt__['file.group_to_gid'](group) # As above with user, we need to make sure group exists. if isinstance(gid, six.string_types): ret['result'] = False ret['comment'] = 'Failed to enforce group ownership ' \ 'for group {0}'.format(group) else: ret['result'] = False ret['comment'] = 'group not specified, but configured ' \ 'as a target for recursive ownership ' \ 'management' else: group = None if 'mode' not in recurse_set: file_mode = None dir_mode = None check_files = 'ignore_files' not in recurse_set check_dirs = 'ignore_dirs' not in recurse_set for root, dirs, files in walk_l: if check_files: for fn_ in files: full = os.path.join(root, fn_) try: ret, _ = __salt__['file.check_perms']( full, ret, user, group, file_mode, follow_symlinks) except CommandExecutionError as exc: if not exc.strerror.endswith('does not exist'): errors.append(exc.strerror) if check_dirs: for dir_ in dirs: full = os.path.join(root, dir_) try: ret, _ = __salt__['file.check_perms']( full, ret, user, group, dir_mode, follow_symlinks) except CommandExecutionError as exc: if not exc.strerror.endswith('does not exist'): errors.append(exc.strerror) if clean: keep = _gen_keep_files(name, require, walk_d) log.debug('List of kept files when use file.directory with clean: %s', keep) removed = _clean_dir(name, list(keep), exclude_pat) if removed: ret['changes']['removed'] = removed ret['comment'] = 'Files cleaned from directory {0}'.format(name) # issue 32707: reflect children_only selection in comments if not ret['comment']: if children_only: ret['comment'] = 
                'Directory {0}/* updated'.format(name)
        else:
            ret['comment'] = 'Directory {0} updated'.format(name)

    if __opts__['test']:
        ret['comment'] = 'Directory {0} not updated'.format(name)
    elif not ret['changes'] and ret['result']:
        ret['comment'] = 'Directory {0} is in the correct state'.format(name)
    if errors:
        ret['result'] = False
        ret['comment'] += '\n\nThe following errors were encountered:\n'
        for error in errors:
            ret['comment'] += '\n- {0}'.format(error)

    return ret


def recurse(name,
            source,
            clean=False,
            require=None,
            user=None,
            group=None,
            dir_mode=None,
            file_mode=None,
            sym_mode=None,
            template=None,
            context=None,
            defaults=None,
            include_empty=False,
            backup='',
            include_pat=None,
            exclude_pat=None,
            maxdepth=None,
            keep_symlinks=False,
            force_symlinks=False,
            **kwargs):
    '''
    Recurse through a subdirectory on the master and copy said subdirectory
    over to the specified path.

    name
        The directory to set the recursion in

    source
        The source directory, this directory is located on the salt master
        file server and is specified with the salt:// protocol. If the
        directory is located on the master in the directory named spam, and is
        called eggs, the source string is salt://spam/eggs

    clean
        Make sure that only files that are set up by salt and required by this
        function are kept. If this option is set then everything in this
        directory will be deleted unless it is required.

    require
        Require other resources such as packages or files

    user
        The user to own the directory. This defaults to the user salt is
        running as on the minion

    group
        The group ownership set for the directory. This defaults to the group
        salt is running as on the minion. On Windows, this is ignored

    dir_mode
        The permissions mode to set on any directories created.

        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's not
        enforced.

        .. note::
            This option is **not** supported on Windows.

    file_mode
        The permissions mode to set on any files created.

        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's not
        enforced.

        .. note::
            This option is **not** supported on Windows.

        .. versionchanged:: 2016.11.0
            This option can be set to ``keep``, and Salt will keep the mode
            from the Salt fileserver. This is only supported when the
            ``source`` URL begins with ``salt://``, or for files local to the
            minion. Because the ``source`` option cannot be used with any of
            the ``contents`` options, setting the ``mode`` to ``keep`` is also
            incompatible with the ``contents`` options.

    sym_mode
        The permissions mode to set on any symlink created.

        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's not
        enforced.

        .. note::
            This option is **not** supported on Windows.

    template
        If this setting is applied, the named templating engine will be used
        to render the downloaded file. The following templates are supported:

        - :mod:`cheetah<salt.renderers.cheetah>`
        - :mod:`genshi<salt.renderers.genshi>`
        - :mod:`jinja<salt.renderers.jinja>`
        - :mod:`mako<salt.renderers.mako>`
        - :mod:`py<salt.renderers.py>`
        - :mod:`wempy<salt.renderers.wempy>`

        .. note::

            The template option is required when recursively applying
            templates.

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    include_empty
        Set this to True if empty directories should also be created
        (default is False)

    backup
        Overrides the default backup mode for all replaced files. See
        :ref:`backup_mode documentation <file-state-backups>` for more
        details.

    include_pat
        When copying, include only this pattern from the source. Default
        is glob match; if prefixed with 'E@', then regexp match.
        Example:

        .. code-block:: yaml

          - include_pat: hello*       :: glob matches 'hello01', 'hello02'
                                         ... but not 'otherhello'
          - include_pat: E@hello      :: regexp matches 'otherhello',
                                         'hello01' ...

    exclude_pat
        Exclude this pattern from the source when copying. If both
        `include_pat` and `exclude_pat` are supplied, then it will apply
        conditions cumulatively. i.e. first select based on include_pat, and
        then within that result apply exclude_pat.

        Also, when 'clean=True', exclude this pattern from the removal
        list and preserve in the destination.
        Example:

        .. code-block:: yaml

          - exclude_pat: APPDATA*               :: glob matches APPDATA.01,
                                                   APPDATA.02,.. for exclusion
          - exclude_pat: E@(APPDATA)|(TEMPDATA) :: regexp matches APPDATA
                                                   or TEMPDATA for exclusion

    maxdepth
        When copying, only copy paths which are of depth `maxdepth` from the
        source path.
        Example:

        .. code-block:: yaml

          - maxdepth: 0      :: Only include files located in the source
                                directory
          - maxdepth: 1      :: Only include files located in the source
                                or immediate subdirectories

    keep_symlinks
        Keep symlinks when copying from the source. This option will cause
        the copy operation to terminate at the symlink. If the desired
        behavior is similar to rsync, then set this to True.

    force_symlinks
        Force symlink creation. This option will force the symlink creation.
        If a file or directory is obstructing symlink creation it will be
        recursively removed so that symlink creation can proceed. This
        option is usually not needed except in special circumstances.
    '''
    if 'env' in kwargs:
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
            )
        kwargs.pop('env')

    name = os.path.expanduser(sdecode(name))

    user = _test_owner(kwargs, user=user)
    if salt.utils.is_windows():
        # Group ownership is not meaningful on Windows; mirror the user.
        if group is not None:
            log.warning(
                'The group argument for {0} has been ignored as this '
                'is a Windows system.'.format(name)
            )
        group = user
    ret = {
        'name': name,
        'changes': {},
        'pchanges': {},
        'result': True,
        'comment': {}  # { path: [comment, ...] }
    }

    if 'mode' in kwargs:
        ret['result'] = False
        ret['comment'] = (
            '\'mode\' is not allowed in \'file.recurse\'. Please use '
            '\'file_mode\' and \'dir_mode\'.'
        )
        return ret

    if any([x is not None for x in (dir_mode, file_mode, sym_mode)]) \
            and salt.utils.is_windows():
        return _error(ret, 'mode management is not supported on Windows')

    # Make sure that leading zeros stripped by YAML loader are added back
    dir_mode = salt.utils.normalize_mode(dir_mode)

    try:
        keep_mode = file_mode.lower() == 'keep'
        if keep_mode:
            # We're not hard-coding the mode, so set it to None
            file_mode = None
    except AttributeError:
        # file_mode is not a string (e.g. None or an int), so the special
        # 'keep' value cannot apply.
        keep_mode = False

    file_mode = salt.utils.normalize_mode(file_mode)

    u_check = _check_user(user, group)
    if u_check:
        # The specified user or group do not exist
        return _error(ret, u_check)

    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name))

    # expand source into source_list
    source_list = _validate_str_list(source)

    for idx, val in enumerate(source_list):
        source_list[idx] = val.rstrip('/')

    for precheck in source_list:
        if not precheck.startswith('salt://'):
            return _error(ret, ('Invalid source \'{0}\' '
                                '(must be a salt:// URI)'.format(precheck)))

    # Select the first source in source_list that exists
    try:
        source, source_hash = __salt__['file.source_list'](source_list, '', __env__)
    except CommandExecutionError as exc:
        ret['result'] = False
        ret['comment'] = 'Recurse failed: {0}'.format(exc)
        return ret

    # Check source path relative to fileserver root, make sure it is a
    # directory
    srcpath, senv = salt.utils.url.parse(source)
    if senv is None:
        senv = __env__
    master_dirs = __salt__['cp.list_master_dirs'](saltenv=senv)
    if srcpath not in master_dirs \
            and not any((x for x in master_dirs
                         if x.startswith(srcpath + '/'))):
        ret['result'] = False
        ret['comment'] = (
            'The directory \'{0}\' does not exist on the salt fileserver '
            'in saltenv \'{1}\''.format(srcpath, senv)
        )
        return ret

    # Verify the target directory
    if not os.path.isdir(name):
        if os.path.exists(name):
            # it is not a dir, but it exists - fail out
            return _error(
                ret, 'The path {0} exists and is not a directory'.format(name))
        if not __opts__['test']:
            __salt__['file.makedirs_perms'](name, user, group, dir_mode)

    def add_comment(path, comment):
        # Accumulate comments per path; a single comment may be a string or
        # a list of strings.
        comments = ret['comment'].setdefault(path, [])
        if isinstance(comment, six.string_types):
            comments.append(comment)
        else:
            comments.extend(comment)

    def merge_ret(path, _ret):
        # Use the most "negative" result code (out of True, None, False)
        if _ret['result'] is False or ret['result'] is True:
            ret['result'] = _ret['result']

        # Only include comments about files that changed
        if _ret['result'] is not True and _ret['comment']:
            add_comment(path, _ret['comment'])

        if _ret['changes']:
            ret['changes'][path] = _ret['changes']

    def manage_file(path, source):
        # Apply the `managed` state to a single destination file, replacing a
        # same-named directory first when clean=True.
        if clean and os.path.exists(path) and os.path.isdir(path):
            _ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
            if __opts__['test']:
                _ret['comment'] = 'Replacing directory {0} with a ' \
                                  'file'.format(path)
                _ret['result'] = None
                merge_ret(path, _ret)
                return
            else:
                __salt__['file.remove'](path)
                _ret['changes'] = {'diff': 'Replaced directory with a '
                                           'new file'}
                merge_ret(path, _ret)

        # Conflicts can occur if some kwargs are passed in here
        pass_kwargs = {}
        faults = ['mode', 'makedirs']
        for key in kwargs:
            if key not in faults:
                pass_kwargs[key] = kwargs[key]

        _ret = managed(
            path,
            source=source,
            user=user,
            group=group,
            mode='keep' if keep_mode else file_mode,
            template=template,
            makedirs=True,
            context=context,
            defaults=defaults,
            backup=backup,
            **pass_kwargs)
        merge_ret(path, _ret)

    def manage_directory(path):
        # Apply the `directory` state to a destination directory, replacing a
        # same-named file first when clean=True.
        if os.path.basename(path) == '..':
            return
        if clean and os.path.exists(path) and not os.path.isdir(path):
            _ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
            if __opts__['test']:
                _ret['comment'] = 'Replacing {0} with a directory'.format(path)
                _ret['result'] = None
                merge_ret(path, _ret)
                return
            else:
                __salt__['file.remove'](path)
                _ret['changes'] = {'diff': 'Replaced file with a directory'}
                merge_ret(path, _ret)

        _ret = directory(
            path,
            user=user,
            group=group,
            recurse=[],
            dir_mode=dir_mode,
            file_mode=None,
            makedirs=True,
            clean=False,
            require=None)
        merge_ret(path, _ret)

    # Process symlinks and return the updated filenames list
    def process_symlinks(filenames, symlinks):
        for lname, ltarget in six.iteritems(symlinks):
            if not salt.utils.check_include_exclude(
                    os.path.relpath(lname, srcpath), include_pat, exclude_pat):
                continue
            srelpath = os.path.relpath(lname, srcpath)
            # Check for max depth
            if maxdepth is not None:
                srelpieces = srelpath.split('/')
                if not srelpieces[-1]:
                    srelpieces = srelpieces[:-1]
                if len(srelpieces) > maxdepth + 1:
                    continue
            # Check for all paths that begin with the symlink
            # and axe it leaving only the dirs/files below it.
            # This needs to use list() otherwise they reference
            # the same list.
            _filenames = list(filenames)
            for filename in _filenames:
                if filename.startswith(lname):
                    log.debug('** skipping file ** {0}, it intersects a '
                              'symlink'.format(filename))
                    filenames.remove(filename)
            # Create the symlink along with the necessary dirs.
            # The dir perms/ownership will be adjusted later
            # if needed
            _ret = symlink(os.path.join(name, srelpath),
                           ltarget,
                           makedirs=True,
                           force=force_symlinks,
                           user=user,
                           group=group,
                           mode=sym_mode)
            if not _ret:
                continue
            merge_ret(os.path.join(name, srelpath), _ret)
            # Add the path to the keep set in case clean is set to True
            keep.add(os.path.join(name, srelpath))
        vdir.update(keep)
        return filenames

    keep = set()    # destination paths managed by this run (survive clean)
    vdir = set()    # directories already verified this run
    if not srcpath.endswith('/'):
        # we're searching for things that start with this *directory*.
        # use '/' since #master only runs on POSIX
        srcpath = srcpath + '/'
    fns_ = __salt__['cp.list_master'](senv, srcpath)
    # If we are instructed to keep symlinks, then process them.
    if keep_symlinks:
        # Make this global so that emptydirs can use it if needed.
        symlinks = __salt__['cp.list_master_symlinks'](senv, srcpath)
        fns_ = process_symlinks(fns_, symlinks)
    for fn_ in fns_:
        if not fn_.strip():
            continue

        # fn_ here is the absolute (from file_roots) source path of
        # the file to copy from; it is either a normal file or an
        # empty dir(if include_empty==true).

        relname = sdecode(os.path.relpath(fn_, srcpath))
        if relname.startswith('..'):
            continue

        # Check for maxdepth of the relative path
        if maxdepth is not None:
            # Since paths are all master, just use POSIX separator
            relpieces = relname.split('/')
            # Handle empty directories (include_empty==true) by removing the
            # the last piece if it is an empty string
            if not relpieces[-1]:
                relpieces = relpieces[:-1]
            if len(relpieces) > maxdepth + 1:
                continue

        # Check if it is to be excluded. Match only part of the path
        # relative to the target directory
        if not salt.utils.check_include_exclude(
                relname, include_pat, exclude_pat):
            continue
        dest = os.path.join(name, relname)
        dirname = os.path.dirname(dest)
        keep.add(dest)

        if dirname not in vdir:
            # verify the directory perms if they are set
            manage_directory(dirname)
            vdir.add(dirname)

        src = salt.utils.url.create(fn_, saltenv=senv)
        manage_file(dest, src)

    if include_empty:
        mdirs = __salt__['cp.list_master_dirs'](senv, srcpath)
        for mdir in mdirs:
            if not salt.utils.check_include_exclude(
                    os.path.relpath(mdir, srcpath), include_pat, exclude_pat):
                continue
            mdest = os.path.join(name, os.path.relpath(mdir, srcpath))
            # Check for symlinks that happen to point to an empty dir.
            if keep_symlinks:
                islink = False
                for link in symlinks:
                    if mdir.startswith(link, 0):
                        log.debug('** skipping empty dir ** {0}, it intersects'
                                  ' a symlink'.format(mdir))
                        islink = True
                        break
                if islink:
                    continue

            manage_directory(mdest)
            keep.add(mdest)

    keep = list(keep)
    if clean:
        # TODO: Use directory(clean=True) instead
        keep += _gen_keep_files(name, require)
        removed = _clean_dir(name, list(keep), exclude_pat)
        if removed:
            if __opts__['test']:
                if ret['result']:
                    ret['result'] = None
                add_comment('removed', removed)
            else:
                ret['changes']['removed'] = removed

    # Flatten comments until salt command line client learns
    # to display structured comments in a readable fashion
    ret['comment'] = '\n'.join(u'\n#### {0} ####\n{1}'.format(
        k, v if isinstance(v, six.string_types) else '\n'.join(v)
    ) for (k, v) in six.iteritems(ret['comment'])).strip()

    if not ret['comment']:
        ret['comment'] = 'Recursively updated {0}'.format(name)

    if not ret['changes'] and ret['result']:
        ret['comment'] = 'The directory {0} is in the correct state'.format(
            name
        )

    return ret


def retention_schedule(name, retain, strptime_format=None, timezone=None):
    '''
    Apply retention scheduling to backup storage directory.

    .. versionadded:: 2016.11.0

    :param name:
        The filesystem path to the directory containing backups to be managed.

    :param retain:
        Delete the backups, except for the ones we want to keep.
        The N below should be an integer but may also be the special value of
        ``all``, which keeps all files matching the criteria.
        All of the retain options default to None,
        which means to not keep files based on this criteria.

        :most_recent N:
            Keep the most recent N files.

        :first_of_hour N:
            For the last N hours from now, keep the first file after the hour.

        :first_of_day N:
            For the last N days from now, keep the first file after midnight.
            See also ``timezone``.

        :first_of_week N:
            For the last N weeks from now, keep the first file after Sunday
            midnight.
        :first_of_month N:
            For the last N months from now, keep the first file after the
            start of the month.

        :first_of_year N:
            For the last N years from now, keep the first file after the start
            of the year.

    :param strptime_format:
        A python strptime format string used to first match the filenames of
        backups and then parse the filename to determine the datetime of the
        file.
        https://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
        Defaults to None, which considers all files in the directory to be
        backups eligible for deletion and uses ``os.path.getmtime()`` to
        determine the datetime.

    :param timezone:
        The timezone to use when determining midnight.
        This is only used when datetime is pulled from ``os.path.getmtime()``.
        Defaults to ``None`` which uses the timezone from the locale.

    .. code-block: yaml

        /var/backups/example_directory:
          file.retention_schedule:
            - retain:
                most_recent: 5
                first_of_hour: 4
                first_of_day: 7
                first_of_week: 6    # NotImplemented yet.
                first_of_month: 6
                first_of_year: all
            - strptime_format: example_name_%Y%m%dT%H%M%S.tar.bz2
            - timezone: None

    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {'retained': [], 'deleted': [], 'ignored': []},
           'pchanges': {'retained': [], 'deleted': [], 'ignored': []},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.retention_schedule')
    if not os.path.isdir(name):
        return _error(ret, 'Name provided to file.retention must be a directory')

    # get list of files in directory
    all_files = __salt__['file.readdir'](name)

    # if strptime_format is set, filter through the list to find names which
    # parse and get their datetimes.
    beginning_of_unix_time = datetime(1970, 1, 1)

    def get_file_time_from_strptime(f):
        # Parse the filename itself as a timestamp; non-matching names are
        # treated as irrelevant (ignored) files.
        try:
            ts = datetime.strptime(f, strptime_format)
            ts_epoch = salt.utils.total_seconds(ts - beginning_of_unix_time)
            return (ts, ts_epoch)
        except ValueError:
            # Files which don't match the pattern are not relevant files.
            return (None, None)

    def get_file_time_from_mtime(f):
        # Fall back to the file's mtime when no strptime_format is given.
        lstat = __salt__['file.lstat'](os.path.join(name, f))
        if lstat:
            mtime = lstat['st_mtime']
            return (datetime.fromtimestamp(mtime, timezone), mtime)
        else:   # maybe it was deleted since we did the readdir?
            return (None, None)

    get_file_time = get_file_time_from_strptime if strptime_format else get_file_time_from_mtime

    # data structures are nested dicts:
    # files_by_ymd = year.month.day.hour.unixtime: filename
    # files_by_y_week_dow = year.week_of_year.day_of_week.unixtime: filename
    # http://the.randomengineer.com/2015/04/28/python-recursive-defaultdict/
    # TODO: move to an ordered dict model and reduce the number of sorts in
    # the rest of the code?
    def dict_maker():
        return defaultdict(dict_maker)
    files_by_ymd = dict_maker()
    files_by_y_week_dow = dict_maker()
    relevant_files = set()
    ignored_files = set()
    for f in all_files:
        ts, ts_epoch = get_file_time(f)
        if ts:
            files_by_ymd[ts.year][ts.month][ts.day][ts.hour][ts_epoch] = f
            week_of_year = ts.isocalendar()[1]
            files_by_y_week_dow[ts.year][week_of_year][ts.weekday()][ts_epoch] = f
            relevant_files.add(f)
        else:
            ignored_files.add(f)

    # This is tightly coupled with the file_with_times data-structure above.
    # The depth indexes into the nested year/month/day/hour/epoch dicts.
    RETAIN_TO_DEPTH = {
        'first_of_year': 1,
        'first_of_month': 2,
        'first_of_day': 3,
        'first_of_hour': 4,
        'most_recent': 5,
    }

    def get_first(fwt):
        # Descend the nested dicts along the smallest keys and return the
        # single earliest filename at the bottom as a one-element set.
        if isinstance(fwt, dict):
            first_sub_key = sorted(fwt.keys())[0]
            return get_first(fwt[first_sub_key])
        else:
            return set([fwt, ])

    def get_first_n_at_depth(fwt, depth, n):
        # Walk the most recent `n` subtrees at this level (keys sorted
        # descending) and collect the "first" file from each.
        if depth <= 0:
            return get_first(fwt)
        else:
            result_set = set()
            for k in sorted(fwt.keys(), reverse=True):
                needed = n - len(result_set)
                if needed < 1:
                    break
                result_set |= get_first_n_at_depth(fwt[k], depth - 1, needed)
            return result_set

    # for each retain criteria, add filenames which match the criteria to the
    # retain set.
    retained_files = set()
    for retention_rule, keep_count in retain.items():
        # This is kind of a hack, since 'all' should really mean all,
        # but I think it's a large enough number that even modern filesystems
        # would choke if they had this many files in a single directory.
        keep_count = sys.maxsize if 'all' == keep_count else int(keep_count)
        if 'first_of_week' == retention_rule:
            first_of_week_depth = 2   # year + week_of_year = 2
            # I'm adding 1 to keep_count below because it fixed an off-by one
            # issue in the tests. I don't understand why, and that bothers me.
            retained_files |= get_first_n_at_depth(files_by_y_week_dow,
                                                   first_of_week_depth,
                                                   keep_count + 1)
        else:
            retained_files |= get_first_n_at_depth(files_by_ymd,
                                                   RETAIN_TO_DEPTH[retention_rule],
                                                   keep_count)

    deletable_files = list(relevant_files - retained_files)
    deletable_files.sort(reverse=True)
    changes = {
        'retained': sorted(list(retained_files), reverse=True),
        'deleted': deletable_files,
        'ignored': sorted(list(ignored_files), reverse=True),
    }
    ret['pchanges'] = changes

    # TODO: track and report how much space was / would be reclaimed
    if __opts__['test']:
        ret['comment'] = '{0} backups would have been removed from {1}.\n'.format(len(deletable_files), name)
        if deletable_files:
            ret['result'] = None
    else:
        for f in deletable_files:
            __salt__['file.remove'](os.path.join(name, f))
        ret['comment'] = '{0} backups were removed from {1}.\n'.format(len(deletable_files), name)
        ret['changes'] = changes

    return ret


def line(name, content, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True, create=False, user=None,
         group=None, file_mode=None):
    '''
    Line-based editing of a file.

    .. versionadded:: 2015.8.0

    name
        Filesystem path to the file to be edited.

    content
        Content of the line.

    match
        Match the target line for an action by
        a fragment of a string or regular expression.
        If neither ``before`` nor ``after`` are provided, and ``match``
        is also ``None``, match becomes the ``content`` value.

    mode
        Defines how to edit a line. One of the following options is
        required:

        - ensure
            If line does not exist, it will be added.
        - replace
            If line already exists, it will be replaced.
        - delete
            Delete the line, once found.
        - insert
            Insert a line.

        .. note::

            If ``mode=insert`` is used, at least one of the following
            options must also be defined: ``location``, ``before``, or
            ``after``. If ``location`` is used, it takes precedence
            over the other two options.

    location
        Defines where to place content in the line. Note this option is only
        used when ``mode=insert`` is specified. If a location is passed in, it
        takes precedence over both the ``before`` and ``after`` kwargs. Valid
        locations are:

        - start
            Place the content at the beginning of the file.
        - end
            Place the content at the end of the file.

    before
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert``
        mode is defined.

    after
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert``
        mode is defined.

    show_changes
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Default is ``True``

        .. note::
            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to
            generate the diff.

    backup
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".

    quiet
        Do not raise any exceptions. E.g. ignore the fact that the file that
        is tried to be edited does not exist and nothing really happened.

    indent
        Keep indentation with the previous line. This option is not
        considered when the ``delete`` mode is specified.

    :param create:
        Create an empty file if it doesn't exist.

        .. versionadded:: 2016.11.0

    :param user:
        The user to own the file, this defaults to the user salt is
        running as on the minion.

        .. versionadded:: 2016.11.0

    :param group:
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored.

        .. versionadded:: 2016.11.0

    :param file_mode:
        The permissions to set on this file, aka 644, 0775, 4664. Not
        supported on Windows.

        .. versionadded:: 2016.11.0

    If an equal sign (``=``) appears in an argument to a Salt command, it is
    interpreted as a keyword argument in the format of ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block: yaml

       update_config:
         file.line:
           - name: /etc/myconfig.conf
           - mode: ensure
           - content: my key = my value
           - before: somekey.*?
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.line')

    # NOTE(review): managed() is invoked unconditionally here, presumably to
    # enforce create/user/group/mode before editing; its return value is
    # discarded — confirm this is intentional before changing it.
    managed(name, create=create, user=user, group=group, mode=file_mode)

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    changes = __salt__['file.line'](
        name, content, match=match, mode=mode, location=location,
        before=before, after=after, show_changes=show_changes,
        backup=backup, quiet=quiet, indent=indent)
    if changes:
        ret['pchanges']['diff'] = changes
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes)
        else:
            ret['result'] = True
            ret['comment'] = 'Changes were made'
            ret['changes'] = {'diff': changes}
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'

    return ret


def replace(name,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            show_changes=True,
            ignore_if_missing=False):
    r'''
    Maintain an edit in a file.

    ..
    versionadded:: 0.17.0

    name
        Filesystem path to the file to be edited. If a symlink is specified,
        it will be resolved to its target.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text

    count
        Maximum number of pattern occurrences to be replaced. Defaults to 0.
        If count is a positive integer n, no more than n occurrences will be
        replaced, otherwise all occurrences will be replaced.

    flags
        A list of flags defined in the :ref:`re module documentation
        <contents-of-module-re>`. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults
        to ``8`` (which equates to ``['MULTILINE']``).

        .. note::

            ``file.replace`` reads the entire file as a string to support
            multiline regex patterns. Therefore, when using anchors such as
            ``^`` or ``$`` in the pattern, those anchors may be relative to
            the line OR relative to the file. The default for
            ``file.replace`` is to treat anchors as relative to the line,
            which is implemented by setting the default value of ``flags`` to
            ``['MULTILINE']``. When overriding the default value for
            ``flags``, if ``'MULTILINE'`` is not present then anchors will be
            relative to the file. If the desired behavior is for anchors to
            be relative to the line, then simply add ``'MULTILINE'`` to the
            list of flags.

    bufsize
        How much of the file to buffer into memory at once. The default value
        ``1`` processes one line at a time. The special value ``file`` may be
        specified which will read the entire file into memory before
        processing.

    append_if_not_found : False
        If set to ``True``, and pattern is not found, then the content will
        be appended to the file.

        .. versionadded:: 2014.7.0

    prepend_if_not_found : False
        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.

        .. versionadded:: 2014.7.0

    not_found_content
        Content to use for append/prepend if not found. If ``None``
        (default), uses ``repl``. Useful when ``repl`` uses references to
        group in pattern.

        .. versionadded:: 2014.7.0

    backup
        The file extension to use for a backup of the file before editing.
        Set to ``False`` to skip making a backup.

    show_changes : True
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Returns a boolean or a string.

        .. note:
            Using this option will store two copies of the file in memory
            (the original version and the edited version) in order to
            generate the diff. This may not normally be a concern, but could
            impact performance if used with large files.

    ignore_if_missing : False
        .. versionadded:: 2016.3.4

        Controls what to do if the file is missing. If set to ``False``, the
        state will display an error raised by the execution module. If set to
        ``True``, the state will simply report no changes.

    For complex regex patterns, it can be useful to avoid the need for
    complex quoting and escape sequences by making use of YAML's multiline
    string syntax.

    .. code-block:: yaml

        complex_search_and_replace:
          file.replace:
            # <...snip...>
            - pattern: |
                CentOS \(2.6.32[^\n]+\n\s+root[^\n]+\n\)+

    .. note::

       When using YAML multiline string syntax in ``pattern:``, make sure to
       also use that syntax in the ``repl:`` part, or you might lose line
       feeds.
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.replace')

    check_res, check_msg = _check_file(name)
    if not check_res:
        # Missing files are an error unless the caller opted in to treating
        # them as a no-op via ignore_if_missing.
        if ignore_if_missing and 'file not found' in check_msg:
            ret['comment'] = 'No changes needed to be made'
            return ret
        else:
            return _error(ret, check_msg)

    changes = __salt__['file.replace'](name,
                                       pattern,
                                       repl,
                                       count=count,
                                       flags=flags,
                                       bufsize=bufsize,
                                       append_if_not_found=append_if_not_found,
                                       prepend_if_not_found=prepend_if_not_found,
                                       not_found_content=not_found_content,
                                       backup=backup,
                                       dry_run=__opts__['test'],
                                       show_changes=show_changes,
                                       ignore_if_missing=ignore_if_missing)

    if changes:
        ret['pchanges']['diff'] = changes
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
        else:
            ret['result'] = True
            ret['comment'] = 'Changes were made'
            ret['changes'] = {'diff': changes}
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'

    return ret


def blockreplace(
        name,
        marker_start='#-- start managed zone --',
        marker_end='#-- end managed zone --',
        source=None,
        source_hash=None,
        template='jinja',
        sources=None,
        source_hashes=None,
        defaults=None,
        context=None,
        content='',
        append_if_not_found=False,
        prepend_if_not_found=False,
        backup='.bak',
        show_changes=True):
    '''
    Maintain an edit in a file in a zone delimited by two line markers

    .. versionadded:: 2014.1.0

    A block of content delimited by comments can help you manage several lines
    entries without worrying about old entries removal. This can help you
    maintaining an un-managed file containing manual edits.
    Note: this function will store two copies of the file in-memory
    (the original version and the edited version) in order to detect changes
    and only edit the targeted file if necessary.
name Filesystem path to the file to be edited marker_start The line content identifying a line as the start of the content block. Note that the whole line containing this marker will be considered, so whitespace or extra content before or after the marker is included in final output marker_end The line content identifying a line as the end of the content block. Note that the whole line containing this marker will be considered, so whitespace or extra content before or after the marker is included in final output. Note: you can use file.accumulated and target this state. All accumulated data dictionaries content will be added as new lines in the content content The content to be used between the two lines identified by ``marker_start`` and ``marker_end`` source The source file to download to the minion, this source file can be hosted on either the salt master server, or on an HTTP or FTP server. Both HTTPS and HTTP are supported as well as downloading directly from Amazon S3 compatible URLs with both pre-configured and automatic IAM credentials. (see s3.get state documentation) File retrieval from Openstack Swift object storage is supported via swift://container/object_path URLs, see swift.get documentation. For files hosted on the salt file server, if the file is located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs. If source is left blank or None (use ~ in YAML), the file will be created as an empty file and the content will not be managed. This is also the case when a file already exists and the source is undefined; the contents of the file will not be changed or managed. If the file is hosted on a HTTP or FTP server then the source_hash argument is also required. A list of sources can also be passed in to provide a default source and a set of fallbacks. The first source in the list that is found to exist will be used and subsequent entries in the list will be ignored. .. 
code-block:: yaml file_override_example: file.blockreplace: - name: /etc/example.conf - source: - salt://file_that_does_not_exist - salt://file_that_exists source_hash This can be one of the following: 1. a source hash string 2. the URI of a file that contains source hash strings The function accepts the first encountered long unbroken alphanumeric string of correct length as a valid hash, in order from most secure to least secure: .. code-block:: text Type Length ====== ====== sha512 128 sha384 96 sha256 64 sha224 56 sha1 40 md5 32 See the ``source_hash`` parameter description for :mod:`file.managed <salt.states.file.managed>` function for more details and examples. template The named templating engine will be used to render the downloaded file. Defaults to ``jinja``. The following templates are supported: - :mod:`cheetah<salt.renderers.cheetah>` - :mod:`genshi<salt.renderers.genshi>` - :mod:`jinja<salt.renderers.jinja>` - :mod:`mako<salt.renderers.mako>` - :mod:`py<salt.renderers.py>` - :mod:`wempy<salt.renderers.wempy>` context Overrides default context variables passed to the template. defaults Default context passed to the template. append_if_not_found If markers are not found and set to True then the markers and content will be appended to the file. Default is ``False`` prepend_if_not_found If markers are not found and set to True then the markers and content will be prepended to the file. Default is ``False`` backup The file extension to use for a backup of the file if any edit is made. Set this to ``False`` to skip making a backup. dry_run Don't make any edits to the file show_changes Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made Example of usage with an accumulator and with a variable: .. 
code-block:: yaml {% set myvar = 42 %} hosts-config-block-{{ myvar }}: file.blockreplace: - name: /etc/hosts - marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-" - marker_end: "# END managed zone {{ myvar }} --" - content: 'First line of content' - append_if_not_found: True - backup: '.bak' - show_changes: True hosts-config-block-{{ myvar }}-accumulated1: file.accumulated: - filename: /etc/hosts - name: my-accumulator-{{ myvar }} - text: "text 2" - require_in: - file: hosts-config-block-{{ myvar }} hosts-config-block-{{ myvar }}-accumulated2: file.accumulated: - filename: /etc/hosts - name: my-accumulator-{{ myvar }} - text: | text 3 text 4 - require_in: - file: hosts-config-block-{{ myvar }} will generate and maintain a block of content in ``/etc/hosts``: .. code-block:: text # START managed zone 42 -DO-NOT-EDIT- First line of content text 2 text 3 text 4 # END managed zone 42 -- ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': False, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.blockreplace') if sources is None: sources = [] if source_hashes is None: source_hashes = [] (ok_, err, sl_) = _unify_sources_and_hashes(source=source, source_hash=source_hash, sources=sources, source_hashes=source_hashes) if not ok_: return _error(ret, err) check_res, check_msg = _check_file(name) if not check_res: return _error(ret, check_msg) accum_data, accum_deps = _load_accumulators() if name in accum_data: accumulator = accum_data[name] # if we have multiple accumulators for a file, only apply the one # required at a time deps = accum_deps.get(name, []) filtered = [a for a in deps if __low__['__id__'] in deps[a] and a in accumulator] if not filtered: filtered = [a for a in accumulator] for acc in filtered: acc_content = accumulator[acc] for line in acc_content: if content == '': content = line else: content += "\n" + line if sl_: tmpret = _get_template_texts(source_list=sl_, template=template, 
def comment(name, regex, char='#', backup='.bak'):
    '''
    Comment out specified lines in a file.

    name
        The full path to the file to be edited

    regex
        A regular expression matching the lines to comment out. The pattern
        is wrapped in parenthesis internally; leading/trailing ``^``/``$``
        anchors are moved outside the parenthesis. Keep the leading ``^`` or
        every highstate run will insert another comment character.

    char : ``#``
        Character inserted at the beginning of each matched line.

    backup : ``.bak``
        Extension for the pre-edit backup. Overwritten on every invocation;
        set to ``False``/``None`` to skip the backup.

    Usage:

    .. code-block:: yaml

        /etc/fstab:
          file.comment:
            - regex: ^bind 127.0.0.1

    .. versionadded:: 0.9.5
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.comment')

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    # The anchor-free pattern, and the same pattern prefixed with the
    # comment character.
    pattern_unanchored = regex.lstrip('^').rstrip('$')
    pattern_commented = char + pattern_unanchored

    # Truthy when a commented form of the line is already present.
    already_commented = bool(
        __salt__['file.search'](name, pattern_commented, multiline=True))

    # Nothing to do when the line is already commented, or when the raw
    # pattern never appears in the file at all.
    if already_commented or not __salt__['file.search'](name, regex,
                                                        multiline=True):
        if __salt__['file.search'](name, pattern_unanchored, multiline=True):
            ret['comment'] = 'Pattern already commented'
            ret['result'] = True
            return ret
        return _error(ret, '{0}: Pattern not found'.format(pattern_unanchored))

    ret['pchanges'][name] = 'updated'
    if __opts__['test']:
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        return ret

    # Snapshot the file, comment the lines, then snapshot again for a diff.
    with salt.utils.fopen(name, 'rb') as handle:
        before_lines = handle.readlines()

    __salt__['file.comment_line'](name, regex, char, True, backup)

    with salt.utils.fopen(name, 'rb') as handle:
        after_lines = handle.readlines()

    # Success means the (now commented) text is still findable unanchored.
    ret['result'] = __salt__['file.search'](name, pattern_unanchored,
                                            multiline=True)

    if before_lines != after_lines:
        if salt.utils.istextfile(name):
            ret['changes']['diff'] = ''.join(
                difflib.unified_diff(before_lines, after_lines))
        else:
            ret['changes']['diff'] = 'Replace binary file'

    ret['comment'] = ('Commented lines successfully' if ret['result']
                      else 'Expected commented lines not found')

    return ret
def uncomment(name, regex, char='#', backup='.bak'):
    '''
    Uncomment specified commented lines in a file

    name
        The full path to the file to be edited

    regex
        A regular expression used to find the lines that are to be
        uncommented. This regex should not include the comment character.
        A leading ``^`` character will be stripped for convenience (for
        easily switching between comment() and uncomment()).  The regex
        will be searched for from the beginning of the line, ignoring
        leading spaces (we prepend '^[ \\t]*')

    char : ``#``
        The character to remove in order to uncomment a line

    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        this backup is overwritten on each invocation. Set to False/None
        to not keep a backup.

    Usage:

    .. code-block:: yaml

        /etc/adduser.conf:
          file.uncomment:
            - regex: EXTRA_GROUPS

    .. versionadded:: 0.9.5
    '''
    name = os.path.expanduser(name)

    # Standard Salt state return structure; 'pchanges' carries the
    # would-be changes for test=True runs.
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.uncomment')

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    # Make sure the pattern appears in the file.
    # First: already uncommented (pattern at line start, ignoring leading
    # whitespace) -> nothing to do.
    if __salt__['file.search'](
            name,
            '^[ \t]*{0}'.format(regex.lstrip('^')),
            multiline=True):
        ret['comment'] = 'Pattern already uncommented'
        ret['result'] = True
        return ret
    # Second: a commented form exists -> fall through and uncomment it.
    elif __salt__['file.search'](
            name,
            '{0}[ \t]*{1}'.format(char, regex.lstrip('^')),
            multiline=True):
        # Line exists and is commented
        pass
    else:
        # Neither form present: the pattern simply isn't in the file.
        return _error(ret, '{0}: Pattern not found'.format(regex))

    ret['pchanges'][name] = 'updated'
    if __opts__['test']:
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        return ret

    # Snapshot before the edit so a diff can be produced afterwards.
    with salt.utils.fopen(name, 'rb') as fp_:
        slines = fp_.readlines()

    # Perform the edit (False = uncomment mode of file.comment_line).
    __salt__['file.comment_line'](name, regex, char, False, backup)

    with salt.utils.fopen(name, 'rb') as fp_:
        nlines = fp_.readlines()

    # Check the result: success iff the uncommented form is now present.
    ret['result'] = __salt__['file.search'](
        name,
        '^[ \t]*{0}'.format(regex.lstrip('^')),
        multiline=True
    )

    if slines != nlines:
        if not salt.utils.istextfile(name):
            ret['changes']['diff'] = 'Replace binary file'
        else:
            # Changes happened, add them
            ret['changes']['diff'] = (
                ''.join(difflib.unified_diff(slines, nlines))
            )

    if ret['result']:
        ret['comment'] = 'Uncommented lines successfully'
    else:
        ret['comment'] = 'Expected uncommented lines not found'

    return ret
def append(name,
           text=None,
           makedirs=False,
           source=None,
           source_hash=None,
           template='jinja',
           sources=None,
           source_hashes=None,
           defaults=None,
           context=None,
           ignore_whitespace=True):
    '''
    Ensure that some text appears at the end of a file.

    The text will not be appended if it already exists in the file.
    A single string of text or a list of strings may be appended.

    name
        The location of the file to append to.

    text
        The text to be appended: a single string or a list of strings.

    makedirs
        If True, create missing parent directories. Defaults to False.

    source
        A single source file to append (salt://, http(s)://, ftp://, s3://,
        swift:// URLs supported). ``source_hash`` is required for HTTP/FTP
        sources.

    source_hash
        A source hash string, or the URI of a file containing hash strings.
        See :mod:`file.managed <salt.states.file.managed>` for details.

    template
        Templating engine used to render the appended-to file. Defaults to
        ``jinja``. Supported: cheetah, genshi, jinja, mako, py, wempy.

    sources
        A list of source files to append (with ``source_hashes`` for
        HTTP/FTP sources).

    source_hashes
        Hashes corresponding to the ``sources`` list.

    defaults
        Default context passed to the template.

    context
        Overrides default context variables passed to the template.

    ignore_whitespace
        .. versionadded:: 2015.8.4

        When True (the default), whitespace differences are ignored when
        searching for already-present content. Set to ``False`` to require
        an exact match.

    .. versionadded:: 0.9.5
    '''
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}

    if not name:
        return _error(ret, 'Must provide name to file.append')

    name = os.path.expanduser(name)

    if sources is None:
        sources = []

    if source_hashes is None:
        source_hashes = []

    # Add sources and source_hashes with template support
    # NOTE: 'text' and any 'source' are mutually exclusive as 'text'
    #       is re-assigned below when sources are given.
    (ok_, err, sl_) = _unify_sources_and_hashes(source=source,
                                                source_hash=source_hash,
                                                sources=sources,
                                                source_hashes=source_hashes)
    if not ok_:
        return _error(ret, err)

    if makedirs is True:
        dirname = os.path.dirname(name)
        if not __salt__['file.directory_exists'](dirname):
            __salt__['file.makedirs'](name)
            check_res, check_msg, ret['pchanges'] = _check_directory(
                dirname, None, None, False, None, False, False, None
            )
            if not check_res:
                return _error(ret, check_msg)

    check_res, check_msg = _check_file(name)
    if not check_res:
        # Try to create the file, then re-check.
        touch(name, makedirs=makedirs)
        retry_res, retry_msg = _check_file(name)
        if not retry_res:
            # FIX: report the message from the retry check (the original
            # code computed retry_msg but returned the stale check_msg).
            return _error(ret, retry_msg)

    # Re-assign 'text' from rendered sources when source(s) were given.
    if sl_:
        tmpret = _get_template_texts(source_list=sl_,
                                     template=template,
                                     defaults=defaults,
                                     context=context)
        if not tmpret['result']:
            return tmpret
        text = tmpret['data']

    text = _validate_str_list(text)

    # Snapshot current contents for the diff.
    with salt.utils.fopen(name, 'rb') as fp_:
        slines = fp_.read().splitlines()

    append_lines = []
    try:
        for chunk in text:
            # Skip chunks already present in the file; whitespace-tolerant
            # by default, exact match when ignore_whitespace=False.
            if ignore_whitespace:
                if __salt__['file.search'](
                        name,
                        salt.utils.build_whitespace_split_regex(chunk),
                        multiline=True):
                    continue
            elif __salt__['file.search'](
                    name,
                    chunk,
                    multiline=True):
                continue

            for line_item in chunk.splitlines():
                append_lines.append('{0}'.format(line_item))

    except TypeError:
        # text was neither iterable of strings nor splittable.
        return _error(ret, 'No text found to append. Nothing appended')

    if __opts__['test']:
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        nlines = list(slines)
        nlines.extend(append_lines)
        if slines != nlines:
            if not salt.utils.istextfile(name):
                ret['changes']['diff'] = 'Replace binary file'
            else:
                # Changes happened, add them
                ret['changes']['diff'] = (
                    '\n'.join(difflib.unified_diff(slines, nlines))
                )
        else:
            ret['comment'] = 'File {0} is in correct state'.format(name)
            ret['result'] = True
        return ret

    if append_lines:
        __salt__['file.append'](name, args=append_lines)
        ret['comment'] = 'Appended {0} lines'.format(len(append_lines))
    else:
        ret['comment'] = 'File {0} is in correct state'.format(name)

    with salt.utils.fopen(name, 'rb') as fp_:
        nlines = fp_.read().splitlines()

    if slines != nlines:
        if not salt.utils.istextfile(name):
            ret['changes']['diff'] = 'Replace binary file'
        else:
            # Changes happened, add them
            ret['changes']['diff'] = (
                '\n'.join(difflib.unified_diff(slines, nlines)))

    ret['result'] = True

    return ret
def prepend(name,
            text=None,
            makedirs=False,
            source=None,
            source_hash=None,
            template='jinja',
            sources=None,
            source_hashes=None,
            defaults=None,
            context=None,
            header=None):
    '''
    Ensure that some text appears at the beginning of a file

    The text will not be prepended again if it already exists in the file.
    You may specify a single line of text or a list of lines to append.

    ``header: True`` additionally requires the text to appear verbatim at
    the very top of the file (order and position), rewriting the head when
    it differs.

    Gather text from multiple template files via ``sources`` with
    ``template``, as in :mod:`file.append <salt.states.file.append>`.

    .. versionadded:: 2014.7.0
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.prepend')

    if sources is None:
        sources = []

    if source_hashes is None:
        source_hashes = []

    # Add sources and source_hashes with template support
    # NOTE: FIX 'text' and any 'source' are mutually exclusive as 'text'
    #       is re-assigned in the original code.
    (ok_, err, sl_) = _unify_sources_and_hashes(source=source,
                                                source_hash=source_hash,
                                                sources=sources,
                                                source_hashes=source_hashes)
    if not ok_:
        return _error(ret, err)

    if makedirs is True:
        dirname = os.path.dirname(name)
        if not __salt__['file.directory_exists'](dirname):
            __salt__['file.makedirs'](name)
            check_res, check_msg, ret['pchanges'] = _check_directory(
                dirname, None, None, False, None, False, False, None
            )
            if not check_res:
                return _error(ret, check_msg)

    check_res, check_msg = _check_file(name)
    if not check_res:
        # Try to create the file
        touch(name, makedirs=makedirs)
        retry_res, retry_msg = _check_file(name)
        if not retry_res:
            # NOTE(review): retry_msg is computed but check_msg is reported
            # here; likely should be retry_msg — confirm against upstream.
            return _error(ret, check_msg)

    # Follow the original logic and re-assign 'text' if using source(s)...
    if sl_:
        tmpret = _get_template_texts(source_list=sl_,
                                     template=template,
                                     defaults=defaults,
                                     context=context)
        if not tmpret['result']:
            return tmpret
        text = tmpret['data']

    text = _validate_str_list(text)

    # Snapshot current contents for the diff. NOTE(review): file is opened
    # 'rb' while prepended lines are str — presumably fine on the Python 2
    # runtime this targets; verify on Python 3.
    with salt.utils.fopen(name, 'rb') as fp_:
        slines = fp_.readlines()

    count = 0
    test_lines = []

    preface = []
    for chunk in text:

        # if header kwarg is unset of False, use regex search to skip
        # chunks already present anywhere in the file.
        if not header:
            if __salt__['file.search'](
                    name,
                    salt.utils.build_whitespace_split_regex(chunk),
                    multiline=True):
                continue

        lines = chunk.splitlines()

        for line in lines:
            if __opts__['test']:
                ret['comment'] = 'File {0} is set to be updated'.format(name)
                ret['result'] = None
                test_lines.append('{0}\n'.format(line))
            else:
                preface.append(line)
            count += 1

    if __opts__['test']:
        # Dry run: build the hypothetical result and diff it.
        nlines = test_lines + slines
        if slines != nlines:
            if not salt.utils.istextfile(name):
                ret['changes']['diff'] = 'Replace binary file'
            else:
                # Changes happened, add them
                ret['changes']['diff'] = (
                    ''.join(difflib.unified_diff(slines, nlines))
                )
            ret['result'] = None
        else:
            ret['comment'] = 'File {0} is in correct state'.format(name)
            ret['result'] = True
        return ret

    # if header kwarg is True, use verbatim compare against the file head
    if header:
        with salt.utils.fopen(name, 'rb') as fp_:
            # read as many lines of target file as length of user input
            target_head = fp_.readlines()[0:len(preface)]
            target_lines = []
            # strip newline chars from list entries
            for chunk in target_head:
                target_lines += chunk.splitlines()
        # compare current top lines in target file with user input
        # and write user input if they differ
        if target_lines != preface:
            __salt__['file.prepend'](name, *preface)
        else:
            # clear changed lines counter if target file not modified
            count = 0
    else:
        __salt__['file.prepend'](name, *preface)

    with salt.utils.fopen(name, 'rb') as fp_:
        nlines = fp_.readlines()

    if slines != nlines:
        if not salt.utils.istextfile(name):
            ret['changes']['diff'] = 'Replace binary file'
        else:
            # Changes happened, add them
            ret['changes']['diff'] = (
                ''.join(difflib.unified_diff(slines, nlines))
            )

    if count:
        ret['comment'] = 'Prepended {0} lines'.format(count)
    else:
        ret['comment'] = 'File {0} is in correct state'.format(name)
    ret['result'] = True
    return ret
def patch(name,
          source=None,
          options='',
          dry_run_first=True,
          **kwargs):
    '''
    Apply a patch to a file or directory.

    .. note::
        A suitable ``patch`` executable must be available on the minion.

    name
        The file or directory to which the patch will be applied.

    source
        The source patch to download to the minion (salt:// only). Required.

    hash
        The hash of the patched file. If the hash of the target file matches
        this value then the patch is assumed to have been applied. From
        2016.11.4 a bare hash is accepted; earlier releases require the
        ``<hash_type>:<hash_value>`` form.

    options
        Extra options to pass to patch.

    dry_run_first : ``True``
        Run patch with ``--dry-run`` first to check if it will apply cleanly.

    saltenv
        Environment from which to retrieve the patch file. Defaults to the
        environment the state executes in.
    '''
    # 'hash' would shadow a builtin as a named parameter, hence kwargs.
    hash_ = kwargs.pop('hash', None)

    if 'env' in kwargs:
        # Deprecated alias for saltenv; warn and discard.
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list.  This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0.  This warning will be removed in Salt Oxygen.'
        )
        kwargs.pop('env')

    name = os.path.expanduser(name)

    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.patch')
    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)
    if not source:
        return _error(ret, 'Source is required')
    if hash_ is None:
        return _error(ret, 'Hash is required')

    # Idempotence: if the target already matches the expected hash, the
    # patch has been applied before.
    try:
        if hash_ and __salt__['file.check_hash'](name, hash_):
            ret['result'] = True
            ret['comment'] = 'Patch is already applied'
            return ret
    except (SaltInvocationError, ValueError) as exc:
        ret['comment'] = exc.__str__()
        return ret

    # get cached file or copy it to cache
    cached_source_path = __salt__['cp.cache_file'](source, __env__)
    if not cached_source_path:
        ret['comment'] = ('Unable to cache {0} from saltenv \'{1}\''
                          .format(source, __env__))
        return ret

    log.debug(
        'State patch.applied cached source %s -> %s',
        source, cached_source_path
    )

    if dry_run_first or __opts__['test']:
        ret['changes'] = __salt__['file.patch'](
            name, cached_source_path, options=options, dry_run=True
        )
        if __opts__['test']:
            ret['comment'] = 'File {0} will be patched'.format(name)
            ret['result'] = None
            return ret
        # Dry run failed: bail without touching the file.
        if ret['changes']['retcode'] != 0:
            return ret

    ret['changes'] = __salt__['file.patch'](
        name, cached_source_path, options=options
    )
    ret['result'] = ret['changes']['retcode'] == 0
    # No need to check for SaltInvocationError or ValueError this time,
    # since those exceptions would have been caught above.
    if ret['result'] and hash_ and not __salt__['file.check_hash'](name, hash_):
        ret['result'] = False
        ret['comment'] = 'Hash mismatch after patch was applied'
    return ret
def touch(name, atime=None, mtime=None, makedirs=False):
    '''
    Replicate the 'nix "touch" command: create a new empty file, or update
    the atime and mtime of an existing file.

    Prefer ``file.managed`` (without source/template/contents) when you only
    need the file to exist with correct permissions; this state is for
    explicit timestamp manipulation.

    name
        name of the file

    atime
        atime of the file

    mtime
        mtime of the file

    makedirs
        whether we should create the parent directory/directories in order
        to touch the file

    Usage:

    .. code-block:: yaml

        /var/log/httpd/logrotate.empty:
          file.touch

    .. versionadded:: 0.9.5
    '''
    name = os.path.expanduser(name)

    ret = {
        'name': name,
        'changes': {},
    }

    # Argument validation: name is mandatory and must be absolute.
    if not name:
        return _error(ret, 'Must provide name to file.touch')
    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name)
        )

    # Dry run: delegate the prediction to the helper.
    if __opts__['test']:
        ret['result'], ret['comment'] = _check_touch(name, atime, mtime)
        return ret

    if makedirs:
        __salt__['file.makedirs'](name)
    if not os.path.isdir(os.path.dirname(name)):
        return _error(
            ret, 'Directory not present to touch file {0}'.format(name)
        )

    # Record existence before touching so we can tell create from update.
    existed_before = os.path.exists(name)

    ret['result'] = __salt__['file.touch'](name, atime, mtime)

    if ret['result']:
        if existed_before:
            ret['comment'] = 'Updated times on {0} {1}'.format(
                'directory' if os.path.isdir(name) else 'file', name
            )
            ret['changes']['touched'] = name
        else:
            ret['comment'] = 'Created empty file {0}'.format(name)
            ret['changes']['new'] = name

    return ret
def copy(
        name,
        source,
        force=False,
        makedirs=False,
        preserve=False,
        user=None,
        group=None,
        mode=None,
        subdir=False,
        **kwargs):
    '''
    If the source file exists on the system, copy it to the named file. The
    named file will not be overwritten if it already exists unless the force
    option is set to True.

    name
        The location of the file to copy to

    source
        The location of the file to copy to the location specified with name

    force
        If the target location is present then the file will not be moved,
        specify "force: True" to overwrite the target file

    makedirs
        If the target subdirectories don't exist create them

    preserve
        .. versionadded:: 2015.5.0

        Preserve user/group ownership and mode after copying. When True,
        the user/group/mode arguments are ignored.

    user / group / mode
        .. versionadded:: 2015.5.0

        Ownership and permissions for the copied file; ignored when
        ``preserve`` is True. ``group`` and ``mode`` are ignored on Windows.

    subdir
        .. versionadded:: 2015.5.0

        If the name is a directory, place the file inside the named
        directory.

    .. note::
        Only paths local to the minion are supported (no salt:// or
        http:// sources).
    '''
    name = os.path.expanduser(name)
    source = os.path.expanduser(source)

    ret = {
        'name': name,
        'changes': {},
        'comment': 'Copied "{0}" to "{1}"'.format(source, name),
        'result': True}
    if not name:
        return _error(ret, 'Must provide name to file.copy')

    changed = True
    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name))

    if not os.path.exists(source):
        return _error(ret, 'Source file "{0}" is not present'.format(source))

    if preserve:
        # Mirror the source's ownership and mode.
        user = __salt__['file.get_user'](source)
        group = __salt__['file.get_group'](source)
        mode = __salt__['file.get_mode'](source)
    else:
        user = _test_owner(kwargs, user=user)
        if user is None:
            user = __opts__['user']

        if salt.utils.is_windows():
            # Windows has no POSIX group semantics; fold group into user.
            if group is not None:
                log.warning(
                    'The group argument for {0} has been ignored as this is '
                    'a Windows system.'.format(name)
                )
            group = user

        if group is None:
            group = __salt__['file.gid_to_group'](
                __salt__['user.info'](user).get('gid', 0)
            )

        u_check = _check_user(user, group)
        if u_check:
            # The specified user or group do not exist
            return _error(ret, u_check)

        if mode is None:
            mode = __salt__['file.get_mode'](source)

    if os.path.isdir(name) and subdir:
        # If the target is a dir, and overwrite_dir is False, copy into the dir
        name = os.path.join(name, os.path.basename(source))

    if os.path.lexists(source) and os.path.lexists(name):
        # if this is a file which did not change, do not update
        if force and os.path.isfile(name):
            hash1 = salt.utils.get_hash(name)
            hash2 = salt.utils.get_hash(source)
            if hash1 == hash2:
                # NOTE(review): identical files are still recopied when
                # force is set (changed stays True) — apparently intentional,
                # per the comment string appended below; confirm upstream.
                changed = True
                ret['comment'] = ' '.join([ret['comment'], '- files are identical but force flag is set'])
        if not force:
            changed = False
        elif not __opts__['test'] and changed:
            # Remove the destination to prevent problems later
            try:
                __salt__['file.remove'](name)
            except (IOError, OSError):
                return _error(
                    ret,
                    'Failed to delete "{0}" in preparation for '
                    'forced move'.format(name)
                )

    if __opts__['test']:
        if changed:
            ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format(
                source,
                name
            )
            ret['result'] = None
        else:
            ret['comment'] = ('The target file "{0}" exists and will not be '
                              'overwritten'.format(name))
            ret['result'] = True
        return ret

    if not changed:
        ret['comment'] = ('The target file "{0}" exists and will not be '
                          'overwritten'.format(name))
        ret['result'] = True
        return ret

    # Run makedirs
    dname = os.path.dirname(name)
    if not os.path.isdir(dname):
        if makedirs:
            __salt__['file.makedirs'](name)
        else:
            return _error(
                ret,
                'The target directory {0} is not present'.format(dname))
    # All tests pass, move the file into place
    try:
        if os.path.isdir(source):
            shutil.copytree(source, name, symlinks=True)
            # copytree does not apply ownership; walk and chown everything.
            for root, dirs, files in os.walk(name):
                for dir_ in dirs:
                    __salt__['file.lchown'](os.path.join(root, dir_), user, group)
                for file_ in files:
                    __salt__['file.lchown'](os.path.join(root, file_), user, group)
        else:
            shutil.copy(source, name)
        ret['changes'] = {name: source}
        # Preserve really means just keep the behavior of the cp command. If
        # the filesystem we're copying to is squashed or doesn't support chown
        # then we shouldn't be checking anything.
        if not preserve:
            __salt__['file.check_perms'](name, ret, user, group, mode)
    except (IOError, OSError):
        return _error(
            ret, 'Failed to copy "{0}" to "{1}"'.format(source, name))
    return ret
def rename(name, source, force=False, makedirs=False):
    '''
    If the source file exists on the system, rename it to the named file.
    The named file will not be overwritten if it already exists unless the
    force option is set to True.

    name
        The location of the file to rename to

    source
        The location of the file to move to the location specified with name

    force
        If the target location is present then the file will not be moved,
        specify "force: True" to overwrite the target file

    makedirs
        If the target subdirectories don't exist create them
    '''
    name = os.path.expanduser(name)
    source = os.path.expanduser(source)

    ret = {
        'name': name,
        'changes': {},
        'comment': '',
        'result': True}

    if not name:
        return _error(ret, 'Must provide name to file.rename')
    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name))

    # A missing source is treated as "already done" (idempotent success).
    if not os.path.lexists(source):
        ret['comment'] = ('Source file "{0}" has already been moved out of '
                          'place').format(source)
        return ret

    # Both source and destination exist: either refuse, or clear the way.
    if os.path.lexists(source) and os.path.lexists(name):
        if not force:
            ret['comment'] = ('The target file "{0}" exists and will not be '
                              'overwritten'.format(name))
            ret['result'] = False
            return ret
        if not __opts__['test']:
            # Remove the destination to prevent problems later
            try:
                __salt__['file.remove'](name)
            except (IOError, OSError):
                return _error(
                    ret,
                    'Failed to delete "{0}" in preparation for '
                    'forced move'.format(name)
                )

    if __opts__['test']:
        ret['comment'] = 'File "{0}" is set to be moved to "{1}"'.format(
            source,
            name
        )
        ret['result'] = None
        return ret

    # Create parent directories on demand.
    parent_dir = os.path.dirname(name)
    if not os.path.isdir(parent_dir):
        if not makedirs:
            return _error(
                ret,
                'The target directory {0} is not present'.format(parent_dir))
        __salt__['file.makedirs'](name)

    # All checks passed; perform the move. Symlinks are recreated rather
    # than moved so the link itself (not its target) is relocated.
    try:
        if os.path.islink(source):
            link_target = os.readlink(source)
            os.symlink(link_target, name)
            os.unlink(source)
        else:
            shutil.move(source, name)
    except (IOError, OSError):
        return _error(
            ret, 'Failed to move "{0}" to "{1}"'.format(source, name))

    ret['comment'] = 'Moved "{0}" to "{1}"'.format(source, name)
    ret['changes'] = {name: source}
    return ret
template in file.managed state. Accumulator dictionary becomes available in template. It can also be used in file.blockreplace. name Accumulator name filename Filename which would receive this accumulator (see file.managed state documentation about ``name``) text String or list for adding in accumulator require_in / watch_in One of them required for sure we fill up accumulator before we manage the file. Probably the same as filename Example: Given the following: .. code-block:: yaml animals_doing_things: file.accumulated: - filename: /tmp/animal_file.txt - text: ' jumps over the lazy dog.' - require_in: - file: animal_file animal_file: file.managed: - name: /tmp/animal_file.txt - source: salt://animal_file.txt - template: jinja One might write a template for ``animal_file.txt`` like the following: .. code-block:: jinja The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %} Collectively, the above states and template file will produce: .. code-block:: text The quick brown fox jumps over the lazy dog. Multiple accumulators can be "chained" together. .. note:: The 'accumulator' data structure is a Python dictionary. Do not expect any loop over the keys in a deterministic order! 
''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': '' } if not name: return _error(ret, 'Must provide name to file.accumulated') if text is None: ret['result'] = False ret['comment'] = 'No text supplied for accumulator' return ret require_in = __low__.get('require_in', []) watch_in = __low__.get('watch_in', []) deps = require_in + watch_in if not [x for x in deps if 'file' in x]: ret['result'] = False ret['comment'] = 'Orphaned accumulator {0} in {1}:{2}'.format( name, __low__['__sls__'], __low__['__id__'] ) return ret if isinstance(text, six.string_types): text = (text,) elif isinstance(text, dict): text = (text,) accum_data, accum_deps = _load_accumulators() if filename not in accum_data: accum_data[filename] = {} if filename not in accum_deps: accum_deps[filename] = {} if name not in accum_deps[filename]: accum_deps[filename][name] = [] for accumulator in deps: accum_deps[filename][name].extend(six.itervalues(accumulator)) if name not in accum_data[filename]: accum_data[filename][name] = [] for chunk in text: if chunk not in accum_data[filename][name]: accum_data[filename][name].append(chunk) ret['comment'] = ('Accumulator {0} for file {1} ' 'was charged by text'.format(name, filename)) _persist_accummulators(accum_data, accum_deps) return ret def serialize(name, dataset=None, dataset_pillar=None, user=None, group=None, mode=None, backup='', makedirs=False, show_diff=None, show_changes=True, create=True, merge_if_exists=False, **kwargs): ''' Serializes dataset and store it into managed file. Useful for sharing simple configuration files. name The location of the file to create dataset The dataset that will be serialized dataset_pillar Operates like ``dataset``, but draws from a value stored in pillar, using the pillar path syntax used in :mod:`pillar.get <salt.modules.pillar.get>`. 
def serialize(name,
              dataset=None,
              dataset_pillar=None,
              user=None,
              group=None,
              mode=None,
              backup='',
              makedirs=False,
              show_diff=None,
              show_changes=True,
              create=True,
              merge_if_exists=False,
              **kwargs):
    '''
    Serializes dataset and store it into managed file. Useful for sharing
    simple configuration files.

    name
        The location of the file to create

    dataset
        The dataset that will be serialized

    dataset_pillar
        Operates like ``dataset``, but draws from a value stored in pillar,
        using the pillar path syntax used in :mod:`pillar.get
        <salt.modules.pillar.get>`. This is useful when the pillar value
        contains newlines, as referencing a pillar variable using a
        jinja/mako template can result in YAML formatting issues due to the
        newlines causing indentation mismatches.

        .. versionadded:: 2015.8.0

    formatter
        Write the data as this format. Supported output formats: JSON, YAML,
        Python (via pprint.pformat)

    user
        The user to own the directory, this defaults to the user salt is
        running as on the minion

    group
        The group ownership set for the directory, this defaults to the
        group salt is running as on the minion

    mode
        The permissions to set on this file, e.g. ``644``, ``0775``, or
        ``4664``. Not supported on Windows.

    backup
        Overrides the default backup mode for this specific file.

    makedirs
        Create parent directories for destination file.

        .. versionadded:: 2014.1.3

    show_diff
        DEPRECATED: Please use show_changes.

    show_changes
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made.

    create
        Default is True, if create is set to False then the file will only be
        managed if the file already exists on the system.

    merge_if_exists
        Default is False, if merge_if_exists is True then the existing file
        will be parsed and the dataset passed in will be merged with the
        existing content

        .. versionadded:: 2014.7.0
    '''
    if 'env' in kwargs:
        # 'env' was renamed to 'saltenv'; warn and drop the legacy name.
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'env\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        kwargs.pop('env')

    name = os.path.expanduser(name)

    # Per-serializer defaults that yield stable, human-readable output.
    default_serializer_opts = {'yaml.serialize': {'default_flow_style': False},
                               'json.serialize': {'indent': 2,
                                                  'separators': (',', ': '),
                                                  'sort_keys': True}
                               }

    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}

    if not name:
        return _error(ret, 'Must provide name to file.serialize')

    if not create:
        if not os.path.isfile(name):
            # Don't create a file that is not already present
            ret['comment'] = ('File {0} is not present and is not set for '
                              'creation').format(name)
            return ret

    formatter = kwargs.pop('formatter', 'yaml').lower()

    # dataset and dataset_pillar are mutually exclusive inputs.
    if len([x for x in (dataset, dataset_pillar) if x]) > 1:
        return _error(
            ret, 'Only one of \'dataset\' and \'dataset_pillar\' is permitted')

    if dataset_pillar:
        dataset = __salt__['pillar.get'](dataset_pillar)

    if dataset is None:
        return _error(
            ret, 'Neither \'dataset\' nor \'dataset_pillar\' was defined')

    if salt.utils.is_windows():
        # Windows has no POSIX group semantics; fall back to the user.
        if group is not None:
            log.warning(
                'The group argument for {0} has been ignored as this '
                'is a Windows system.'.format(name)
            )
        group = user

    serializer_name = '{0}.serialize'.format(formatter)
    deserializer_name = '{0}.deserialize'.format(formatter)
    if serializer_name not in __serializers__:
        return {'changes': {},
                'comment': '{0} format is not supported'.format(
                    formatter.capitalize()),
                'name': name,
                'result': False
                }

    if merge_if_exists:
        if os.path.isfile(name):
            # Merging requires a matching deserializer to read the existing
            # content back in.
            if '{0}.deserialize'.format(formatter) not in __serializers__:
                return {'changes': {},
                        'comment': ('{0} format is not supported for merging'
                                    .format(formatter.capitalize())),
                        'name': name,
                        'result': False}

            with salt.utils.fopen(name, 'r') as fhr:
                existing_data = __serializers__[deserializer_name](fhr)

            if existing_data is not None:
                merged_data = salt.utils.dictupdate.merge_recurse(existing_data, dataset)
                if existing_data == merged_data:
                    # Merge produced no change; nothing to write.
                    ret['result'] = True
                    ret['comment'] = 'The file {0} is in the correct state'.format(name)
                    return ret
                dataset = merged_data
    contents = __serializers__[serializer_name](dataset, **default_serializer_opts.get(serializer_name, {}))

    contents += '\n'

    # Make sure that any leading zeros stripped by YAML loader are added back
    mode = salt.utils.normalize_mode(mode)

    if show_diff is not None:
        # Honor the deprecated argument but steer users to show_changes.
        show_changes = show_diff
        msg = (
            'The \'show_diff\' argument to the file.serialized state has been '
            'deprecated, please use \'show_changes\' instead.'
        )
        salt.utils.warn_until('Oxygen', msg)

    if __opts__['test']:
        # Dry run: report what would change without touching the file.
        ret['changes'] = __salt__['file.check_managed_changes'](
            name=name,
            source=None,
            source_hash={},
            source_hash_name=None,
            user=user,
            group=group,
            mode=mode,
            template=None,
            context=None,
            defaults=None,
            saltenv=__env__,
            contents=contents,
            skip_verify=False,
            **kwargs
        )

        if ret['changes']:
            ret['result'] = None
            ret['comment'] = 'Dataset will be serialized and stored into {0}'.format(
                name)

            if not show_changes:
                ret['changes']['diff'] = '<show_changes=False>'
        else:
            ret['result'] = True
            ret['comment'] = 'The file {0} is in the correct state'.format(name)
        return ret

    return __salt__['file.manage_file'](name=name,
                                        sfn='',
                                        ret=ret,
                                        source=None,
                                        source_sum={},
                                        user=user,
                                        group=group,
                                        mode=mode,
                                        saltenv=__env__,
                                        backup=backup,
                                        makedirs=makedirs,
                                        template=None,
                                        show_changes=show_changes,
                                        contents=contents)
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'):
    '''
    Create a special file similar to the 'nix mknod command. The supported
    device types are ``p`` (fifo pipe), ``c`` (character device), and ``b``
    (block device). Provide the major and minor numbers when specifying a
    character device or block device. A fifo pipe does not require this
    information. The command will create the necessary dirs if needed. If a
    file of the same name not of the same type/major/minor exists, it will
    not be overwritten or unlinked (deleted). This is logically in place as a
    safety measure because you can really shoot yourself in the foot here and
    it is the behavior of 'nix ``mknod``. It is also important to note that
    not just anyone can create special devices. Usually this is only done as
    root. If the state is executed as none other than root on a minion, you
    may receive a permission error.

    name
        name of the file

    ntype
        node type 'p' (fifo pipe), 'c' (character device), or 'b'
        (block device)

    major
        major number of the device; does not apply to a fifo pipe

    minor
        minor number of the device; does not apply to a fifo pipe

    user
        owning user of the device/pipe

    group
        owning group of the device/pipe

    mode
        permissions on the device/pipe

    .. versionadded:: 0.17.0
    '''
    name = os.path.expanduser(name)
    # result defaults to False; only the successful branches flip it.
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    if not name:
        return _error(ret, 'Must provide name to file.mknod')

    if ntype == 'c':
        # Check for file existence
        if __salt__['file.file_exists'](name):
            ret['comment'] = (
                'File exists and is not a character device {0}. Cowardly '
                'refusing to continue'.format(name)
            )

        # Check if it is a character device
        elif not __salt__['file.is_chrdev'](name):
            if __opts__['test']:
                ret['comment'] = (
                    'Character device {0} is set to be created'
                ).format(name)
                ret['result'] = None
            else:
                ret = __salt__['file.mknod'](name,
                                             ntype,
                                             major,
                                             minor,
                                             user,
                                             group,
                                             mode)

        # Check the major/minor
        else:
            devmaj, devmin = __salt__['file.get_devmm'](name)
            if (major, minor) != (devmaj, devmin):
                # Mismatched numbers: refuse to touch the node (matches
                # 'nix mknod safety behavior described in the docstring).
                ret['comment'] = (
                    'Character device {0} exists and has a different '
                    'major/minor {1}/{2}. Cowardly refusing to continue'
                    .format(name, devmaj, devmin)
                )
            # Check the perms
            else:
                ret = __salt__['file.check_perms'](name,
                                                   None,
                                                   user,
                                                   group,
                                                   mode)[0]
                if not ret['changes']:
                    ret['comment'] = (
                        'Character device {0} is in the correct state'.format(
                            name
                        )
                    )

    elif ntype == 'b':
        # Check for file existence
        if __salt__['file.file_exists'](name):
            ret['comment'] = (
                'File exists and is not a block device {0}. Cowardly '
                'refusing to continue'.format(name)
            )

        # Check if it is a block device
        elif not __salt__['file.is_blkdev'](name):
            if __opts__['test']:
                ret['comment'] = (
                    'Block device {0} is set to be created'
                ).format(name)
                ret['result'] = None
            else:
                ret = __salt__['file.mknod'](name,
                                             ntype,
                                             major,
                                             minor,
                                             user,
                                             group,
                                             mode)

        # Check the major/minor
        else:
            devmaj, devmin = __salt__['file.get_devmm'](name)
            if (major, minor) != (devmaj, devmin):
                ret['comment'] = (
                    'Block device {0} exists and has a different major/minor '
                    '{1}/{2}. Cowardly refusing to continue'.format(
                        name, devmaj, devmin
                    )
                )
            # Check the perms
            else:
                ret = __salt__['file.check_perms'](name,
                                                   None,
                                                   user,
                                                   group,
                                                   mode)[0]
                if not ret['changes']:
                    ret['comment'] = (
                        'Block device {0} is in the correct state'.format(name)
                    )

    elif ntype == 'p':
        # Check for file existence
        if __salt__['file.file_exists'](name):
            ret['comment'] = (
                'File exists and is not a fifo pipe {0}. Cowardly refusing '
                'to continue'.format(name)
            )

        # Check if it is a fifo
        elif not __salt__['file.is_fifo'](name):
            if __opts__['test']:
                ret['comment'] = 'Fifo pipe {0} is set to be created'.format(
                    name
                )
                ret['result'] = None
            else:
                ret = __salt__['file.mknod'](name,
                                             ntype,
                                             major,
                                             minor,
                                             user,
                                             group,
                                             mode)

        # Check the perms
        else:
            ret = __salt__['file.check_perms'](name,
                                               None,
                                               user,
                                               group,
                                               mode)[0]
            if not ret['changes']:
                ret['comment'] = (
                    'Fifo pipe {0} is in the correct state'.format(name)
                )

    else:
        ret['comment'] = (
            'Node type unavailable: \'{0}\'. Available node types are '
            'character (\'c\'), block (\'b\'), and pipe (\'p\')'.format(ntype)
        )
    return ret
Cowardly refusing ' 'to continue'.format(name) ) # Check if it is a fifo elif not __salt__['file.is_fifo'](name): if __opts__['test']: ret['comment'] = 'Fifo pipe {0} is set to be created'.format( name ) ret['result'] = None else: ret = __salt__['file.mknod'](name, ntype, major, minor, user, group, mode) # Check the perms else: ret = __salt__['file.check_perms'](name, None, user, group, mode)[0] if not ret['changes']: ret['comment'] = ( 'Fifo pipe {0} is in the correct state'.format(name) ) else: ret['comment'] = ( 'Node type unavailable: \'{0}\'. Available node types are ' 'character (\'c\'), block (\'b\'), and pipe (\'p\')'.format(ntype) ) return ret def mod_run_check_cmd(cmd, filename, **check_cmd_opts): ''' Execute the check_cmd logic. Return a result dict if ``check_cmd`` succeeds (check_cmd == 0) otherwise return True ''' log.debug('running our check_cmd') _cmd = '{0} {1}'.format(cmd, filename) cret = __salt__['cmd.run_all'](_cmd, **check_cmd_opts) if cret['retcode'] != 0: ret = {'comment': 'check_cmd execution failed', 'skip_watch': True, 'result': False} if cret.get('stdout'): ret['comment'] += '\n' + cret['stdout'] if cret.get('stderr'): ret['comment'] += '\n' + cret['stderr'] return ret # No reason to stop, return True return True def decode(name, encoded_data=None, contents_pillar=None, encoding_type='base64', checksum='md5'): ''' Decode an encoded file and write it to disk .. versionadded:: 2016.3.0 name Path of the file to be written. encoded_data The encoded file. Either this option or ``contents_pillar`` must be specified. contents_pillar A Pillar path to the encoded file. Uses the same path syntax as :py:func:`pillar.get <salt.modules.pillar.get>`. The :py:func:`hashutil.base64_encodefile <salt.modules.hashutil.base64_encodefile>` function can load encoded content into Pillar. Either this option or ``encoded_data`` must be specified. encoding_type : ``base64`` The type of encoding. 
def decode(name,
           encoded_data=None,
           contents_pillar=None,
           encoding_type='base64',
           checksum='md5'):
    '''
    Decode an encoded file and write it to disk

    .. versionadded:: 2016.3.0

    name
        Path of the file to be written.
    encoded_data
        The encoded file. Either this option or ``contents_pillar`` must be
        specified.
    contents_pillar
        A Pillar path to the encoded file. Uses the same path syntax as
        :py:func:`pillar.get <salt.modules.pillar.get>`. Either this option
        or ``encoded_data`` must be specified.
    encoding_type : ``base64``
        The type of encoding.
    checksum : ``md5``
        The hashing algorithm to use to generate checksums. Wraps the
        :py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
        function.

    Usage:

    .. code-block:: yaml

        write_base64_encoded_string_to_a_file:
          file.decode:
            - name: /tmp/new_file
            - encoding_type: base64
            - contents_pillar: mypillar:thefile

    Be careful with multi-line strings that the YAML indentation is correct.
    '''
    # NOTE(review): the body only ever calls base64 hashutil helpers, so in
    # practice only encoding_type='base64' appears supported -- confirm
    # against the hashutil module.
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    # Exactly one content source must be given.
    if not (encoded_data or contents_pillar):
        raise CommandExecutionError("Specify either the 'encoded_data' or "
                                    "'contents_pillar' argument.")
    elif encoded_data and contents_pillar:
        raise CommandExecutionError("Specify only one 'encoded_data' or "
                                    "'contents_pillar' argument.")
    elif encoded_data:
        content = encoded_data
    elif contents_pillar:
        content = __salt__['pillar.get'](contents_pillar, False)
        if content is False:
            raise CommandExecutionError('Pillar data not found.')
    else:
        raise CommandExecutionError('No contents given.')

    dest_exists = __salt__['file.file_exists'](name)
    if dest_exists:
        # Compare checksums of the decoded payload and the existing file to
        # decide whether a write is needed.
        instr = __salt__['hashutil.base64_decodestring'](content)
        insum = __salt__['hashutil.digest'](instr, checksum)
        del instr  # no need to keep in-memory after we have the hash
        outsum = __salt__['hashutil.digest_file'](name, checksum)

        if insum != outsum:
            ret['changes'] = {
                'old': outsum,
                'new': insum,
            }

        if not ret['changes']:
            # Checksums match: file already holds the decoded content.
            ret['comment'] = 'File is in the correct state.'
            ret['result'] = True
            return ret

    if __opts__['test'] is True:
        ret['comment'] = 'File is set to be updated.'
        ret['result'] = None
        return ret

    ret['result'] = __salt__['hashutil.base64_decodefile'](content, name)
    ret['comment'] = 'File was updated.'

    if not ret['changes']:
        # Destination did not exist before: record the newly written hash.
        ret['changes'] = {
            'old': None,
            'new': __salt__['hashutil.digest_file'](name, checksum),
        }

    return ret
./CrossVul/dataset_final_sorted/CWE-200/py/good_3325_3
crossvul-python_data_good_5206_0
# Copyright (C) 2016 JWCrypto Project Contributors - see LICENSE file import abc import os import struct from binascii import hexlify, unhexlify from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import constant_time, hashes, hmac from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import utils as ec_utils from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from cryptography.hazmat.primitives.kdf.concatkdf import ConcatKDFHash from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC from cryptography.hazmat.primitives.padding import PKCS7 import six from jwcrypto.common import InvalidCEKeyLength from jwcrypto.common import InvalidJWAAlgorithm from jwcrypto.common import InvalidJWEKeyLength from jwcrypto.common import InvalidJWEKeyType from jwcrypto.common import InvalidJWEOperation from jwcrypto.common import base64url_decode, base64url_encode from jwcrypto.common import json_decode from jwcrypto.jwk import JWK # Implements RFC 7518 - JSON Web Algorithms (JWA) @six.add_metaclass(abc.ABCMeta) class JWAAlgorithm(object): @abc.abstractproperty def name(self): """The algorithm Name""" pass @abc.abstractproperty def description(self): """A short description""" pass @abc.abstractproperty def keysize(self): """The actual/recommended/minimum key size""" pass @abc.abstractproperty def algorithm_usage_location(self): """One of 'alg', 'enc' or 'JWK'""" pass @abc.abstractproperty def algorithm_use(self): """One of 'sig', 'kex', 'enc'""" pass def _bitsize(x): return len(x) * 8 def _inbytes(x): return x // 8 def _randombits(x): if x % 8 != 0: raise ValueError("lenght must be a multiple of 8") return os.urandom(_inbytes(x)) # Note: the number of bits should be a multiple of 16 def _encode_int(n, bits): e = '{:x}'.format(n) ilen = ((bits + 7) 
// 8) * 2 # number of bytes rounded up times 2 bytes return unhexlify(e.rjust(ilen, '0')[:ilen]) def _decode_int(n): return int(hexlify(n), 16) class _RawJWS(object): def sign(self, key, payload): raise NotImplementedError def verify(self, key, payload, signature): raise NotImplementedError class _RawHMAC(_RawJWS): def __init__(self, hashfn): self.backend = default_backend() self.hashfn = hashfn def _hmac_setup(self, key, payload): h = hmac.HMAC(key, self.hashfn, backend=self.backend) h.update(payload) return h def sign(self, key, payload): skey = base64url_decode(key.get_op_key('sign')) h = self._hmac_setup(skey, payload) return h.finalize() def verify(self, key, payload, signature): vkey = base64url_decode(key.get_op_key('verify')) h = self._hmac_setup(vkey, payload) h.verify(signature) class _RawRSA(_RawJWS): def __init__(self, padfn, hashfn): self.padfn = padfn self.hashfn = hashfn def sign(self, key, payload): skey = key.get_op_key('sign') signer = skey.signer(self.padfn, self.hashfn) signer.update(payload) return signer.finalize() def verify(self, key, payload, signature): pkey = key.get_op_key('verify') verifier = pkey.verifier(signature, self.padfn, self.hashfn) verifier.update(payload) verifier.verify() class _RawEC(_RawJWS): def __init__(self, curve, hashfn): self._curve = curve self.hashfn = hashfn @property def curve(self): return self._curve def sign(self, key, payload): skey = key.get_op_key('sign', self._curve) signer = skey.signer(ec.ECDSA(self.hashfn)) signer.update(payload) signature = signer.finalize() r, s = ec_utils.decode_rfc6979_signature(signature) l = key.get_curve(self._curve).key_size return _encode_int(r, l) + _encode_int(s, l) def verify(self, key, payload, signature): pkey = key.get_op_key('verify', self._curve) r = signature[:len(signature) // 2] s = signature[len(signature) // 2:] enc_signature = ec_utils.encode_rfc6979_signature( int(hexlify(r), 16), int(hexlify(s), 16)) verifier = pkey.verifier(enc_signature, ec.ECDSA(self.hashfn)) 
class _RawNone(_RawJWS):
    # The "none" JWS algorithm: signing yields an empty signature and
    # verification is always rejected.

    def sign(self, key, payload):
        return ''

    def verify(self, key, payload, signature):
        raise InvalidSignature('The "none" signature cannot be verified')


# --- Registered JWS signature algorithms (RFC 7518 section 3.1) ---
# Each class combines a raw primitive (_RawHMAC / _RawRSA / _RawEC) with the
# JWAAlgorithm metadata (registered name, description, key size, usage).

class _HS256(_RawHMAC, JWAAlgorithm):

    name = "HS256"
    description = "HMAC using SHA-256"
    keysize = 256
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_HS256, self).__init__(hashes.SHA256())


class _HS384(_RawHMAC, JWAAlgorithm):

    name = "HS384"
    description = "HMAC using SHA-384"
    keysize = 384
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_HS384, self).__init__(hashes.SHA384())


class _HS512(_RawHMAC, JWAAlgorithm):

    name = "HS512"
    description = "HMAC using SHA-512"
    keysize = 512
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_HS512, self).__init__(hashes.SHA512())


class _RS256(_RawRSA, JWAAlgorithm):

    name = "RS256"
    description = "RSASSA-PKCS1-v1_5 using SHA-256"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_RS256, self).__init__(padding.PKCS1v15(), hashes.SHA256())


class _RS384(_RawRSA, JWAAlgorithm):

    name = "RS384"
    description = "RSASSA-PKCS1-v1_5 using SHA-384"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_RS384, self).__init__(padding.PKCS1v15(), hashes.SHA384())


class _RS512(_RawRSA, JWAAlgorithm):

    name = "RS512"
    description = "RSASSA-PKCS1-v1_5 using SHA-512"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_RS512, self).__init__(padding.PKCS1v15(), hashes.SHA512())


class _ES256(_RawEC, JWAAlgorithm):

    name = "ES256"
    description = "ECDSA using P-256 and SHA-256"
    keysize = 256
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_ES256, self).__init__('P-256', hashes.SHA256())


class _ES384(_RawEC, JWAAlgorithm):

    name = "ES384"
    description = "ECDSA using P-384 and SHA-384"
    keysize = 384
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        super(_ES384, self).__init__('P-384', hashes.SHA384())


class _ES512(_RawEC, JWAAlgorithm):

    name = "ES512"
    description = "ECDSA using P-521 and SHA-512"
    keysize = 512
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        # Note: ES512 uses curve P-521 (not a typo) per RFC 7518.
        super(_ES512, self).__init__('P-521', hashes.SHA512())


class _PS256(_RawRSA, JWAAlgorithm):

    name = "PS256"
    description = "RSASSA-PSS using SHA-256 and MGF1 with SHA-256"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        padfn = padding.PSS(padding.MGF1(hashes.SHA256()),
                            hashes.SHA256.digest_size)
        super(_PS256, self).__init__(padfn, hashes.SHA256())


class _PS384(_RawRSA, JWAAlgorithm):

    name = "PS384"
    description = "RSASSA-PSS using SHA-384 and MGF1 with SHA-384"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        padfn = padding.PSS(padding.MGF1(hashes.SHA384()),
                            hashes.SHA384.digest_size)
        super(_PS384, self).__init__(padfn, hashes.SHA384())


class _PS512(_RawRSA, JWAAlgorithm):

    name = "PS512"
    description = "RSASSA-PSS using SHA-512 and MGF1 with SHA-512"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'

    def __init__(self):
        padfn = padding.PSS(padding.MGF1(hashes.SHA512()),
                            hashes.SHA512.digest_size)
        super(_PS512, self).__init__(padfn, hashes.SHA512())


class _None(_RawNone, JWAAlgorithm):

    name = "none"
    description = "No digital signature or MAC performed"
    keysize = 0
    algorithm_usage_location = 'alg'
    algorithm_use = 'sig'


class _RawKeyMgmt(object):
    # Abstract interface for JWE key-management algorithms.  wrap() returns
    # a dict with at least 'cek' (and optionally 'ek'/'header'); unwrap()
    # returns the raw CEK bytes.

    def wrap(self, key, bitsize, cek, headers):
        raise NotImplementedError

    def unwrap(self, key, bitsize, ek, headers):
        raise NotImplementedError
class _RSA(_RawKeyMgmt):
    # RSA key-encryption: the CEK is encrypted with the recipient's RSA
    # public key using the padding scheme supplied by the subclass.

    def __init__(self, padfn):
        self.padfn = padfn

    def _check_key(self, key):
        if not isinstance(key, JWK):
            raise ValueError('key is not a JWK object')
        if key.key_type != 'RSA':
            raise InvalidJWEKeyType('RSA', key.key_type)

    # FIXME: get key size and insure > 2048 bits
    def wrap(self, key, bitsize, cek, headers):
        self._check_key(key)
        if not cek:
            cek = _randombits(bitsize)
        rk = key.get_op_key('wrapKey')
        ek = rk.encrypt(cek, self.padfn)
        return {'cek': cek, 'ek': ek}

    def unwrap(self, key, bitsize, ek, headers):
        self._check_key(key)
        rk = key.get_op_key('decrypt')
        cek = rk.decrypt(ek, self.padfn)
        if _bitsize(cek) != bitsize:
            raise InvalidJWEKeyLength(bitsize, _bitsize(cek))
        return cek


class _Rsa15(_RSA, JWAAlgorithm):

    name = 'RSA1_5'
    description = "RSAES-PKCS1-v1_5"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'

    def __init__(self):
        super(_Rsa15, self).__init__(padding.PKCS1v15())

    def unwrap(self, key, bitsize, ek, headers):
        self._check_key(key)
        # Address MMA attack by implementing RFC 3218 - 2.3.2. Random Filling
        # provides a random cek that will cause the decryption engine to
        # run to the end, but will fail decryption later.

        # always generate a random cek so we spend roughly the
        # same time as in the exception side of the branch
        cek = _randombits(bitsize)
        try:
            cek = super(_Rsa15, self).unwrap(key, bitsize, ek, headers)
            # always raise so we always run through the exception handling
            # code in all cases
            raise Exception('Dummy')
        except Exception:  # pylint: disable=broad-except
            return cek


class _RsaOaep(_RSA, JWAAlgorithm):

    name = 'RSA-OAEP'
    description = "RSAES OAEP using default parameters"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'

    def __init__(self):
        super(_RsaOaep, self).__init__(
            padding.OAEP(padding.MGF1(hashes.SHA1()),
                         hashes.SHA1(), None))


class _RsaOaep256(_RSA, JWAAlgorithm):  # noqa: ignore=N801

    name = 'RSA-OAEP-256'
    description = "RSAES OAEP using SHA-256 and MGF1 with SHA-256"
    keysize = 2048
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'

    def __init__(self):
        super(_RsaOaep256, self).__init__(
            padding.OAEP(padding.MGF1(hashes.SHA256()),
                         hashes.SHA256(), None))


class _AesKw(_RawKeyMgmt):
    # AES Key Wrap (RFC 3394) implemented on top of raw AES-ECB blocks.
    # Subclasses set `keysize` to select the KEK length.

    keysize = None

    def __init__(self):
        self.backend = default_backend()

    def _get_key(self, key, op):
        if not isinstance(key, JWK):
            raise ValueError('key is not a JWK object')
        if key.key_type != 'oct':
            raise InvalidJWEKeyType('oct', key.key_type)
        rk = base64url_decode(key.get_op_key(op))
        if _bitsize(rk) != self.keysize:
            raise InvalidJWEKeyLength(self.keysize, _bitsize(rk))
        return rk

    def wrap(self, key, bitsize, cek, headers):
        rk = self._get_key(key, 'encrypt')
        if not cek:
            cek = _randombits(bitsize)

        # Implement RFC 3394 Key Wrap - 2.2.1
        # (original comment said "Key Unwrap - 2.2.2"; this is the wrap side)
        # TODO: Use cryptography once issue #1733 is resolved
        iv = 'a6a6a6a6a6a6a6a6'  # default Initial Value from RFC 3394 2.2.3.1
        a = unhexlify(iv)
        r = [cek[i:i + 8] for i in range(0, len(cek), 8)]
        n = len(r)
        for j in range(0, 6):
            for i in range(0, n):
                e = Cipher(algorithms.AES(rk), modes.ECB(),
                           backend=self.backend).encryptor()
                b = e.update(a + r[i]) + e.finalize()
                # XOR the step counter into the integrity-check register.
                a = _encode_int(_decode_int(b[:8]) ^ ((n * j) + i + 1), 64)
                r[i] = b[-8:]
        ek = a
        for i in range(0, n):
            ek += r[i]
        return {'cek': cek, 'ek': ek}

    def unwrap(self, key, bitsize, ek, headers):
        rk = self._get_key(key, 'decrypt')

        # Implement RFC 3394 Key Unwrap - 2.2.2
        # TODO: Use cryptography once issue #1733 is resolved
        iv = 'a6a6a6a6a6a6a6a6'
        aiv = unhexlify(iv)

        r = [ek[i:i + 8] for i in range(0, len(ek), 8)]
        a = r.pop(0)
        n = len(r)
        for j in range(5, -1, -1):
            for i in range(n - 1, -1, -1):
                da = _decode_int(a)
                atr = _encode_int((da ^ ((n * j) + i + 1)), 64) + r[i]
                d = Cipher(algorithms.AES(rk), modes.ECB(),
                           backend=self.backend).decryptor()
                b = d.update(atr) + d.finalize()
                a = b[:8]
                r[i] = b[-8:]

        # Integrity check: the recovered register must equal the default IV.
        if a != aiv:
            raise RuntimeError('Decryption Failed')

        cek = b''.join(r)
        if _bitsize(cek) != bitsize:
            raise InvalidJWEKeyLength(bitsize, _bitsize(cek))
        return cek


class _A128KW(_AesKw, JWAAlgorithm):

    name = 'A128KW'
    description = "AES Key Wrap using 128-bit key"
    keysize = 128
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'


class _A192KW(_AesKw, JWAAlgorithm):

    name = 'A192KW'
    description = "AES Key Wrap using 192-bit key"
    keysize = 192
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'
class _A256KW(_AesKw, JWAAlgorithm):

    name = 'A256KW'
    description = "AES Key Wrap using 256-bit key"
    keysize = 256
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'


class _AesGcmKw(_RawKeyMgmt):
    # Key wrapping with AES-GCM: the CEK is encrypted under the KEK with a
    # fresh 96-bit IV; IV and authentication tag travel in the JWE header.

    keysize = None

    def __init__(self):
        self.backend = default_backend()

    def _get_key(self, key, op):
        if not isinstance(key, JWK):
            raise ValueError('key is not a JWK object')
        if key.key_type != 'oct':
            raise InvalidJWEKeyType('oct', key.key_type)
        rk = base64url_decode(key.get_op_key(op))
        if _bitsize(rk) != self.keysize:
            raise InvalidJWEKeyLength(self.keysize, _bitsize(rk))
        return rk

    def wrap(self, key, bitsize, cek, headers):
        rk = self._get_key(key, 'encrypt')
        if not cek:
            cek = _randombits(bitsize)

        iv = _randombits(96)
        cipher = Cipher(algorithms.AES(rk), modes.GCM(iv),
                        backend=self.backend)
        encryptor = cipher.encryptor()
        ek = encryptor.update(cek) + encryptor.finalize()

        tag = encryptor.tag
        return {'cek': cek, 'ek': ek,
                'header': {'iv': base64url_encode(iv),
                           'tag': base64url_encode(tag)}}

    def unwrap(self, key, bitsize, ek, headers):
        rk = self._get_key(key, 'decrypt')

        if 'iv' not in headers:
            raise ValueError('Invalid Header, missing "iv" parameter')
        iv = base64url_decode(headers['iv'])
        if 'tag' not in headers:
            raise ValueError('Invalid Header, missing "tag" parameter')
        tag = base64url_decode(headers['tag'])

        cipher = Cipher(algorithms.AES(rk), modes.GCM(iv, tag),
                        backend=self.backend)
        decryptor = cipher.decryptor()
        cek = decryptor.update(ek) + decryptor.finalize()
        if _bitsize(cek) != bitsize:
            raise InvalidJWEKeyLength(bitsize, _bitsize(cek))
        return cek


class _A128GcmKw(_AesGcmKw, JWAAlgorithm):

    name = 'A128GCMKW'
    description = "Key wrapping with AES GCM using 128-bit key"
    keysize = 128
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'


class _A192GcmKw(_AesGcmKw, JWAAlgorithm):

    name = 'A192GCMKW'
    description = "Key wrapping with AES GCM using 192-bit key"
    keysize = 192
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'


class _A256GcmKw(_AesGcmKw, JWAAlgorithm):

    name = 'A256GCMKW'
    description = "Key wrapping with AES GCM using 256-bit key"
    keysize = 256
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'


class _Pbes2HsAesKw(_RawKeyMgmt):
    # PBES2 key wrapping: a KEK is derived from a password via PBKDF2-HMAC
    # and then used with AES Key Wrap. Subclasses set name/keysize/hashsize.

    name = None
    keysize = None
    hashsize = None

    def __init__(self):
        self.backend = default_backend()
        self.aeskwmap = {128: _A128KW, 192: _A192KW, 256: _A256KW}

    def _get_key(self, alg, key, p2s, p2c):
        if isinstance(key, bytes):
            plain = key
        else:
            plain = key.encode('utf8')
        # RFC 7518 4.8.1.1: salt is the algorithm name, a zero byte, then p2s.
        salt = bytes(self.name.encode('utf8')) + b'\x00' + p2s

        if self.hashsize == 256:
            hashalg = hashes.SHA256()
        elif self.hashsize == 384:
            hashalg = hashes.SHA384()
        elif self.hashsize == 512:
            hashalg = hashes.SHA512()
        else:
            raise ValueError('Unknown Hash Size')

        kdf = PBKDF2HMAC(algorithm=hashalg, length=_inbytes(self.keysize),
                         salt=salt, iterations=p2c, backend=self.backend)
        rk = kdf.derive(plain)
        if _bitsize(rk) != self.keysize:
            raise InvalidJWEKeyLength(self.keysize, len(rk))
        return JWK(kty="oct", use="enc", k=base64url_encode(rk))

    def wrap(self, key, bitsize, cek, headers):
        p2s = _randombits(128)
        p2c = 8192  # PBKDF2 iteration count advertised in the header
        kek = self._get_key(headers['alg'], key, p2s, p2c)

        aeskw = self.aeskwmap[self.keysize]()
        ret = aeskw.wrap(kek, bitsize, cek, headers)
        ret['header'] = {'p2s': base64url_encode(p2s), 'p2c': p2c}
        return ret

    def unwrap(self, key, bitsize, ek, headers):
        if 'p2s' not in headers:
            raise ValueError('Invalid Header, missing "p2s" parameter')
        if 'p2c' not in headers:
            raise ValueError('Invalid Header, missing "p2c" parameter')
        p2s = base64url_decode(headers['p2s'])
        p2c = headers['p2c']
        # NOTE(review): p2c comes straight from the (attacker-controlled)
        # token header with no upper bound, so a huge value forces an
        # arbitrarily expensive PBKDF2 run -- consider capping it.
        kek = self._get_key(headers['alg'], key, p2s, p2c)

        aeskw = self.aeskwmap[self.keysize]()
        return aeskw.unwrap(kek, bitsize, ek, headers)


class _Pbes2Hs256A128Kw(_Pbes2HsAesKw, JWAAlgorithm):

    name = 'PBES2-HS256+A128KW'
    description = 'PBES2 with HMAC SHA-256 and "A128KW" wrapping'
    keysize = 128
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'
    hashsize = 256


class _Pbes2Hs384A192Kw(_Pbes2HsAesKw, JWAAlgorithm):

    name = 'PBES2-HS384+A192KW'
    description = 'PBES2 with HMAC SHA-384 and "A192KW" wrapping'
    keysize = 192
    algorithm_usage_location = 'alg'
    algorithm_use = 'kex'
    hashsize = 384
with HMAC SHA-384 and "A192KW" wrapping' keysize = 192 algorithm_usage_location = 'alg' algorithm_use = 'kex' hashsize = 384 class _Pbes2Hs512A256Kw(_Pbes2HsAesKw, JWAAlgorithm): name = 'PBES2-HS512+A256KW' description = 'PBES2 with HMAC SHA-512 and "A256KW" wrapping' keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'kex' hashsize = 512 class _Direct(_RawKeyMgmt, JWAAlgorithm): name = 'dir' description = "Direct use of a shared symmetric key" keysize = 128 algorithm_usage_location = 'alg' algorithm_use = 'kex' def _check_key(self, key): if not isinstance(key, JWK): raise ValueError('key is not a JWK object') if key.key_type != 'oct': raise InvalidJWEKeyType('oct', key.key_type) def wrap(self, key, bitsize, cek, headers): self._check_key(key) if cek: return (cek, None) k = base64url_decode(key.get_op_key('encrypt')) if _bitsize(k) != bitsize: raise InvalidCEKeyLength(bitsize, _bitsize(k)) return {'cek': k} def unwrap(self, key, bitsize, ek, headers): self._check_key(key) if ek != b'': raise ValueError('Invalid Encryption Key.') cek = base64url_decode(key.get_op_key('decrypt')) if _bitsize(cek) != bitsize: raise InvalidJWEKeyLength(bitsize, _bitsize(cek)) return cek class _EcdhEs(_RawKeyMgmt): name = 'ECDH-ES' description = "ECDH-ES using Concat KDF" algorithm_usage_location = 'alg' algorithm_use = 'kex' keysize = None def __init__(self): self.backend = default_backend() self.aeskwmap = {128: _A128KW, 192: _A192KW, 256: _A256KW} def _check_key(self, key): if not isinstance(key, JWK): raise ValueError('key is not a JWK object') if key.key_type != 'EC': raise InvalidJWEKeyType('EC', key.key_type) def _derive(self, privkey, pubkey, alg, bitsize, headers): # OtherInfo is defined in NIST SP 56A 5.8.1.2.1 # AlgorithmID otherinfo = struct.pack('>I', len(alg)) otherinfo += bytes(alg.encode('utf8')) # PartyUInfo apu = base64url_decode(headers['apu']) if 'apu' in headers else b'' otherinfo += struct.pack('>I', len(apu)) otherinfo += apu # PartyVInfo apv = 
base64url_decode(headers['apv']) if 'apv' in headers else b'' otherinfo += struct.pack('>I', len(apv)) otherinfo += apv # SuppPubInfo otherinfo += struct.pack('>I', bitsize) # no SuppPrivInfo shared_key = privkey.exchange(ec.ECDH(), pubkey) ckdf = ConcatKDFHash(algorithm=hashes.SHA256(), length=_inbytes(bitsize), otherinfo=otherinfo, backend=self.backend) return ckdf.derive(shared_key) def wrap(self, key, bitsize, cek, headers): self._check_key(key) if self.keysize is None: if cek is not None: raise InvalidJWEOperation('ECDH-ES cannot use an existing CEK') alg = headers['enc'] else: bitsize = self.keysize alg = headers['alg'] epk = JWK.generate(kty=key.key_type, crv=key.key_curve) dk = self._derive(epk.get_op_key('unwrapKey'), key.get_op_key('wrapKey'), alg, bitsize, headers) if self.keysize is None: ret = {'cek': dk} else: aeskw = self.aeskwmap[bitsize]() kek = JWK(kty="oct", use="enc", k=base64url_encode(dk)) ret = aeskw.wrap(kek, bitsize, cek, headers) ret['header'] = {'epk': json_decode(epk.export_public())} return ret def unwrap(self, key, bitsize, ek, headers): if 'epk' not in headers: raise ValueError('Invalid Header, missing "epk" parameter') self._check_key(key) if self.keysize is None: alg = headers['enc'] else: bitsize = self.keysize alg = headers['alg'] epk = JWK(**headers['epk']) dk = self._derive(key.get_op_key('unwrapKey'), epk.get_op_key('wrapKey'), alg, bitsize, headers) if self.keysize is None: return dk else: aeskw = self.aeskwmap[bitsize]() kek = JWK(kty="oct", use="enc", k=base64url_encode(dk)) cek = aeskw.unwrap(kek, bitsize, ek, headers) return cek class _EcdhEsAes128Kw(_EcdhEs, JWAAlgorithm): name = 'ECDH-ES+A128KW' description = 'ECDH-ES using Concat KDF and "A128KW" wrapping' keysize = 128 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _EcdhEsAes192Kw(_EcdhEs, JWAAlgorithm): name = 'ECDH-ES+A192KW' description = 'ECDH-ES using Concat KDF and "A192KW" wrapping' keysize = 192 algorithm_usage_location = 'alg' algorithm_use = 
'kex' class _EcdhEsAes256Kw(_EcdhEs, JWAAlgorithm): name = 'ECDH-ES+A256KW' description = 'ECDH-ES using Concat KDF and "A128KW" wrapping' keysize = 256 algorithm_usage_location = 'alg' algorithm_use = 'kex' class _RawJWE(object): def encrypt(self, k, a, m): raise NotImplementedError def decrypt(self, k, a, iv, e, t): raise NotImplementedError class _AesCbcHmacSha2(_RawJWE): keysize = None def __init__(self, hashfn): self.backend = default_backend() self.hashfn = hashfn self.blocksize = algorithms.AES.block_size self.wrap_key_size = self.keysize * 2 def _mac(self, k, a, iv, e): al = _encode_int(_bitsize(a), 64) h = hmac.HMAC(k, self.hashfn, backend=self.backend) h.update(a) h.update(iv) h.update(e) h.update(al) m = h.finalize() return m[:_inbytes(self.keysize)] # RFC 7518 - 5.2.2 def encrypt(self, k, a, m): """ Encrypt according to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authentication Data :param m: Plaintext Returns a dictionary with the computed data. """ hkey = k[:_inbytes(self.keysize)] ekey = k[_inbytes(self.keysize):] # encrypt iv = _randombits(self.blocksize) cipher = Cipher(algorithms.AES(ekey), modes.CBC(iv), backend=self.backend) encryptor = cipher.encryptor() padder = PKCS7(self.blocksize).padder() padded_data = padder.update(m) + padder.finalize() e = encryptor.update(padded_data) + encryptor.finalize() # mac t = self._mac(hkey, a, iv, e) return (iv, e, t) def decrypt(self, k, a, iv, e, t): """ Decrypt according to the selected encryption and hashing functions. 
:param k: Encryption key (optional) :param a: Additional Authenticated Data :param iv: Initialization Vector :param e: Ciphertext :param t: Authentication Tag Returns plaintext or raises an error """ hkey = k[:_inbytes(self.keysize)] dkey = k[_inbytes(self.keysize):] # verify mac if not constant_time.bytes_eq(t, self._mac(hkey, a, iv, e)): raise InvalidSignature('Failed to verify MAC') # decrypt cipher = Cipher(algorithms.AES(dkey), modes.CBC(iv), backend=self.backend) decryptor = cipher.decryptor() d = decryptor.update(e) + decryptor.finalize() unpadder = PKCS7(self.blocksize).unpadder() return unpadder.update(d) + unpadder.finalize() class _A128CbcHs256(_AesCbcHmacSha2, JWAAlgorithm): name = 'A128CBC-HS256' description = "AES_128_CBC_HMAC_SHA_256 authenticated" keysize = 128 algorithm_usage_location = 'enc' algorithm_use = 'enc' def __init__(self): super(_A128CbcHs256, self).__init__(hashes.SHA256()) class _A192CbcHs384(_AesCbcHmacSha2, JWAAlgorithm): name = 'A192CBC-HS384' description = "AES_192_CBC_HMAC_SHA_384 authenticated" keysize = 192 algorithm_usage_location = 'enc' algorithm_use = 'enc' def __init__(self): super(_A192CbcHs384, self).__init__(hashes.SHA384()) class _A256CbcHs512(_AesCbcHmacSha2, JWAAlgorithm): name = 'A256CBC-HS512' description = "AES_256_CBC_HMAC_SHA_512 authenticated" keysize = 256 algorithm_usage_location = 'enc' algorithm_use = 'enc' def __init__(self): super(_A256CbcHs512, self).__init__(hashes.SHA512()) class _AesGcm(_RawJWE): keysize = None def __init__(self): self.backend = default_backend() self.wrap_key_size = self.keysize # RFC 7518 - 5.3 def encrypt(self, k, a, m): """ Encrypt accoriding to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authentication Data :param m: Plaintext Returns a dictionary with the computed data. 
""" iv = _randombits(96) cipher = Cipher(algorithms.AES(k), modes.GCM(iv), backend=self.backend) encryptor = cipher.encryptor() encryptor.authenticate_additional_data(a) e = encryptor.update(m) + encryptor.finalize() return (iv, e, encryptor.tag) def decrypt(self, k, a, iv, e, t): """ Decrypt accoriding to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authenticated Data :param iv: Initialization Vector :param e: Ciphertext :param t: Authentication Tag Returns plaintext or raises an error """ cipher = Cipher(algorithms.AES(k), modes.GCM(iv, t), backend=self.backend) decryptor = cipher.decryptor() decryptor.authenticate_additional_data(a) return decryptor.update(e) + decryptor.finalize() class _A128Gcm(_AesGcm, JWAAlgorithm): name = 'A128GCM' description = "AES GCM using 128-bit key" keysize = 128 algorithm_usage_location = 'enc' algorithm_use = 'enc' class _A192Gcm(_AesGcm, JWAAlgorithm): name = 'A192GCM' description = "AES GCM using 192-bit key" keysize = 192 algorithm_usage_location = 'enc' algorithm_use = 'enc' class _A256Gcm(_AesGcm, JWAAlgorithm): name = 'A256GCM' description = "AES GCM using 256-bit key" keysize = 256 algorithm_usage_location = 'enc' algorithm_use = 'enc' class JWA(object): """JWA Signing Algorithms. This class provides access to all JWA algorithms. 
""" algorithms_registry = { 'HS256': _HS256, 'HS384': _HS384, 'HS512': _HS512, 'RS256': _RS256, 'RS384': _RS384, 'RS512': _RS512, 'ES256': _ES256, 'ES384': _ES384, 'ES512': _ES512, 'PS256': _PS256, 'PS384': _PS384, 'PS512': _PS512, 'none': _None, 'RSA1_5': _Rsa15, 'RSA-OAEP': _RsaOaep, 'RSA-OAEP-256': _RsaOaep256, 'A128KW': _A128KW, 'A192KW': _A192KW, 'A256KW': _A256KW, 'dir': _Direct, 'ECDH-ES': _EcdhEs, 'ECDH-ES+A128KW': _EcdhEsAes128Kw, 'ECDH-ES+A192KW': _EcdhEsAes192Kw, 'ECDH-ES+A256KW': _EcdhEsAes256Kw, 'A128GCMKW': _A128GcmKw, 'A192GCMKW': _A192GcmKw, 'A256GCMKW': _A256GcmKw, 'PBES2-HS256+A128KW': _Pbes2Hs256A128Kw, 'PBES2-HS384+A192KW': _Pbes2Hs384A192Kw, 'PBES2-HS512+A256KW': _Pbes2Hs512A256Kw, 'A128CBC-HS256': _A128CbcHs256, 'A192CBC-HS384': _A192CbcHs384, 'A256CBC-HS512': _A256CbcHs512, 'A128GCM': _A128Gcm, 'A192GCM': _A192Gcm, 'A256GCM': _A256Gcm } @classmethod def instantiate_alg(cls, name, use=None): alg = cls.algorithms_registry[name] if use is not None and alg.algorithm_use != use: raise KeyError return alg() @classmethod def signing_alg(cls, name): try: return cls.instantiate_alg(name, use='sig') except KeyError: raise InvalidJWAAlgorithm( '%s is not a valid Signign algorithm name' % name) @classmethod def keymgmt_alg(cls, name): try: return cls.instantiate_alg(name, use='kex') except KeyError: raise InvalidJWAAlgorithm( '%s is not a valid Key Management algorithm name' % name) @classmethod def encryption_alg(cls, name): try: return cls.instantiate_alg(name, use='enc') except KeyError: raise InvalidJWAAlgorithm( '%s is not a valid Encryption algorithm name' % name)
./CrossVul/dataset_final_sorted/CWE-200/py/good_5206_0
crossvul-python_data_bad_5542_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend for SWIFT""" from __future__ import absolute_import import hashlib import httplib import math import urllib import urlparse from glance.common import auth from glance.common import exception from glance.openstack.common import cfg import glance.openstack.common.log as logging import glance.store import glance.store.base import glance.store.location try: import swiftclient except ImportError: pass LOG = logging.getLogger(__name__) DEFAULT_CONTAINER = 'glance' DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M ONE_MB = 1000 * 1024 swift_opts = [ cfg.BoolOpt('swift_enable_snet', default=False), cfg.StrOpt('swift_store_auth_address'), cfg.StrOpt('swift_store_user', secret=True), cfg.StrOpt('swift_store_key', secret=True), cfg.StrOpt('swift_store_auth_version', default='2'), cfg.StrOpt('swift_store_region'), cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER), cfg.IntOpt('swift_store_large_object_size', default=DEFAULT_LARGE_OBJECT_SIZE), cfg.IntOpt('swift_store_large_object_chunk_size', default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE), cfg.BoolOpt('swift_store_create_container_on_put', default=False), cfg.BoolOpt('swift_store_multi_tenant', default=False), cfg.ListOpt('swift_store_admin_tenants', default=[]), ] CONF = cfg.CONF CONF.register_opts(swift_opts) class 
StoreLocation(glance.store.location.StoreLocation): """ Class describing a Swift URI. A Swift URI can look like any of the following: swift://user:pass@authurl.com/container/obj-id swift://account:user:pass@authurl.com/container/obj-id swift+http://user:pass@authurl.com/container/obj-id swift+https://user:pass@authurl.com/container/obj-id When using multi-tenant a URI might look like this (a storage URL): swift+https://example.com/container/obj-id The swift+http:// URIs indicate there is an HTTP authentication URL. The default for Swift is an HTTPS authentication URL, so swift:// and swift+https:// are the same... """ def process_specs(self): self.scheme = self.specs.get('scheme', 'swift+https') self.user = self.specs.get('user') self.key = self.specs.get('key') self.auth_or_store_url = self.specs.get('auth_or_store_url') self.container = self.specs.get('container') self.obj = self.specs.get('obj') def _get_credstring(self): if self.user and self.key: return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key)) return '' def get_uri(self): auth_or_store_url = self.auth_or_store_url if auth_or_store_url.startswith('http://'): auth_or_store_url = auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = self._get_credstring() auth_or_store_url = auth_or_store_url.strip('/') container = self.container.strip('/') obj = self.obj.strip('/') return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url, container, obj) def parse_uri(self, uri): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. 
It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _( "URI cannot contain more than one occurrence of a scheme." "If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the swift+http:// scheme, " "like so: " "swift+http://user:pass@authurl.com/v1/container/obj" ) LOG.error(_("Invalid store uri %(uri)s: %(reason)s") % locals()) raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) assert pieces.scheme in ('swift', 'swift+http', 'swift+https') self.scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') if len(cred_parts) != 2: reason = (_("Badly formed credentials '%(creds)s' in Swift " "URI") % locals()) LOG.error(reason) raise exception.BadStoreUri() user, key = cred_parts self.user = urllib.unquote(user) self.key = urllib.unquote(key) else: self.user = None self.key = None path_parts = path.split('/') try: self.obj = path_parts.pop() self.container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) self.auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed Swift URI: %s") % uri LOG.error(reason) raise exception.BadStoreUri() @property def swift_url(self): """ Creates a fully-qualified auth 
url that the Swift client library can use. The scheme for the auth_url is determined using the scheme included in the `location` field. HTTPS is assumed, unless 'swift+http' is specified. """ if self.scheme in ('swift+https', 'swift'): auth_scheme = 'https://' else: auth_scheme = 'http://' full_url = ''.join([auth_scheme, self.auth_or_store_url]) return full_url class Store(glance.store.base.Store): """An implementation of the swift backend adapter.""" EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>" CHUNKSIZE = 65536 def get_schemes(self): return ('swift+https', 'swift', 'swift+http') def configure(self): self.snet = CONF.swift_enable_snet self.multi_tenant = CONF.swift_store_multi_tenant self.admin_tenants = CONF.swift_store_admin_tenants self.region = CONF.swift_store_region self.auth_version = self._option_get('swift_store_auth_version') self.storage_url = None self.token = None def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. 
If the store was not able to successfully configure itself, it should raise `exception.BadStoreConfiguration` """ self.auth_address = self._option_get('swift_store_auth_address') self.user = self._option_get('swift_store_user') self.key = self._option_get('swift_store_key') self.container = CONF.swift_store_container if self.multi_tenant: if self.context is None: reason = _("Multi-tenant Swift storage requires a context.") raise exception.BadStoreConfiguration(store_name="swift", reason=reason) self.token = self.context.auth_tok self.key = None # multi-tenant uses tokens, not (passwords) if self.context.tenant and self.context.user: self.user = self.context.tenant + ':' + self.context.user if self.context.service_catalog: service_catalog = self.context.service_catalog self.storage_url = self._get_swift_endpoint(service_catalog) try: # The config file has swift_store_large_object_*size in MB, but # internally we store it in bytes, since the image_size parameter # passed to add() is also in bytes. 
_obj_size = CONF.swift_store_large_object_size self.large_object_size = _obj_size * ONE_MB _obj_chunk_size = CONF.swift_store_large_object_chunk_size self.large_object_chunk_size = _obj_chunk_size * ONE_MB except cfg.ConfigFileValueError, e: reason = _("Error in configuration conf: %s") % e LOG.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) self.scheme = 'swift+https' if self.auth_address.startswith('http://'): self.scheme = 'swift+http' self.full_auth_address = self.auth_address elif self.auth_address.startswith('https://'): self.full_auth_address = self.auth_address else: # Defaults https self.full_auth_address = 'https://' + self.auth_address def _get_swift_endpoint(self, service_catalog): return auth.get_endpoint(service_catalog, service_type='object-store') def get(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns a tuple of generator (for reading the image file) and image_size :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises `glance.exception.NotFound` if image does not exist """ loc = location.store_location swift_conn = self._swift_connection_for_location(loc) try: (resp_headers, resp_body) = swift_conn.get_object( container=loc.container, obj=loc.obj, resp_chunk_size=self.CHUNKSIZE) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() raise exception.NotFound(_("Swift could not find image at " "uri %(uri)s") % locals()) else: raise class ResponseIndexable(glance.store.Indexable): def another(self): try: return self.wrapped.next() except StopIteration: return '' length = resp_headers.get('content-length') return (ResponseIndexable(resp_body, length), length) def get_size(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns the image_size (or 
0 if unavailable) :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() """ loc = location.store_location swift_conn = self._swift_connection_for_location(loc) try: resp_headers = swift_conn.head_object(container=loc.container, obj=loc.obj) return resp_headers.get('content-length', 0) except Exception: return 0 def _swift_connection_for_location(self, loc): if loc.user: return self._make_swift_connection( loc.swift_url, loc.user, loc.key, region=self.region) else: if self.multi_tenant: return self._make_swift_connection( None, self.user, None, storage_url=loc.swift_url, token=self.token) else: reason = (_("Location is missing user:password information.")) LOG.error(reason) raise exception.BadStoreUri(message=reason) def _make_swift_connection(self, auth_url, user, key, region=None, storage_url=None, token=None): """ Creates a connection using the Swift client library. :param auth_url The authentication for v1 style Swift auth or v2 style Keystone auth. :param user A string containing the tenant:user information. :param key A string containing the key/password for the connection. :param region A string containing the swift endpoint region :param storage_url A string containing the storage URL. 
:param token A string containing the token """ snet = self.snet auth_version = self.auth_version full_auth_url = (auth_url if not auth_url or auth_url.endswith('/') else auth_url + '/') LOG.debug(_("Creating Swift connection with " "(auth_address=%(full_auth_url)s, user=%(user)s, " "snet=%(snet)s, auth_version=%(auth_version)s)") % locals()) tenant_name = None if self.auth_version == '2': tenant_user = user.split(':') if len(tenant_user) != 2: reason = (_("Badly formed tenant:user '%(tenant_user)s' in " "Swift URI") % locals()) LOG.error(reason) raise exception.BadStoreUri() (tenant_name, user) = tenant_user if self.multi_tenant: #NOTE: multi-tenant supports v2 auth only return swiftclient.Connection( None, user, None, preauthurl=storage_url, preauthtoken=token, snet=snet, tenant_name=tenant_name, auth_version='2') else: os_options = {} if region: os_options['region_name'] = region return swiftclient.Connection( full_auth_url, user, key, snet=snet, os_options=os_options, tenant_name=tenant_name, auth_version=auth_version) def _option_get(self, param): result = getattr(CONF, param) if not result: reason = (_("Could not find %(param)s in configuration " "options.") % locals()) LOG.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) return result def add(self, image_id, image_file, image_size): """ Stores an image file with supplied identifier to the backend storage system and returns an `glance.store.ImageAddResult` object containing information about the stored image. 
:param image_id: The opaque image identifier :param image_file: The image data to write, as a file-like object :param image_size: The size of the image data to write, in bytes :retval `glance.store.ImageAddResult` object :raises `glance.common.exception.Duplicate` if the image already existed Swift writes the image data using the scheme: ``swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<ID>` where: <USER> = ``swift_store_user`` <KEY> = ``swift_store_key`` <AUTH_ADDRESS> = ``swift_store_auth_address`` <CONTAINER> = ``swift_store_container`` <ID> = The id of the image being added :note Swift auth URLs by default use HTTPS. To specify an HTTP auth URL, you can specify http://someurl.com for the swift_store_auth_address config option :note Swift cannot natively/transparently handle objects >5GB in size. So, if the image is greater than 5GB, we write chunks of image data to Swift and then write an manifest to Swift that contains information about the chunks. This same chunking process is used by default for images of an unknown size, as pushing them directly to swift would fail if the image turns out to be greater than 5GB. 
""" swift_conn = self._make_swift_connection( self.full_auth_address, self.user, self.key, storage_url=self.storage_url, token=self.token) obj_name = str(image_id) if self.multi_tenant: # NOTE: When using multi-tenant we create containers for each # image so we can set permissions on each image in swift container = self.container + '_' + obj_name auth_or_store_url = self.storage_url else: container = self.container auth_or_store_url = self.auth_address create_container_if_missing(container, swift_conn) location = StoreLocation({'scheme': self.scheme, 'container': container, 'obj': obj_name, 'auth_or_store_url': auth_or_store_url, 'user': self.user, 'key': self.key}) LOG.debug(_("Adding image object '%(obj_name)s' " "to Swift") % locals()) try: if image_size > 0 and image_size < self.large_object_size: # Image size is known, and is less than large_object_size. # Send to Swift with regular PUT. obj_etag = swift_conn.put_object(container, obj_name, image_file, content_length=image_size) else: # Write the image into Swift in chunks. chunk_id = 1 if image_size > 0: total_chunks = str(int( math.ceil(float(image_size) / float(self.large_object_chunk_size)))) else: # image_size == 0 is when we don't know the size # of the image. This can occur with older clients # that don't inspect the payload size. LOG.debug(_("Cannot determine image size. Adding as a " "segmented object to Swift.")) total_chunks = '?' 
checksum = hashlib.md5() combined_chunks_size = 0 while True: chunk_size = self.large_object_chunk_size if image_size == 0: content_length = None else: left = image_size - combined_chunks_size if left == 0: break if chunk_size > left: chunk_size = left content_length = chunk_size chunk_name = "%s-%05d" % (obj_name, chunk_id) reader = ChunkReader(image_file, checksum, chunk_size) chunk_etag = swift_conn.put_object( container, chunk_name, reader, content_length=content_length) bytes_read = reader.bytes_read msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/" "%(total_chunks)s) of length %(bytes_read)d " "to Swift returning MD5 of content: " "%(chunk_etag)s") LOG.debug(msg % locals()) if bytes_read == 0: # Delete the last chunk, because it's of zero size. # This will happen if image_size == 0. LOG.debug(_("Deleting final zero-length chunk")) swift_conn.delete_object(container, chunk_name) break chunk_id += 1 combined_chunks_size += bytes_read # In the case we have been given an unknown image size, # set the image_size to the total size of the combined chunks. if image_size == 0: image_size = combined_chunks_size # Now we write the object manifest and return the # manifest's etag... manifest = "%s/%s" % (container, obj_name) headers = {'ETag': hashlib.md5("").hexdigest(), 'X-Object-Manifest': manifest} # The ETag returned for the manifest is actually the # MD5 hash of the concatenated checksums of the strings # of each chunk...so we ignore this result in favour of # the MD5 of the entire image file contents, so that # users can verify the image file contents accordingly swift_conn.put_object(container, obj_name, None, headers=headers) obj_etag = checksum.hexdigest() # NOTE: We return the user and key here! Have to because # location is used by the API server to return the actual # image data. 
We *really* should consider NOT returning # the location attribute from GET /images/<ID> and # GET /images/details return (location.get_uri(), image_size, obj_etag) except swiftclient.ClientException, e: if e.http_status == httplib.CONFLICT: raise exception.Duplicate(_("Swift already has an image at " "location %s") % location.get_uri()) msg = (_("Failed to add object to Swift.\n" "Got error from Swift: %(e)s") % locals()) LOG.error(msg) raise glance.store.BackendException(msg) def delete(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file to delete :location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises NotFound if image does not exist """ loc = location.store_location swift_conn = self._swift_connection_for_location(loc) try: # We request the manifest for the object. If one exists, # that means the object was uploaded in chunks/segments, # and we need to delete all the chunks as well as the # manifest. manifest = None try: headers = swift_conn.head_object(loc.container, loc.obj) manifest = headers.get('x-object-manifest') except swiftclient.ClientException, e: if e.http_status != httplib.NOT_FOUND: raise if manifest: # Delete all the chunks before the object manifest itself obj_container, obj_prefix = manifest.split('/', 1) for segment in swift_conn.get_container(obj_container, prefix=obj_prefix)[1]: # TODO(jaypipes): This would be an easy area to parallelize # since we're simply sending off parallelizable requests # to Swift to delete stuff. It's not like we're going to # be hogging up network or file I/O here... 
swift_conn.delete_object(obj_container, segment['name']) else: swift_conn.delete_object(loc.container, loc.obj) if self.multi_tenant: #NOTE: In multi-tenant mode containers are specific to # each object (Glance image) swift_conn.delete_container(loc.container) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() raise exception.NotFound(_("Swift could not find image at " "uri %(uri)s") % locals()) else: raise def set_acls(self, location, public=False, read_tenants=[], write_tenants=[]): """ Sets the read and write access control list for an image in the backend store. :location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :public A boolean indicating whether the image should be public. :read_tenants A list of tenant strings which should be granted read access for an image. :write_tenants A list of tenant strings which should be granted write access for an image. """ if self.multi_tenant: loc = location.store_location swift_conn = self._swift_connection_for_location(loc) headers = {} if public: headers['X-Container-Read'] = ".r:*" elif read_tenants: headers['X-Container-Read'] = ','.join(read_tenants) else: headers['X-Container-Read'] = '' write_tenants.extend(self.admin_tenants) if write_tenants: headers['X-Container-Write'] = ','.join(write_tenants) else: headers['X-Container-Write'] = '' try: swift_conn.post_container(loc.container, headers=headers) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() raise exception.NotFound(_("Swift could not find image at " "uri %(uri)s") % locals()) else: raise class ChunkReader(object): def __init__(self, fd, checksum, total): self.fd = fd self.checksum = checksum self.total = total self.bytes_read = 0 def read(self, i): left = self.total - self.bytes_read if i > left: i = left result = self.fd.read(i) self.bytes_read += len(result) 
self.checksum.update(result) return result def create_container_if_missing(container, swift_conn): """ Creates a missing container in Swift if the ``swift_store_create_container_on_put`` option is set. :param container: Name of container to create :param swift_conn: Connection to Swift """ try: swift_conn.head_container(container) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: if CONF.swift_store_create_container_on_put: try: swift_conn.put_container(container) except swiftclient.ClientException, e: msg = _("Failed to add container to Swift.\n" "Got error from Swift: %(e)s") % locals() raise glance.store.BackendException(msg) else: msg = (_("The container %(container)s does not exist in " "Swift. Please set the " "swift_store_create_container_on_put option" "to add container to Swift automatically.") % locals()) raise glance.store.BackendException(msg) else: raise
./CrossVul/dataset_final_sorted/CWE-200/py/bad_5542_0
crossvul-python_data_good_5541_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend for SWIFT""" from __future__ import absolute_import import hashlib import httplib import logging import math import urlparse from glance.common import cfg from glance.common import exception import glance.store import glance.store.base import glance.store.location try: from swift.common import client as swift_client except ImportError: pass DEFAULT_CONTAINER = 'glance' DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M ONE_MB = 1000 * 1024 logger = logging.getLogger('glance.store.swift') class StoreLocation(glance.store.location.StoreLocation): """ Class describing a Swift URI. A Swift URI can look like any of the following: swift://user:pass@authurl.com/container/obj-id swift://account:user:pass@authurl.com/container/obj-id swift+http://user:pass@authurl.com/container/obj-id swift+https://user:pass@authurl.com/container/obj-id The swift+http:// URIs indicate there is an HTTP authentication URL. The default for Swift is an HTTPS authentication URL, so swift:// and swift+https:// are the same... 
""" def process_specs(self): self.scheme = self.specs.get('scheme', 'swift+https') self.user = self.specs.get('user') self.key = self.specs.get('key') self.authurl = self.specs.get('authurl') self.container = self.specs.get('container') self.obj = self.specs.get('obj') def _get_credstring(self): if self.user: return '%s:%s@' % (self.user, self.key) return '' def get_uri(self): authurl = self.authurl if authurl.startswith('http://'): authurl = authurl[7:] elif authurl.startswith('https://'): authurl = authurl[8:] credstring = self._get_credstring() authurl = authurl.strip('/') container = self.container.strip('/') obj = self.obj.strip('/') return '%s://%s%s/%s/%s' % (self.scheme, credstring, authurl, container, obj) def parse_uri(self, uri): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _( "URI cannot contain more than one occurrence of a scheme." 
"If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the swift+http:// scheme, " "like so: " "swift+http://user:pass@authurl.com/v1/container/obj" ) logger.error(_("Invalid store URI: %(reason)s") % locals()) raise exception.BadStoreUri() pieces = urlparse.urlparse(uri) assert pieces.scheme in ('swift', 'swift+http', 'swift+https') self.scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') # User can be account:user, in which case cred_parts[0:2] will be # the account and user. Combine them into a single username of # account:user if len(cred_parts) == 1: reason = (_("Badly formed credentials in Swift URI.")) logger.error(reason) raise exception.BadStoreUri() elif len(cred_parts) == 3: user = ':'.join(cred_parts[0:2]) else: user = cred_parts[0] key = cred_parts[-1] self.user = user self.key = key else: self.user = None path_parts = path.split('/') try: self.obj = path_parts.pop() self.container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) self.authurl = '/'.join(path_parts) except IndexError: reason = _("Badly formed Swift URI.") logger.error(reason) raise exception.BadStoreUri() @property def swift_auth_url(self): """ Creates a fully-qualified auth url that the Swift client library can use. The scheme for the auth_url is determined using the scheme included in the `location` field. HTTPS is assumed, unless 'swift+http' is specified. 
""" if self.scheme in ('swift+https', 'swift'): auth_scheme = 'https://' else: auth_scheme = 'http://' full_url = ''.join([auth_scheme, self.authurl]) return full_url class Store(glance.store.base.Store): """An implementation of the swift backend adapter.""" EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>" CHUNKSIZE = 65536 opts = [ cfg.BoolOpt('swift_enable_snet', default=False), cfg.StrOpt('swift_store_auth_address'), cfg.StrOpt('swift_store_user', secret=True), cfg.StrOpt('swift_store_key', secret=True), cfg.StrOpt('swift_store_auth_version', default='2'), cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER), cfg.IntOpt('swift_store_large_object_size', default=DEFAULT_LARGE_OBJECT_SIZE), cfg.IntOpt('swift_store_large_object_chunk_size', default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE), cfg.BoolOpt('swift_store_create_container_on_put', default=False), ] def configure(self): self.conf.register_opts(self.opts) self.snet = self.conf.swift_enable_snet self.auth_version = self._option_get('swift_store_auth_version') def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadStoreConfiguration` """ self.auth_address = self._option_get('swift_store_auth_address') self.user = self._option_get('swift_store_user') self.key = self._option_get('swift_store_key') self.container = self.conf.swift_store_container try: # The config file has swift_store_large_object_*size in MB, but # internally we store it in bytes, since the image_size parameter # passed to add() is also in bytes. 
self.large_object_size = \ self.conf.swift_store_large_object_size * ONE_MB self.large_object_chunk_size = \ self.conf.swift_store_large_object_chunk_size * ONE_MB except cfg.ConfigFileValueError, e: reason = _("Error in configuration conf: %s") % e logger.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) self.scheme = 'swift+https' if self.auth_address.startswith('http://'): self.scheme = 'swift+http' self.full_auth_address = self.auth_address elif self.auth_address.startswith('https://'): self.full_auth_address = self.auth_address else: # Defaults https self.full_auth_address = 'https://' + self.auth_address def get(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns a tuple of generator (for reading the image file) and image_size :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises `glance.exception.NotFound` if image does not exist """ loc = location.store_location swift_conn = self._make_swift_connection( auth_url=loc.swift_auth_url, user=loc.user, key=loc.key) try: (resp_headers, resp_body) = swift_conn.get_object( container=loc.container, obj=loc.obj, resp_chunk_size=self.CHUNKSIZE) except swift_client.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise class ResponseIndexable(glance.store.Indexable): def another(self): try: return self.wrapped.next() except StopIteration: return '' length = resp_headers.get('content-length') return (ResponseIndexable(resp_body, length), length) def get_size(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns the image_size (or 0 if unavailable) :param location `glance.store.location.Location` object, supplied from 
glance.store.location.get_location_from_uri() """ loc = location.store_location swift_conn = self._make_swift_connection( auth_url=loc.swift_auth_url, user=loc.user, key=loc.key) try: resp_headers = swift_conn.head_object(container=loc.container, obj=loc.obj) return resp_headers.get('content-length', 0) except Exception: return 0 def _make_swift_connection(self, auth_url, user, key): """ Creates a connection using the Swift client library. """ snet = self.snet auth_version = self.auth_version full_auth_url = (auth_url if not auth_url or auth_url.endswith('/') else auth_url + '/') logger.debug(_("Creating Swift connection with " "(auth_address=%(full_auth_url)s, user=%(user)s, " "snet=%(snet)s, auth_version=%(auth_version)s)") % locals()) return swift_client.Connection( authurl=full_auth_url, user=user, key=key, snet=snet, auth_version=auth_version) def _option_get(self, param): result = getattr(self.conf, param) if not result: reason = (_("Could not find %(param)s in configuration " "options.") % locals()) logger.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) return result def add(self, image_id, image_file, image_size): """ Stores an image file with supplied identifier to the backend storage system and returns an `glance.store.ImageAddResult` object containing information about the stored image. :param image_id: The opaque image identifier :param image_file: The image data to write, as a file-like object :param image_size: The size of the image data to write, in bytes :retval `glance.store.ImageAddResult` object :raises `glance.common.exception.Duplicate` if the image already existed Swift writes the image data using the scheme: ``swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<ID>` where: <USER> = ``swift_store_user`` <KEY> = ``swift_store_key`` <AUTH_ADDRESS> = ``swift_store_auth_address`` <CONTAINER> = ``swift_store_container`` <ID> = The id of the image being added :note Swift auth URLs by default use HTTPS. 
To specify an HTTP auth URL, you can specify http://someurl.com for the swift_store_auth_address config option :note Swift cannot natively/transparently handle objects >5GB in size. So, if the image is greater than 5GB, we write chunks of image data to Swift and then write an manifest to Swift that contains information about the chunks. This same chunking process is used by default for images of an unknown size, as pushing them directly to swift would fail if the image turns out to be greater than 5GB. """ swift_conn = self._make_swift_connection( auth_url=self.full_auth_address, user=self.user, key=self.key) create_container_if_missing(self.container, swift_conn, self.conf) obj_name = str(image_id) location = StoreLocation({'scheme': self.scheme, 'container': self.container, 'obj': obj_name, 'authurl': self.auth_address, 'user': self.user, 'key': self.key}) logger.debug(_("Adding image object '%(obj_name)s' " "to Swift") % locals()) try: if image_size > 0 and image_size < self.large_object_size: # Image size is known, and is less than large_object_size. # Send to Swift with regular PUT. obj_etag = swift_conn.put_object(self.container, obj_name, image_file, content_length=image_size) else: # Write the image into Swift in chunks. chunk_id = 1 if image_size > 0: total_chunks = str(int( math.ceil(float(image_size) / float(self.large_object_chunk_size)))) else: # image_size == 0 is when we don't know the size # of the image. This can occur with older clients # that don't inspect the payload size. logger.debug(_("Cannot determine image size. Adding as a " "segmented object to Swift.")) total_chunks = '?' 
checksum = hashlib.md5() combined_chunks_size = 0 while True: chunk_size = self.large_object_chunk_size if image_size == 0: content_length = None else: left = image_size - combined_chunks_size if left == 0: break if chunk_size > left: chunk_size = left content_length = chunk_size chunk_name = "%s-%05d" % (obj_name, chunk_id) reader = ChunkReader(image_file, checksum, chunk_size) chunk_etag = swift_conn.put_object( self.container, chunk_name, reader, content_length=content_length) bytes_read = reader.bytes_read msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/" "%(total_chunks)s) of length %(bytes_read)d " "to Swift returning MD5 of content: " "%(chunk_etag)s") logger.debug(msg % locals()) if bytes_read == 0: # Delete the last chunk, because it's of zero size. # This will happen if image_size == 0. logger.debug(_("Deleting final zero-length chunk")) swift_conn.delete_object(self.container, chunk_name) break chunk_id += 1 combined_chunks_size += bytes_read # In the case we have been given an unknown image size, # set the image_size to the total size of the combined chunks. if image_size == 0: image_size = combined_chunks_size # Now we write the object manifest and return the # manifest's etag... manifest = "%s/%s" % (self.container, obj_name) headers = {'ETag': hashlib.md5("").hexdigest(), 'X-Object-Manifest': manifest} # The ETag returned for the manifest is actually the # MD5 hash of the concatenated checksums of the strings # of each chunk...so we ignore this result in favour of # the MD5 of the entire image file contents, so that # users can verify the image file contents accordingly swift_conn.put_object(self.container, obj_name, None, headers=headers) obj_etag = checksum.hexdigest() # NOTE: We return the user and key here! Have to because # location is used by the API server to return the actual # image data. 
We *really* should consider NOT returning # the location attribute from GET /images/<ID> and # GET /images/details return (location.get_uri(), image_size, obj_etag) except swift_client.ClientException, e: if e.http_status == httplib.CONFLICT: raise exception.Duplicate(_("Swift already has an image at " "this location.")) msg = (_("Failed to add object to Swift.\n" "Got error from Swift: %(e)s") % locals()) logger.error(msg) raise glance.store.BackendException(msg) def delete(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file to delete :location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises NotFound if image does not exist """ loc = location.store_location swift_conn = self._make_swift_connection( auth_url=loc.swift_auth_url, user=loc.user, key=loc.key) try: # We request the manifest for the object. If one exists, # that means the object was uploaded in chunks/segments, # and we need to delete all the chunks as well as the # manifest. manifest = None try: headers = swift_conn.head_object(loc.container, loc.obj) manifest = headers.get('x-object-manifest') except swift_client.ClientException, e: if e.http_status != httplib.NOT_FOUND: raise if manifest: # Delete all the chunks before the object manifest itself obj_container, obj_prefix = manifest.split('/', 1) for segment in swift_conn.get_container(obj_container, prefix=obj_prefix)[1]: # TODO(jaypipes): This would be an easy area to parallelize # since we're simply sending off parallelizable requests # to Swift to delete stuff. It's not like we're going to # be hogging up network or file I/O here... 
swift_conn.delete_object(obj_container, segment['name']) else: swift_conn.delete_object(loc.container, loc.obj) except swift_client.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_store_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise class ChunkReader(object): def __init__(self, fd, checksum, total): self.fd = fd self.checksum = checksum self.total = total self.bytes_read = 0 def read(self, i): left = self.total - self.bytes_read if i > left: i = left result = self.fd.read(i) self.bytes_read += len(result) self.checksum.update(result) return result def create_container_if_missing(container, swift_conn, conf): """ Creates a missing container in Swift if the ``swift_store_create_container_on_put`` option is set. :param container: Name of container to create :param swift_conn: Connection to Swift :param conf: Option mapping """ try: swift_conn.head_container(container) except swift_client.ClientException, e: if e.http_status == httplib.NOT_FOUND: if conf.swift_store_create_container_on_put: try: swift_conn.put_container(container) except swift_client.ClientException, e: msg = _("Failed to add container to Swift.\n" "Got error from Swift: %(e)s") % locals() raise glance.store.BackendException(msg) else: msg = (_("The container %(container)s does not exist in " "Swift. Please set the " "swift_store_create_container_on_put option" "to add container to Swift automatically.") % locals()) raise glance.store.BackendException(msg) else: raise glance.store.register_store(__name__, ['swift', 'swift+http', 'swift+https'])
./CrossVul/dataset_final_sorted/CWE-200/py/good_5541_0
crossvul-python_data_good_5543_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010-2011 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend for SWIFT""" from __future__ import absolute_import import hashlib import httplib import math import urllib import urlparse from glance.common import auth from glance.common import exception from glance.openstack.common import cfg import glance.openstack.common.log as logging import glance.store import glance.store.base import glance.store.location try: import swiftclient except ImportError: pass LOG = logging.getLogger(__name__) DEFAULT_CONTAINER = 'glance' DEFAULT_LARGE_OBJECT_SIZE = 5 * 1024 # 5GB DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 200 # 200M ONE_MB = 1000 * 1024 swift_opts = [ cfg.BoolOpt('swift_enable_snet', default=False), cfg.StrOpt('swift_store_auth_address'), cfg.StrOpt('swift_store_user', secret=True), cfg.StrOpt('swift_store_key', secret=True), cfg.StrOpt('swift_store_auth_version', default='2'), cfg.StrOpt('swift_store_region'), cfg.StrOpt('swift_store_endpoint_type', default='publicURL'), cfg.StrOpt('swift_store_service_type', default='object-store'), cfg.StrOpt('swift_store_container', default=DEFAULT_CONTAINER), cfg.IntOpt('swift_store_large_object_size', default=DEFAULT_LARGE_OBJECT_SIZE), cfg.IntOpt('swift_store_large_object_chunk_size', default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE), cfg.BoolOpt('swift_store_create_container_on_put', default=False), cfg.BoolOpt('swift_store_multi_tenant', 
default=False), cfg.ListOpt('swift_store_admin_tenants', default=[]), ] CONF = cfg.CONF CONF.register_opts(swift_opts) class StoreLocation(glance.store.location.StoreLocation): """ Class describing a Swift URI. A Swift URI can look like any of the following: swift://user:pass@authurl.com/container/obj-id swift://account:user:pass@authurl.com/container/obj-id swift+http://user:pass@authurl.com/container/obj-id swift+https://user:pass@authurl.com/container/obj-id When using multi-tenant a URI might look like this (a storage URL): swift+https://example.com/container/obj-id The swift+http:// URIs indicate there is an HTTP authentication URL. The default for Swift is an HTTPS authentication URL, so swift:// and swift+https:// are the same... """ def process_specs(self): self.scheme = self.specs.get('scheme', 'swift+https') self.user = self.specs.get('user') self.key = self.specs.get('key') self.auth_or_store_url = self.specs.get('auth_or_store_url') self.container = self.specs.get('container') self.obj = self.specs.get('obj') def _get_credstring(self): if self.user and self.key: return '%s:%s@' % (urllib.quote(self.user), urllib.quote(self.key)) return '' def get_uri(self): auth_or_store_url = self.auth_or_store_url if auth_or_store_url.startswith('http://'): auth_or_store_url = auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = self._get_credstring() auth_or_store_url = auth_or_store_url.strip('/') container = self.container.strip('/') obj = self.obj.strip('/') return '%s://%s%s/%s/%s' % (self.scheme, credstring, auth_or_store_url, container, obj) def parse_uri(self, uri): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. 
It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _("URI cannot contain more than one occurrence " "of a scheme. If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the " "swift+http:// scheme, like so: " "swift+http://user:pass@authurl.com/v1/container/obj") LOG.debug(_("Invalid store URI: %(reason)s") % locals()) raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) assert pieces.scheme in ('swift', 'swift+http', 'swift+https') self.scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') if len(cred_parts) != 2: reason = (_("Badly formed credentials in Swift URI.")) LOG.debug(reason) raise exception.BadStoreUri() user, key = cred_parts self.user = urllib.unquote(user) self.key = urllib.unquote(key) else: self.user = None self.key = None path_parts = path.split('/') try: self.obj = path_parts.pop() self.container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) self.auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed Swift URI.") LOG.debug(reason) raise exception.BadStoreUri() @property def swift_url(self): """ Creates a fully-qualified auth url that the Swift client library can use. 
The scheme for the auth_url is determined using the scheme included in the `location` field. HTTPS is assumed, unless 'swift+http' is specified. """ if self.auth_or_store_url.startswith('http'): return self.auth_or_store_url else: if self.scheme in ('swift+https', 'swift'): auth_scheme = 'https://' else: auth_scheme = 'http://' return ''.join([auth_scheme, self.auth_or_store_url]) def Store(context=None, loc=None): if (CONF.swift_store_multi_tenant and (loc is None or loc.store_location.user is None)): return MultiTenantStore(context, loc) return SingleTenantStore(context, loc) class BaseStore(glance.store.base.Store): CHUNKSIZE = 65536 def get_schemes(self): return ('swift+https', 'swift', 'swift+http') def configure(self): _obj_size = self._option_get('swift_store_large_object_size') self.large_object_size = _obj_size * ONE_MB _chunk_size = self._option_get('swift_store_large_object_chunk_size') self.large_object_chunk_size = _chunk_size * ONE_MB self.admin_tenants = CONF.swift_store_admin_tenants self.region = CONF.swift_store_region self.service_type = CONF.swift_store_service_type self.endpoint_type = CONF.swift_store_endpoint_type self.snet = CONF.swift_enable_snet def get(self, location, connection=None): location = location.store_location if not connection: connection = self.get_connection(location) try: resp_headers, resp_body = connection.get_object( container=location.container, obj=location.obj, resp_chunk_size=self.CHUNKSIZE) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise class ResponseIndexable(glance.store.Indexable): def another(self): try: return self.wrapped.next() except StopIteration: return '' length = int(resp_headers.get('content-length', 0)) return (ResponseIndexable(resp_body, length), length) def get_size(self, location, connection=None): location = location.store_location if not connection: 
connection = self.get_connection(location) try: resp_headers = connection.head_object( container=location.container, obj=location.obj) return int(resp_headers.get('content-length', 0)) except Exception: return 0 def _option_get(self, param): result = getattr(CONF, param) if not result: reason = (_("Could not find %(param)s in configuration " "options.") % locals()) LOG.error(reason) raise exception.BadStoreConfiguration(store_name="swift", reason=reason) return result def add(self, image_id, image_file, image_size, connection=None): location = self.create_location(image_id) if not connection: connection = self.get_connection(location) self._create_container_if_missing(location.container, connection) LOG.debug(_("Adding image object '%(obj_name)s' " "to Swift") % dict(obj_name=location.obj)) try: if image_size > 0 and image_size < self.large_object_size: # Image size is known, and is less than large_object_size. # Send to Swift with regular PUT. obj_etag = connection.put_object(location.container, location.obj, image_file, content_length=image_size) else: # Write the image into Swift in chunks. chunk_id = 1 if image_size > 0: total_chunks = str(int( math.ceil(float(image_size) / float(self.large_object_chunk_size)))) else: # image_size == 0 is when we don't know the size # of the image. This can occur with older clients # that don't inspect the payload size. LOG.debug(_("Cannot determine image size. Adding as a " "segmented object to Swift.")) total_chunks = '?' 
checksum = hashlib.md5() combined_chunks_size = 0 while True: chunk_size = self.large_object_chunk_size if image_size == 0: content_length = None else: left = image_size - combined_chunks_size if left == 0: break if chunk_size > left: chunk_size = left content_length = chunk_size chunk_name = "%s-%05d" % (location.obj, chunk_id) reader = ChunkReader(image_file, checksum, chunk_size) chunk_etag = connection.put_object( location.container, chunk_name, reader, content_length=content_length) bytes_read = reader.bytes_read msg = _("Wrote chunk %(chunk_name)s (%(chunk_id)d/" "%(total_chunks)s) of length %(bytes_read)d " "to Swift returning MD5 of content: " "%(chunk_etag)s") LOG.debug(msg % locals()) if bytes_read == 0: # Delete the last chunk, because it's of zero size. # This will happen if size == 0. LOG.debug(_("Deleting final zero-length chunk")) connection.delete_object(location.container, chunk_name) break chunk_id += 1 combined_chunks_size += bytes_read # In the case we have been given an unknown image size, # set the size to the total size of the combined chunks. if image_size == 0: image_size = combined_chunks_size # Now we write the object manifest and return the # manifest's etag... manifest = "%s/%s" % (location.container, location.obj) headers = {'ETag': hashlib.md5("").hexdigest(), 'X-Object-Manifest': manifest} # The ETag returned for the manifest is actually the # MD5 hash of the concatenated checksums of the strings # of each chunk...so we ignore this result in favour of # the MD5 of the entire image file contents, so that # users can verify the image file contents accordingly connection.put_object(location.container, location.obj, None, headers=headers) obj_etag = checksum.hexdigest() # NOTE: We return the user and key here! Have to because # location is used by the API server to return the actual # image data. 
We *really* should consider NOT returning # the location attribute from GET /images/<ID> and # GET /images/details return (location.get_uri(), image_size, obj_etag) except swiftclient.ClientException, e: if e.http_status == httplib.CONFLICT: raise exception.Duplicate(_("Swift already has an image at " "this location")) msg = (_("Failed to add object to Swift.\n" "Got error from Swift: %(e)s") % locals()) LOG.error(msg) raise glance.store.BackendException(msg) def delete(self, location, connection=None): location = location.store_location if not connection: connection = self.get_connection(location) try: # We request the manifest for the object. If one exists, # that means the object was uploaded in chunks/segments, # and we need to delete all the chunks as well as the # manifest. manifest = None try: headers = connection.head_object( location.container, location.obj) manifest = headers.get('x-object-manifest') except swiftclient.ClientException, e: if e.http_status != httplib.NOT_FOUND: raise if manifest: # Delete all the chunks before the object manifest itself obj_container, obj_prefix = manifest.split('/', 1) segments = connection.get_container( obj_container, prefix=obj_prefix)[1] for segment in segments: # TODO(jaypipes): This would be an easy area to parallelize # since we're simply sending off parallelizable requests # to Swift to delete stuff. It's not like we're going to # be hogging up network or file I/O here... connection.delete_object( obj_container, segment['name']) else: connection.delete_object(location.container, location.obj) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise def _create_container_if_missing(self, container, connection): """ Creates a missing container in Swift if the ``swift_store_create_container_on_put`` option is set. 
:param container: Name of container to create :param connection: Connection to swift service """ try: connection.head_container(container) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: if CONF.swift_store_create_container_on_put: try: connection.put_container(container) except swiftclient.ClientException, e: msg = _("Failed to add container to Swift.\n" "Got error from Swift: %(e)s") % locals() raise glance.store.BackendException(msg) else: msg = (_("The container %(container)s does not exist in " "Swift. Please set the " "swift_store_create_container_on_put option" "to add container to Swift automatically.") % locals()) raise glance.store.BackendException(msg) else: raise def get_connection(self): raise NotImplemented() def create_location(self): raise NotImplemented() class SingleTenantStore(BaseStore): EXAMPLE_URL = "swift://<USER>:<KEY>@<AUTH_ADDRESS>/<CONTAINER>/<FILE>" def configure(self): super(SingleTenantStore, self).configure() self.auth_version = self._option_get('swift_store_auth_version') def configure_add(self): self.auth_address = self._option_get('swift_store_auth_address') if self.auth_address.startswith('http://'): self.scheme = 'swift+http' else: self.scheme = 'swift+https' self.container = CONF.swift_store_container self.user = self._option_get('swift_store_user') self.key = self._option_get('swift_store_key') def create_location(self, image_id): specs = {'scheme': self.scheme, 'container': self.container, 'obj': str(image_id), 'auth_or_store_url': self.auth_address, 'user': self.user, 'key': self.key} return StoreLocation(specs) def get_connection(self, location): if not location.user: reason = (_("Location is missing user:password information.")) LOG.debug(reason) raise exception.BadStoreUri(message=reason) auth_url = location.swift_url if not auth_url.endswith('/'): auth_url += '/' if self.auth_version == '2': try: tenant_name, user = location.user.split(':') except ValueError: reason = (_("Badly formed 
tenant:user '%(user)s' in " "Swift URI") % {'user': location.user}) LOG.debug(reason) raise exception.BadStoreUri() else: tenant_name = None user = location.user os_options = {} if self.region: os_options['region_name'] = self.region os_options['endpoint_type'] = self.endpoint_type os_options['service_type'] = self.service_type return swiftclient.Connection( auth_url, user, location.key, tenant_name=tenant_name, snet=self.snet, auth_version=self.auth_version, os_options=os_options) class MultiTenantStore(BaseStore): EXAMPLE_URL = "swift://<SWIFT_URL>/<CONTAINER>/<FILE>" def configure_add(self): self.container = CONF.swift_store_container if self.context is None: reason = _("Multi-tenant Swift storage requires a context.") raise exception.BadStoreConfiguration(store_name="swift", reason=reason) if self.context.service_catalog is None: reason = _("Multi-tenant Swift storage requires " "a service catalog.") raise exception.BadStoreConfiguration(store_name="swift", reason=reason) self.storage_url = auth.get_endpoint( self.context.service_catalog, service_type=self.service_type, endpoint_region=self.region, endpoint_type=self.endpoint_type) if self.storage_url.startswith('http://'): self.scheme = 'swift+http' else: self.scheme = 'swift+https' def delete(self, location, connection=None): if not connection: connection = self.get_connection(location.store_location) super(MultiTenantStore, self).delete(location, connection) connection.delete_container(location.store_location.container) def set_acls(self, location, public=False, read_tenants=None, write_tenants=None, connection=None): location = location.store_location if not connection: connection = self.get_connection(location) if read_tenants is None: read_tenants = [] if write_tenants is None: write_tenants = [] headers = {} if public: headers['X-Container-Read'] = ".r:*" elif read_tenants: headers['X-Container-Read'] = ','.join(read_tenants) else: headers['X-Container-Read'] = '' write_tenants.extend(self.admin_tenants) 
if write_tenants: headers['X-Container-Write'] = ','.join(write_tenants) else: headers['X-Container-Write'] = '' try: connection.post_container(location.container, headers=headers) except swiftclient.ClientException, e: if e.http_status == httplib.NOT_FOUND: uri = location.get_uri() msg = _("Swift could not find image at URI.") raise exception.NotFound(msg) else: raise def create_location(self, image_id): specs = {'scheme': self.scheme, 'container': self.container + '_' + str(image_id), 'obj': str(image_id), 'auth_or_store_url': self.storage_url} return StoreLocation(specs) def get_connection(self, location): return swiftclient.Connection( None, self.context.user, None, preauthurl=location.swift_url, preauthtoken=self.context.auth_tok, tenant_name=self.context.tenant, auth_version='2', snet=self.snet) class ChunkReader(object): def __init__(self, fd, checksum, total): self.fd = fd self.checksum = checksum self.total = total self.bytes_read = 0 def read(self, i): left = self.total - self.bytes_read if i > left: i = left result = self.fd.read(i) self.bytes_read += len(result) self.checksum.update(result) return result
./CrossVul/dataset_final_sorted/CWE-200/py/good_5543_0
crossvul-python_data_bad_4177_0
import time
from pydoc import locate

from django.conf import settings

# Default values for every recognised config key.  The active backend is
# seeded with these on startup; keys listed in 'sensitive_fields' are the
# ones stripped from the response of get_all_non_sensitive().
DEFAULT_CONFIG = {
    'config_version': 4,
    'flag_prefix': 'ractf',
    'graph_members': 10,
    'register_end_time': -1,
    'end_time': time.time() + 7 * 24 * 60 * 60,
    'start_time': time.time(),
    'register_start_time': time.time(),
    'team_size': -1,
    'email_regex': '',
    'email_domain': '',
    'login_provider': 'basic_auth',
    'registration_provider': 'basic_auth',
    'token_provider': 'basic_auth',
    'enable_bot_users': True,
    'enable_ctftime': True,
    'enable_flag_submission': True,
    'enable_flag_submission_after_competition': True,
    'enable_force_admin_2fa': False,
    'enable_track_incorrect_submissions': True,
    'enable_login': True,
    'enable_prelogin': True,
    'enable_maintenance_mode': False,
    'enable_registration': True,
    'enable_scoreboard': True,
    'enable_scoring': True,
    'enable_solve_broadcast': True,
    'enable_teams': True,
    'enable_team_join': True,
    'enable_view_challenges_after_competion': True,
    'enable_team_leave': False,
    'invite_required': False,
    'hide_scoreboard_at': -1,
    'setup_wizard_complete': False,
    'sensitive_fields': ['sensitive_fields', 'enable_force_admin_2fa']
}

# Instantiate the configured backend class (dotted path from Django settings)
# and seed it with the defaults.
backend = locate(settings.CONFIG['BACKEND'])()
backend.load(defaults=DEFAULT_CONFIG)


def get(key):
    """Return the value stored for *key* in the config backend."""
    return backend.get(key)


def set(key, value):  # noqa: A001 - shadows builtin, but the name is public API
    """Store *value* under *key* in the config backend."""
    backend.set(key, value)


def get_all():
    """Return the full config mapping, including sensitive fields."""
    return backend.get_all()


def get_all_non_sensitive():
    """Return a copy of the config with all sensitive fields removed.

    The result is safe to expose to non-admin users.
    """
    sensitive = backend.get('sensitive_fields')
    # Copy before removing keys: the original code deleted entries from the
    # dict returned by backend.get_all() in place, which (depending on the
    # backend) could mutate the backend's own state and silently drop
    # sensitive keys from the live configuration.
    config = dict(backend.get_all())
    for field in sensitive:
        # pop with a default so a sensitive field that is not currently set
        # does not raise KeyError.
        config.pop(field, None)
    return config


def set_bulk(values: dict):
    """Store every (key, value) pair from *values* in the backend."""
    for key, value in values.items():
        set(key, value)


def add_plugin_config(name, config):
    """Register a plugin's default config block under *name*."""
    DEFAULT_CONFIG[name] = config
./CrossVul/dataset_final_sorted/CWE-200/py/bad_4177_0
crossvul-python_data_good_3807_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack LLC
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Utility helpers for the libvirt driver.

Thin wrappers around command-line tools (qemu-img, LVM tools, dd, cp/rsync/
scp, chown) plus small filesystem helpers.  Most functions shell out via
``utils.execute``; several require root and pass ``run_as_root=True``.
"""

import os
import time

from lxml import etree

from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images

CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def execute(*args, **kwargs):
    # Indirection point so tests can stub out command execution.
    return utils.execute(*args, **kwargs)


def get_iscsi_initiator():
    """Get iscsi initiator name for this machine"""
    # NOTE(vish) openiscsi stores initiator name in a file that
    #            needs root permission to read.
    contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
    for l in contents.split('\n'):
        if l.startswith('InitiatorName='):
            # Return everything after the '=' with surrounding
            # whitespace stripped.
            return l[l.index('=') + 1:].strip()
    # Implicitly returns None when no InitiatorName line is present.


def create_image(disk_format, path, size):
    """Create a disk image

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    execute('qemu-img', 'create', '-f', disk_format, path, size)


def create_cow_image(backing_file, path):
    """Create COW image

    Creates a COW image with the given backing file

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    """
    base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
    cow_opts = []
    if backing_file:
        cow_opts += ['backing_file=%s' % backing_file]
        # Inspect the backing file so the COW layer can inherit settings.
        base_details = images.qemu_img_info(backing_file)
    else:
        base_details = None
    # This doesn't seem to get inherited so force it to...
    # http://paste.ubuntu.com/1213295/
    # TODO(harlowja) probably file a bug against qemu-img/qemu
    if base_details and base_details.cluster_size is not None:
        cow_opts += ['cluster_size=%s' % base_details.cluster_size]
    # For now don't inherit this due the following discussion...
    # See: http://www.gossamer-threads.com/lists/openstack/dev/10592
    # if 'preallocation' in base_details:
    #     cow_opts += ['preallocation=%s' % base_details['preallocation']]
    if base_details and base_details.encryption:
        cow_opts += ['encryption=%s' % base_details.encryption]
    if cow_opts:
        # Format as a comma separated list
        csv_opts = ",".join(cow_opts)
        cow_opts = ['-o', csv_opts]
    cmd = base_cmd + cow_opts + [path]
    execute(*cmd)


def create_lvm_image(vg, lv, size, sparse=False):
    """Create LVM image.

    Creates a LVM image with given size.

    :param vg: existing volume group which should hold this image
    :param lv: name for this image (logical volume)
    :param size: size of image in bytes
    :param sparse: create sparse logical volume
    :raises RuntimeError: if the volume group lacks the required free space
    """
    free_space = volume_group_free_space(vg)

    def check_size(size):
        # Raise if the VG cannot hold the requested allocation.
        if size > free_space:
            raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
                                 ' Only %(free_space)db available,'
                                 ' but %(size)db required'
                                 ' by volume %(lv)s.') % locals())

    if sparse:
        # Sparse volumes only pre-allocate a small amount up front; warn if
        # the VG could not hold the full virtual size when written out.
        preallocated_space = 64 * 1024 * 1024
        check_size(preallocated_space)
        if free_space < size:
            LOG.warning(_('Volume group %(vg)s will not be able'
                          ' to hold sparse volume %(lv)s.'
                          ' Virtual volume size is %(size)db,'
                          ' but free space on volume group is'
                          ' only %(free_space)db.') % locals())

        cmd = ('lvcreate', '-L', '%db' % preallocated_space,
               '--virtualsize', '%db' % size, '-n', lv, vg)
    else:
        check_size(size)
        cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
    execute(*cmd, run_as_root=True, attempts=3)


def volume_group_free_space(vg):
    """Return available space on volume group in bytes.

    :param vg: volume group name
    """
    out, err = execute('vgs', '--noheadings', '--nosuffix',
                       '--units', 'b', '-o', 'vg_free', vg,
                       run_as_root=True)
    return int(out.strip())


def list_logical_volumes(vg):
    """List logical volumes paths for given volume group.

    :param vg: volume group name
    """
    out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
                       run_as_root=True)
    return [line.strip() for line in out.splitlines()]


def logical_volume_info(path):
    """Get logical volume info.

    :param path: logical volume path
    :returns: dict mapping lvs field names to values
    :raises RuntimeError: if *path* is not an LVM logical volume
    """
    out, err = execute('lvs', '-o', 'vg_all,lv_all',
                       '--separator', '|', path, run_as_root=True)
    info = [line.split('|') for line in out.splitlines()]

    # Expect exactly a header row and one data row.
    if len(info) != 2:
        raise RuntimeError(_("Path %s must be LVM logical volume") % path)

    return dict(zip(*info))


def logical_volume_size(path):
    """Get logical volume size in bytes.

    :param path: logical volume path
    """
    # TODO(p-draigbrady) Possibly replace with the more general
    # use of blockdev --getsize64 in future
    out, _err = execute('lvs', '-o', 'lv_size', '--noheadings', '--units',
                        'b', '--nosuffix', path, run_as_root=True)

    return int(out)


def clear_logical_volume(path):
    """Obfuscate the logical volume.

    Overwrites the volume with zeros in up to three passes of decreasing
    block size (1 MiB, 1 KiB, 1 B) so volumes of any byte size are fully
    covered without dd's iflag=count_bytes.

    :param path: logical volume path
    """
    # TODO(p-draigbrady): We currently overwrite with zeros
    # but we may want to make this configurable in future
    # for more or less security conscious setups.

    vol_size = logical_volume_size(path)
    bs = 1024 * 1024
    direct_flags = ('oflag=direct',)
    remaining_bytes = vol_size

    # The loop caters for versions of dd that
    # don't support the iflag=count_bytes option.
    # NOTE(review): relies on Python 2 integer '/' (floor) division for the
    # block arithmetic below — would need '//' under Python 3.
    while remaining_bytes:
        zero_blocks = remaining_bytes / bs
        seek_blocks = (vol_size - remaining_bytes) / bs
        zero_cmd = ('dd', 'bs=%s' % bs,
                    'if=/dev/zero', 'of=%s' % path,
                    'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
        zero_cmd += direct_flags
        if zero_blocks:
            utils.execute(*zero_cmd, run_as_root=True)
        remaining_bytes %= bs
        bs /= 1024  # Limit to 3 iterations
        direct_flags = ()  # Only use O_DIRECT with initial block size


def remove_logical_volumes(*paths):
    """Remove one or more logical volume."""
    # Zero each volume first so its contents cannot leak to the next tenant.
    for path in paths:
        clear_logical_volume(path)

    if paths:
        lvremove = ('lvremove', '-f') + paths
        execute(*lvremove, attempts=3, run_as_root=True)


def pick_disk_driver_name(is_block_dev=False):
    """Pick the libvirt primary backend driver name

    If the hypervisor supports multiple backend drivers, then the name
    attribute selects the primary backend driver name, while the optional
    type attribute provides the sub-type.  For example, xen supports a name
    of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2",
    while qemu only supports a name of "qemu", but multiple types including
    "raw", "bochs", "qcow2", and "qed".

    :param is_block_dev: whether the target is a block device (xen only)
    :returns: driver_name or None
    """
    if CONF.libvirt_type == "xen":
        if is_block_dev:
            return "phy"
        else:
            return "tap"
    elif CONF.libvirt_type in ('kvm', 'qemu'):
        return "qemu"
    else:
        # UML doesn't want a driver_name set
        return None


def get_disk_size(path):
    """Get the (virtual) size of a disk image

    :param path: Path to the disk image
    :returns: Size (in bytes) of the given disk image as it would be seen
              by a virtual machine.
    """
    size = images.qemu_img_info(path).virtual_size
    return int(size)


def get_disk_backing_file(path):
    """Get the backing file of a disk image

    :param path: Path to the disk image
    :returns: a path to the image's backing store
    """
    backing_file = images.qemu_img_info(path).backing_file
    if backing_file:
        # Only the file name, not the directory, is returned.
        backing_file = os.path.basename(backing_file)

    return backing_file


def copy_image(src, dest, host=None):
    """Copy a disk image to an existing directory

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host
    """

    if not host:
        # We shell out to cp because that will intelligently copy
        # sparse files.  I.E. holes will not be written to DEST,
        # rather recreated efficiently.  In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        execute('cp', src, dest)
    else:
        dest = "%s:%s" % (host, dest)
        # Try rsync first as that can compress and create sparse dest files.
        # Note however that rsync currently doesn't read sparse files
        # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
        # At least network traffic is mitigated with compression.
        try:
            # Do a relatively light weight test first, so that we
            # can fall back to scp, without having run out of space
            # on the destination for example.
            execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
        except exception.ProcessExecutionError:
            execute('scp', src, dest)
        else:
            execute('rsync', '--sparse', '--compress', src, dest)


def write_to_file(path, contents, umask=None):
    """Write the given contents to a file

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set when creating this file (will be reset)
    """
    # NOTE(review): `if umask:` skips a umask of 0 because it is falsy —
    # presumably `is not None` was intended; confirm before changing.
    if umask:
        saved_umask = os.umask(umask)

    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        if umask:
            os.umask(saved_umask)


def chown(path, owner):
    """Change ownership of file or directory

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    execute('chown', owner, path, run_as_root=True)


def create_snapshot(disk_path, snapshot_name):
    """Create a snapshot in a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    """
    qemu_img_cmd = ('qemu-img',
                    'snapshot',
                    '-c',
                    snapshot_name,
                    disk_path)
    # NOTE(vish): libvirt changes ownership of images
    execute(*qemu_img_cmd, run_as_root=True)


def delete_snapshot(disk_path, snapshot_name):
    """Delete a snapshot in a disk image

    :param disk_path: Path to disk image
    :param snapshot_name: Name of snapshot in disk image
    """
    qemu_img_cmd = ('qemu-img',
                    'snapshot',
                    '-d',
                    snapshot_name,
                    disk_path)
    # NOTE(vish): libvirt changes ownership of images
    execute(*qemu_img_cmd, run_as_root=True)


def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
    """Extract a named snapshot from a disk image

    :param disk_path: Path to disk image
    :param source_fmt: Format of the source disk image
    :param snapshot_name: Name of snapshot in disk image
    :param out_path: Desired path of extracted snapshot
    :param dest_fmt: Format to convert the extracted snapshot to
    """
    # NOTE(markmc): ISO is just raw to qemu-img
    if dest_fmt == 'iso':
        dest_fmt = 'raw'
    qemu_img_cmd = ('qemu-img',
                    'convert',
                    '-f',
                    source_fmt,
                    '-O',
                    dest_fmt,
                    '-s',
                    snapshot_name,
                    disk_path,
                    out_path)
    execute(*qemu_img_cmd)


def load_file(path):
    """Read contents of file

    :param path: File to read
    """
    with open(path, 'r') as fp:
        return fp.read()


def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    # NOTE(review): uses the Python 2-only `file` builtin.
    return file(*args, **kwargs)


def file_delete(path):
    """Delete (unlink) file

    Note: The reason this is kept in a separate module is to easily
          be able to provide a stub module that doesn't alter system
          state at all (for unit tests)
    """
    return os.unlink(path)


def find_disk(virt_dom):
    """Find root device path for instance

    May be file or device

    :param virt_dom: libvirt domain object
    :raises RuntimeError: if no root device path can be found in the
                          domain XML
    """
    xml_desc = virt_dom.XMLDesc(0)
    domain = etree.fromstring(xml_desc)
    if CONF.libvirt_type == 'lxc':
        # LXC exposes the rootfs as a filesystem mount; derive the disk
        # path from the rootfs directory.
        source = domain.find('devices/filesystem/source')
        disk_path = source.get('dir')
        disk_path = disk_path[0:disk_path.rfind('rootfs')]
        disk_path = os.path.join(disk_path, 'disk')
    else:
        source = domain.find('devices/disk/source')
        disk_path = source.get('file') or source.get('dev')

    if not disk_path:
        raise RuntimeError(_("Can't retrieve root device path "
                             "from instance libvirt configuration"))

    return disk_path


def get_disk_type(path):
    """Retrieve disk type (raw, qcow2, lvm) for given file"""
    if path.startswith('/dev'):
        return 'lvm'

    return images.qemu_img_info(path).file_format


def get_fs_info(path):
    """Get free/used/total space info for a filesystem

    :param path: Any dirent on the filesystem
    :returns: A dict containing:

             :free: How much space is free (in bytes)
             :used: How much space is used (in bytes)
             :total: How big the filesystem is (in bytes)
    """
    hddinfo = os.statvfs(path)
    total = hddinfo.f_frsize * hddinfo.f_blocks
    free = hddinfo.f_frsize * hddinfo.f_bavail
    used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree)
    return {'total': total,
            'free': free,
            'used': used}


def fetch_image(context, target, image_id, user_id, project_id):
    """Grab image"""
    images.fetch_to_raw(context, image_id, target, user_id, project_id)
./CrossVul/dataset_final_sorted/CWE-200/py/good_3807_0
crossvul-python_data_good_5622_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Keystone configuration: option registration and logging setup."""

import gettext
import os
import sys

from oslo.config import cfg

from keystone.common import logging


gettext.install('keystone', unicode=1)

_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# Auth methods registered by default; anything else in CONF.auth.methods is
# treated as an extension-supplied plugin (see setup_authentication()).
_DEFAULT_AUTH_METHODS = ['password', 'token']

COMMON_CLI_OPTS = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

LOGGING_CLI_OPTS = [
    cfg.StrOpt('log-config',
               metavar='PATH',
               help='If this option is specified, the logging configuration '
                    'file specified is used and overrides any other logging '
                    'options specified. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=_DEFAULT_LOG_FORMAT,
               metavar='FORMAT',
               help='A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records.'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               help='Name of log file to output. '
                    'If not set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               help='The directory in which to store log files. '
                    '(will be prepended to --log-file)'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines.')
]

CONF = cfg.CONF


def setup_logging(conf):
    """
    Sets up the logging options for a log with supplied name

    :param conf: a cfg.ConfOpts object
    :raises RuntimeError: if conf.log_config points at a missing file
    :raises ValueError: if conf.syslog_log_facility is not a valid facility
    """

    if conf.log_config:
        # Use a logging configuration file for all settings...
        if os.path.exists(conf.log_config):
            logging.config.fileConfig(conf.log_config)
            return
        else:
            raise RuntimeError(_('Unable to locate specified logging '
                                 'config file: %s') % conf.log_config)

    root_logger = logging.root
    if conf.debug:
        root_logger.setLevel(logging.DEBUG)
    elif conf.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)

    formatter = logging.Formatter(conf.log_format, conf.log_date_format)

    if conf.use_syslog:
        # NOTE(review): SysLogHandler/WatchedFileHandler are resolved on the
        # keystone.common.logging wrapper — presumably it re-exports
        # logging.handlers; confirm against that module.
        try:
            facility = getattr(logging.SysLogHandler,
                               conf.syslog_log_facility)
        except AttributeError:
            raise ValueError(_('Invalid syslog facility'))

        handler = logging.SysLogHandler(address='/dev/log',
                                        facility=facility)
    elif conf.log_file:
        logfile = conf.log_file
        if conf.log_dir:
            logfile = os.path.join(conf.log_dir, logfile)
        handler = logging.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)


def setup_authentication():
    """Register config options for any non-default auth methods.

    Used by extensions that add their own auth plugins.
    """
    for method_name in CONF.auth.methods:
        if method_name not in _DEFAULT_AUTH_METHODS:
            register_str(method_name, group="auth")


# --- thin wrappers over oslo.config registration -------------------------
# Each helper registers one typed option on the given conf object (default:
# the global CONF), optionally inside a named group.

def register_str(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.StrOpt(*args, **kw), group=group)


def register_cli_str(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.StrOpt(*args, **kw), group=group)


def register_list(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.ListOpt(*args, **kw), group=group)


def register_cli_list(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.ListOpt(*args, **kw), group=group)


def register_bool(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.BoolOpt(*args, **kw), group=group)


def register_cli_bool(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.BoolOpt(*args, **kw), group=group)


def register_int(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.IntOpt(*args, **kw), group=group)


def register_cli_int(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.IntOpt(*args, **kw), group=group)


def configure():
    """Register every keystone config option with its default value."""
    CONF.register_cli_opts(COMMON_CLI_OPTS)
    CONF.register_cli_opts(LOGGING_CLI_OPTS)

    register_cli_bool('standard-threads', default=False)

    register_cli_str('pydev-debug-host', default=None)
    register_cli_int('pydev-debug-port', default=None)

    register_str('admin_token', secret=True, default='ADMIN')
    register_str('bind_host', default='0.0.0.0')
    register_int('compute_port', default=8774)
    register_int('admin_port', default=35357)
    register_int('public_port', default=5000)
    register_str(
        'public_endpoint', default='http://localhost:%(public_port)d/')
    register_str('admin_endpoint', default='http://localhost:%(admin_port)d/')
    register_str('onready')
    register_str('auth_admin_prefix', default='')
    register_str('policy_file', default='policy.json')
    register_str('policy_default_rule', default=None)
    # default max request size is 112k
    register_int('max_request_body_size', default=114688)
    register_int('max_param_size', default=64)
    # we allow tokens to be a bit larger to accommodate PKI
    register_int('max_token_size', default=8192)
    register_str(
        'member_role_id', default='9fe2ff9ee4384b1894a90878d3e92bab')
    register_str('member_role_name', default='_member_')

    # identity
    register_str('default_domain_id', group='identity', default='default')

    # trust
    register_bool('enabled', group='trust', default=True)

    # ssl
    register_bool('enable', group='ssl', default=False)
    register_str('certfile', group='ssl', default=None)
    register_str('keyfile', group='ssl', default=None)
    register_str('ca_certs', group='ssl', default=None)
    register_bool('cert_required', group='ssl', default=False)
    # signing
    register_str(
        'token_format', group='signing', default="PKI")
    register_str(
        'certfile',
        group='signing',
        default="/etc/keystone/ssl/certs/signing_cert.pem")
    register_str(
        'keyfile',
        group='signing',
        default="/etc/keystone/ssl/private/signing_key.pem")
    register_str(
        'ca_certs',
        group='signing',
        default="/etc/keystone/ssl/certs/ca.pem")
    register_int('key_size', group='signing', default=1024)
    register_int('valid_days', group='signing', default=3650)
    register_str('ca_password', group='signing', default=None)

    # sql
    register_str('connection', group='sql', default='sqlite:///keystone.db')
    register_int('idle_timeout', group='sql', default=200)

    # per-subsystem backend drivers
    register_str(
        'driver',
        group='catalog',
        default='keystone.catalog.backends.sql.Catalog')
    register_str(
        'driver',
        group='identity',
        default='keystone.identity.backends.sql.Identity')
    register_str(
        'driver', group='policy',
        default='keystone.policy.backends.sql.Policy')
    register_str(
        'driver', group='token', default='keystone.token.backends.kvs.Token')
    register_str(
        'driver', group='trust', default='keystone.trust.backends.sql.Trust')
    register_str(
        'driver', group='ec2', default='keystone.contrib.ec2.backends.kvs.Ec2')
    register_str(
        'driver',
        group='stats',
        default='keystone.contrib.stats.backends.kvs.Stats')

    # ldap
    register_str('url', group='ldap', default='ldap://localhost')
    register_str('user', group='ldap', default=None)
    register_str('password', group='ldap', secret=True, default=None)
    register_str('suffix', group='ldap', default='cn=example,cn=com')
    register_bool('use_dumb_member', group='ldap', default=False)
    register_str('dumb_member', group='ldap', default='cn=dumb,dc=nonexistent')
    register_bool('allow_subtree_delete', group='ldap', default=False)
    register_str('query_scope', group='ldap', default='one')
    register_int('page_size', group='ldap', default=0)
    register_str('alias_dereferencing', group='ldap', default='default')

    register_str('user_tree_dn', group='ldap', default=None)
    register_str('user_filter', group='ldap', default=None)
    register_str('user_objectclass', group='ldap', default='inetOrgPerson')
    register_str('user_id_attribute', group='ldap', default='cn')
    register_str('user_name_attribute', group='ldap', default='sn')
    register_str('user_mail_attribute', group='ldap', default='email')
    register_str('user_pass_attribute', group='ldap', default='userPassword')
    register_str('user_enabled_attribute', group='ldap', default='enabled')
    register_str(
        'user_domain_id_attribute',
        group='ldap',
        default='businessCategory')
    register_int('user_enabled_mask', group='ldap', default=0)
    register_str('user_enabled_default', group='ldap', default='True')
    register_list(
        'user_attribute_ignore', group='ldap', default='tenant_id,tenants')
    register_bool('user_allow_create', group='ldap', default=True)
    register_bool('user_allow_update', group='ldap', default=True)
    register_bool('user_allow_delete', group='ldap', default=True)
    register_bool('user_enabled_emulation', group='ldap', default=False)
    register_str('user_enabled_emulation_dn', group='ldap', default=None)

    register_str('tenant_tree_dn', group='ldap', default=None)
    register_str('tenant_filter', group='ldap', default=None)
    register_str('tenant_objectclass', group='ldap', default='groupOfNames')
    register_str('tenant_id_attribute', group='ldap', default='cn')
    register_str('tenant_member_attribute', group='ldap', default='member')
    register_str('tenant_name_attribute', group='ldap', default='ou')
    register_str('tenant_desc_attribute', group='ldap', default='description')
    register_str('tenant_enabled_attribute', group='ldap', default='enabled')
    register_str(
        'tenant_domain_id_attribute',
        group='ldap',
        default='businessCategory')
    register_list('tenant_attribute_ignore', group='ldap', default='')
    register_bool('tenant_allow_create', group='ldap', default=True)
    register_bool('tenant_allow_update', group='ldap', default=True)
    register_bool('tenant_allow_delete', group='ldap', default=True)
    register_bool('tenant_enabled_emulation', group='ldap', default=False)
    register_str('tenant_enabled_emulation_dn', group='ldap', default=None)

    register_str('role_tree_dn', group='ldap', default=None)
    register_str('role_filter', group='ldap', default=None)
    register_str(
        'role_objectclass', group='ldap', default='organizationalRole')
    register_str('role_id_attribute', group='ldap', default='cn')
    register_str('role_name_attribute', group='ldap', default='ou')
    register_str('role_member_attribute', group='ldap', default='roleOccupant')
    register_list('role_attribute_ignore', group='ldap', default='')
    register_bool('role_allow_create', group='ldap', default=True)
    register_bool('role_allow_update', group='ldap', default=True)
    register_bool('role_allow_delete', group='ldap', default=True)

    register_str('group_tree_dn', group='ldap', default=None)
    register_str('group_filter', group='ldap', default=None)
    register_str('group_objectclass', group='ldap', default='groupOfNames')
    register_str('group_id_attribute', group='ldap', default='cn')
    register_str('group_name_attribute', group='ldap', default='ou')
    register_str('group_member_attribute', group='ldap', default='member')
    register_str('group_desc_attribute', group='ldap', default='description')
    register_str(
        'group_domain_id_attribute',
        group='ldap',
        default='businessCategory')
    register_list('group_attribute_ignore', group='ldap', default='')
    register_bool('group_allow_create', group='ldap', default=True)
    register_bool('group_allow_update', group='ldap', default=True)
    register_bool('group_allow_delete', group='ldap', default=True)

    register_str('domain_tree_dn', group='ldap', default=None)
    register_str('domain_filter', group='ldap', default=None)
    register_str('domain_objectclass', group='ldap', default='groupOfNames')
    register_str('domain_id_attribute', group='ldap', default='cn')
    register_str('domain_name_attribute', group='ldap', default='ou')
    register_str('domain_member_attribute', group='ldap', default='member')
    register_str('domain_desc_attribute', group='ldap', default='description')
    register_str('domain_enabled_attribute', group='ldap', default='enabled')
    register_list('domain_attribute_ignore', group='ldap', default='')
    register_bool('domain_allow_create', group='ldap', default=True)
    register_bool('domain_allow_update', group='ldap', default=True)
    register_bool('domain_allow_delete', group='ldap', default=True)
    register_bool('domain_enabled_emulation', group='ldap', default=False)
    register_str('domain_enabled_emulation_dn', group='ldap', default=None)

    # pam
    register_str('url', group='pam', default=None)
    register_str('userid', group='pam', default=None)
    register_str('password', group='pam', default=None)

    # default authentication methods
    register_list('methods', group='auth', default=_DEFAULT_AUTH_METHODS)
    # BUGFIX: the original registered the plugins swapped — the 'password'
    # option defaulted to the token plugin and vice versa.  Each method must
    # default to its own plugin class.
    register_str(
        'password', group='auth',
        default='keystone.auth.plugins.password.Password')
    register_str(
        'token', group='auth',
        default='keystone.auth.plugins.token.Token')

    # register any non-default auth methods here (used by extensions, etc)
    for method_name in CONF.auth.methods:
        if method_name not in _DEFAULT_AUTH_METHODS:
            register_str(method_name, group='auth')
./CrossVul/dataset_final_sorted/CWE-200/py/good_5622_0
crossvul-python_data_bad_3325_2
# -*- coding: utf-8 -*- ''' Manage information about regular files, directories, and special files on the minion, set/read user, group, mode, and data ''' # TODO: We should add the capability to do u+r type operations here # some time in the future from __future__ import absolute_import, print_function # Import python libs import datetime import difflib import errno import fileinput import fnmatch import itertools import logging import operator import os import re import shutil import stat import string import sys import tempfile import time import glob import hashlib import mmap from functools import reduce # pylint: disable=redefined-builtin from collections import Iterable, Mapping # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.ext.six as six from salt.ext.six.moves import range, zip from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: enable=import-error,no-name-in-module,redefined-builtin try: import grp import pwd except ImportError: pass # Import salt libs import salt.utils import salt.utils.atomicfile import salt.utils.find import salt.utils.filebuffer import salt.utils.files import salt.utils.locales import salt.utils.templates import salt.utils.url from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message log = logging.getLogger(__name__) __func_alias__ = { 'makedirs_': 'makedirs' } HASHES = { 'sha512': 128, 'sha384': 96, 'sha256': 64, 'sha224': 56, 'sha1': 40, 'md5': 32, } HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)]) def __virtual__(): ''' Only work on POSIX-like systems ''' # win_file takes care of windows if salt.utils.is_windows(): return (False, 'The file execution module cannot be loaded: only available on non-Windows systems - use win_file instead.') return True def __clean_tmp(sfn): ''' Clean out a template temp file ''' if sfn.startswith(os.path.join(tempfile.gettempdir(), 
            salt.utils.files.TEMPFILE_PREFIX)):
        # Don't remove if it exists in file_roots (any saltenv)
        all_roots = itertools.chain.from_iterable(
                six.itervalues(__opts__['file_roots']))
        in_roots = any(sfn.startswith(root) for root in all_roots)
        # Only clean up files that exist
        if os.path.exists(sfn) and not in_roots:
            os.remove(sfn)


def _error(ret, err_msg):
    '''
    Common function for setting error information for return dicts.
    Mutates ``ret`` in place and returns it for convenience.
    '''
    ret['result'] = False
    ret['comment'] = err_msg
    return ret


def _binary_replace(old, new):
    '''
    This function does NOT do any diffing, it just checks the old and new
    files to see if either is binary, and provides an appropriate string
    noting the difference between the two files. If neither file is binary,
    an empty string is returned.

    This function should only be run AFTER it has been determined that the
    files differ.
    '''
    old_isbin = not salt.utils.istextfile(old)
    new_isbin = not salt.utils.istextfile(new)
    if any((old_isbin, new_isbin)):
        if all((old_isbin, new_isbin)):
            return 'Replace binary file'
        elif old_isbin:
            return 'Replace binary file with text file'
        elif new_isbin:
            return 'Replace text file with binary file'
    return ''


def _get_bkroot():
    '''
    Get the location of the backup dir in the minion cache
    '''
    # Get the cachedir from the minion config
    return os.path.join(__salt__['config.get']('cachedir'), 'file_backup')


# NOTE(review): the parameter name ``str`` shadows the builtin; kept as-is to
# avoid breaking keyword callers, but a rename (e.g. ``str_``) would be safer.
def _splitlines_preserving_trailing_newline(str):
    '''
    Returns a list of the lines in the string, breaking at line boundaries
    and preserving a trailing newline (if present).

    Essentially, this works like ``str.splitlines(False)`` but preserves an
    empty line at the end. This is equivalent to the following code:

    ..
    code-block:: python

        lines = str.splitlines()
        if str.endswith('\n') or str.endswith('\r'):
            lines.append('')
    '''
    lines = str.splitlines()
    # A trailing newline means the last "line" is empty; splitlines() drops
    # it, so re-add the empty entry to keep diffs/joins round-trippable.
    if str.endswith('\n') or str.endswith('\r'):
        lines.append('')
    return lines


def gid_to_group(gid):
    '''
    Convert the group id to the group name on this system

    gid
        gid to convert to a group name

    CLI Example:

    .. code-block:: bash

        salt '*' file.gid_to_group 0
    '''
    try:
        gid = int(gid)
    except ValueError:
        # This is not an integer, maybe it's already the group name?
        gid = group_to_gid(gid)

    if gid == '':
        # Don't even bother to feed it to grp
        return ''

    try:
        return grp.getgrgid(gid).gr_name
    except (KeyError, NameError):
        # If group is not present, fall back to the gid.
        return gid


def group_to_gid(group):
    '''
    Convert the group to the gid on this system

    group
        group to convert to its gid

    CLI Example:

    .. code-block:: bash

        salt '*' file.group_to_gid root
    '''
    if group is None:
        return ''
    try:
        if isinstance(group, int):
            # Already a gid; nothing to look up.
            return group
        return grp.getgrnam(group).gr_gid
    except KeyError:
        return ''


def get_gid(path, follow_symlinks=True):
    '''
    Return the id of the group that owns a given file

    path
        file or directory of which to get the gid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_gid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1)


def get_group(path, follow_symlinks=True):
    '''
    Return the group that owns a given file

    path
        file or directory of which to get the group

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_group /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('group', False)


def uid_to_user(uid):
    '''
    Convert a uid to a user name

    uid
        uid to convert to a username

    CLI Example:

    ..
    code-block:: bash

        salt '*' file.uid_to_user 0
    '''
    try:
        return pwd.getpwuid(uid).pw_name
    except (KeyError, NameError):
        # If user is not present, fall back to the uid.
        return uid


def user_to_uid(user):
    '''
    Convert user name to a uid

    user
        user name to convert to its uid

    CLI Example:

    .. code-block:: bash

        salt '*' file.user_to_uid root
    '''
    if user is None:
        # Default to the user running the minion.
        user = salt.utils.get_user()
    try:
        if isinstance(user, int):
            # Already a uid; nothing to look up.
            return user
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        return ''


def get_uid(path, follow_symlinks=True):
    '''
    Return the id of the user that owns a given file

    path
        file or directory of which to get the uid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_uid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('uid', -1)


def get_user(path, follow_symlinks=True):
    '''
    Return the user that owns a given file

    path
        file or directory of which to get the user

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_user /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('user', False)


def get_mode(path, follow_symlinks=True):
    '''
    Return the mode of a file

    path
        file or directory of which to get the mode

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_mode /etc/passwd

    .. versionchanged:: 2014.1.0
        ``follow_symlinks`` option added
    '''
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('mode', '')


def set_mode(path, mode):
    '''
    Set the mode of a file

    path
        file or directory of which to set the mode

    mode
        mode to set the path to

    CLI Example:

    ..
    code-block:: bash

        salt '*' file.set_mode /etc/passwd 0644
    '''
    path = os.path.expanduser(path)

    # Normalize octal notations like '0644', '0o644', 'O644' to '644'.
    mode = str(mode).lstrip('0Oo')
    if not mode:
        mode = '0'
    if not os.path.exists(path):
        raise CommandExecutionError('{0}: File not found'.format(path))
    try:
        os.chmod(path, int(mode, 8))
    # NOTE(review): broad except returns an error *string* instead of
    # raising; callers must check the return value. Kept for compatibility.
    except Exception:
        return 'Invalid Mode ' + mode
    return get_mode(path)


def lchown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group without following
    symlinks.

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chown /etc/passwd root root
    '''
    path = os.path.expanduser(path)

    uid = user_to_uid(user)
    gid = group_to_gid(group)
    # NOTE(review): ``err`` is accumulated here but never returned or raised,
    # unlike chown() below — lookup failures are silently mapped to -1.
    err = ''
    if uid == '':
        if user:
            err += 'User does not exist\n'
        else:
            # No user requested: -1 tells lchown to leave the owner alone.
            uid = -1
    if gid == '':
        if group:
            err += 'Group does not exist\n'
        else:
            gid = -1

    return os.lchown(path, uid, gid)


def chown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chown /etc/passwd root root
    '''
    path = os.path.expanduser(path)

    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ''
    if uid == '':
        if user:
            err += 'User does not exist\n'
        else:
            # No user requested: -1 tells chown to leave the owner alone.
            uid = -1
    if gid == '':
        if group:
            err += 'Group does not exist\n'
        else:
            gid = -1
    if not os.path.exists(path):
        try:
            # Broken symlinks will return false, but still need to be chowned
            return os.lchown(path, uid, gid)
        except OSError:
            pass
        err += 'File not found'
    if err:
        # Return the accumulated error string rather than raising.
        return err
    return os.chown(path, uid, gid)


def chgrp(path, group):
    '''
    Change the group of a file

    path
        path to the file or directory

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chgrp /etc/passwd root
    '''
    path = os.path.expanduser(path)

    # Keep the current owner, change only the group.
    user = get_user(path)
    return chown(path, user, group)


def get_sum(path, form='sha256'):
    '''
    Return the checksum for the given file. The following checksum algorithms
    are supported:

    * md5
    * sha1
    * sha224
    * sha256 **(default)**
    * sha384
    * sha512

    path
        path to the file or directory

    form
        desired sum format

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_sum /etc/passwd sha512
    '''
    path = os.path.expanduser(path)
    if not os.path.isfile(path):
        return 'File not found'
    return salt.utils.get_hash(path, form, 4096)


def get_hash(path, form='sha256', chunk_size=65536):
    '''
    Get the hash sum of a file

    This is better than ``get_sum`` for the following reasons:
        - It does not read the entire file into memory.
        - It does not return a string on error. The returned value of
          ``get_sum`` cannot really be trusted since it is vulnerable to
          collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``

    path
        path to the file or directory

    form
        desired sum format

    chunk_size
        amount to sum at once

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_hash /etc/shadow
    '''
    return salt.utils.get_hash(os.path.expanduser(path), form, chunk_size)


def get_source_sum(file_name='',
                   source='',
                   source_hash=None,
                   source_hash_name=None,
                   saltenv='base'):
    '''
    .. versionadded:: 2016.11.0

    Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to
    obtain the hash and hash type from the parameters specified below.

    file_name
        Optional file name being managed, for matching with
        :py:func:`file.extract_hash <salt.modules.file.extract_hash>`.

        .. versionadded:: 2016.11.0

    source
        Source file, as used in :py:mod:`file <salt.states.file>` and other
        states. If ``source_hash`` refers to a file containing hashes, then
        this filename will be used to match a filename in that file. If the
        ``source_hash`` is a hash expression, then this argument will be
        ignored.

    source_hash
        Hash file/expression, as used in :py:mod:`file <salt.states.file>`
        and other states. If this value refers to a remote URL or absolute
        path to a local file, it will be cached and
        :py:func:`file.extract_hash <salt.modules.file.extract_hash>` will be
        used to obtain a hash from it.
    source_hash_name
        Specific file name to look for when ``source_hash`` refers to a
        remote file, used to disambiguate ambiguous matches.

        .. versionadded:: 2016.11.0

    saltenv : base
        Salt fileserver environment from which to retrive the source_hash.
        This value will only be used when ``source_hash`` refers to a file on
        the Salt fileserver (i.e. one beginning with ``salt://``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz
    '''
    def _invalid_source_hash_format():
        '''
        DRY helper for reporting invalid source_hash input
        '''
        raise CommandExecutionError(
            'Source hash {0} format is invalid. The supported formats are: '
            '1) a hash, 2) an expression in the format <hash_type>=<hash>, or '
            '3) either a path to a local file containing hashes, or a URI of '
            'a remote hash file. Supported protocols for remote hash files '
            'are: {1}. The hash may also not be of a valid length, the '
            'following are supported hash types and lengths: {2}.'.format(
                source_hash,
                ', '.join(salt.utils.files.VALID_PROTOS),
                ', '.join(
                    ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                     for x in sorted(HASHES_REVMAP)]
                ),
            )
        )

    # hash_fn, when set, points at a (possibly cached) file of hashes to be
    # parsed by extract_hash(); otherwise source_hash is a hash expression.
    hash_fn = None
    if os.path.isabs(source_hash):
        hash_fn = source_hash
    else:
        try:
            proto = _urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                # Remote hash file: cache it locally first.
                hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
                if not hash_fn:
                    raise CommandExecutionError(
                        'Source hash file {0} not found'.format(source_hash)
                    )
            else:
                if proto != '':
                    # Some unsupported protocol (e.g. foo://) is being used.
                    # We'll get into this else block if a hash expression
                    # (like md5=<md5 checksum here>), but in those cases, the
                    # protocol will be an empty string, in which case we avoid
                    # this error condition.
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            # source_hash was not a string (e.g. None or an int).
            _invalid_source_hash_format()

    if hash_fn is not None:
        ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
        if ret is None:
            _invalid_source_hash_format()
        return ret
    else:
        # The source_hash is a hash expression
        ret = {}
        try:
            ret['hash_type'], ret['hsum'] = \
                [x.strip() for x in source_hash.split('=', 1)]
        except AttributeError:
            _invalid_source_hash_format()
        except ValueError:
            # No hash type, try to figure out by hash length
            if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret['hsum'] = source_hash
            source_hash_len = len(source_hash)
            if source_hash_len in HASHES_REVMAP:
                ret['hash_type'] = HASHES_REVMAP[source_hash_len]
            else:
                _invalid_source_hash_format()

        if ret['hash_type'] not in HASHES:
            raise CommandExecutionError(
                'Invalid hash type \'{0}\'. Supported hash types are: {1}. '
                'Either remove the hash type and simply use \'{2}\' as the '
                'source_hash, or change the hash type to a supported type.'
                .format(ret['hash_type'], ', '.join(HASHES), ret['hsum'])
            )
        else:
            # Sanity-check that the hash digest length matches its type.
            hsum_len = len(ret['hsum'])
            if hsum_len not in HASHES_REVMAP:
                _invalid_source_hash_format()
            elif hsum_len != HASHES[ret['hash_type']]:
                raise CommandExecutionError(
                    'Invalid length ({0}) for hash type \'{1}\'. Either '
                    'remove the hash type and simply use \'{2}\' as the '
                    'source_hash, or change the hash type to \'{3}\''.format(
                        hsum_len,
                        ret['hash_type'],
                        ret['hsum'],
                        HASHES_REVMAP[hsum_len],
                    )
                )

        return ret


def check_hash(path, file_hash):
    '''
    Check if a file matches the given hash string

    Returns ``True`` if the hash matches, otherwise ``False``.

    path
        Path to a file local to the minion.

    hash
        The hash to check against the file specified in the ``path``
        argument.
        For versions 2016.11.4 and newer, the hash can be specified without
        an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
        but for earlier releases it is necessary to also specify the hash type
        in the format ``<hash_type>:<hash_value>`` (e.g.
        ``md5:e138491e9d5b97023cea823fe17bac22``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
        salt '*' file.check_hash /etc/fstab md5:e138491e9d5b97023cea823fe17bac22
    '''
    path = os.path.expanduser(path)

    if not isinstance(file_hash, six.string_types):
        raise SaltInvocationError('hash must be a string')

    # Accept either '<type>:<hash>' or '<type>=<hash>'.
    for sep in (':', '='):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    else:
        # Bare hash: infer the type from the digest length.
        hash_value = file_hash
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            raise SaltInvocationError(
                'Hash {0} (length: {1}) could not be matched to a supported '
                'hash type. The supported hash types and lengths are: '
                '{2}'.format(
                    file_hash,
                    hash_len,
                    ', '.join(
                        ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                         for x in sorted(HASHES_REVMAP)]
                    ),
                )
            )

    return get_hash(path, hash_type) == hash_value


def find(path, *args, **kwargs):
    '''
    Approximate the Unix ``find(1)`` command and return a list of paths that
    meet the specified criteria.

    The options include match criteria:

    .. code-block:: text

        name    = path-glob                 # case sensitive
        iname   = path-glob                 # case insensitive
        regex   = path-regex                # case sensitive
        iregex  = path-regex                # case insensitive
        type    = file-types                # match any listed type
        user    = users                     # match any listed user
        group   = groups                    # match any listed group
        size    = [+-]number[size-unit]     # default unit = byte
        mtime   = interval                  # modified since date
        grep    = regex                     # search file contents

    and/or actions:

    .. code-block:: text

        delete [= file-types]               # default type = 'f'
        exec   = command [arg ...]          # where {} is replaced by pathname
        print  [= print-opts]

    and/or depth criteria:

    ..
    code-block:: text

        maxdepth = maximum depth to transverse in path
        mindepth = minimum depth to transverse before checking files or
        directories

    The default action is ``print=path``

    ``path-glob``:

    .. code-block:: text

        *                = match zero or more chars
        ?                = match any char
        [abc]            = match a, b, or c
        [!abc] or [^abc] = match anything except a, b, and c
        [x-y]            = match chars x through y
        [!x-y] or [^x-y] = match anything except chars x through y
        {a,b,c}          = match a or b or c

    ``path-regex``: a Python Regex (regular expression) pattern to match
    pathnames

    ``file-types``: a string of one or more of the following:

    .. code-block:: text

        a: all file types
        b: block device
        c: character device
        d: directory
        p: FIFO (named pipe)
        f: plain file
        l: symlink
        s: socket

    ``users``: a space and/or comma separated list of user names and/or uids

    ``groups``: a space and/or comma separated list of group names and/or
    gids

    ``size-unit``:

    .. code-block:: text

        b: bytes
        k: kilobytes
        m: megabytes
        g: gigabytes
        t: terabytes

    interval:

    .. code-block:: text

        [<num>w] [<num>d] [<num>h] [<num>m] [<num>s]

        where:
            w: week
            d: day
            h: hour
            m: minute
            s: second

    print-opts: a comma and/or space separated list of one or more of the
    following:

    .. code-block:: text

        group: group name
        md5:   MD5 digest of file contents
        mode:  file permissions (as integer)
        mtime: last modification time (as time_t)
        name:  file basename
        path:  file absolute path
        size:  file size in bytes
        type:  file type
        user:  user name

    CLI Examples:

    ..
    code-block:: bash

        salt '*' file.find / type=f name=\\*.bak size=+10m
        salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
        salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
    '''
    # Bare 'delete'/'print' positional args map to their default kwarg forms.
    if 'delete' in args:
        kwargs['delete'] = 'f'
    elif 'print' in args:
        kwargs['print'] = 'path'

    try:
        finder = salt.utils.find.Finder(kwargs)
    except ValueError as ex:
        return 'error: {0}'.format(ex)

    # Flatten the per-glob-match result lists into a single sorted list.
    ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i]
    ret.sort()
    return ret


def _sed_esc(string, escape_all=False):
    '''
    Escape single quotes and forward slashes
    '''
    special_chars = "^.[$()|*+?{"
    string = string.replace("'", "'\"'\"'").replace("/", "\\/")
    if escape_all is True:
        for char in special_chars:
            string = string.replace(char, "\\" + char)
    return string


def sed(path,
        before,
        after,
        limit='',
        backup='.bak',
        options='-r -e',
        flags='g',
        escape_all=False,
        negate_match=False):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Make a simple edit to a file

    Equivalent to:

    .. code-block:: bash

        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"

    path
        The full path to the file to be edited

    before
        A pattern to find in order to replace with ``after``

    after
        Text that will replace ``before``

    limit : ``''``
        An initial pattern to search for before searching for ``before``

    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called
        will overwrite this backup

    options : ``-r -e``
        Options to pass to sed

    flags : ``g``
        Flags to modify the sed search; e.g., ``i`` for case-insensitive
        pattern matching

    negate_match : False
        Negate the search command (``!``)

        .. versionadded:: 0.17.0

    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.

    CLI Example:

    ..
code-block:: bash salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info' ''' # Largely inspired by Fabric's contrib.files.sed() # XXX:dc: Do we really want to always force escaping? # path = os.path.expanduser(path) if not os.path.exists(path): return False # Mandate that before and after are strings before = str(before) after = str(after) before = _sed_esc(before, escape_all) after = _sed_esc(after, escape_all) limit = _sed_esc(limit, escape_all) if sys.platform == 'darwin': options = options.replace('-r', '-E') cmd = ['sed'] cmd.append('-i{0}'.format(backup) if backup else '-i') cmd.extend(salt.utils.shlex_split(options)) cmd.append( r'{limit}{negate_match}s/{before}/{after}/{flags}'.format( limit='/{0}/ '.format(limit) if limit else '', negate_match='!' if negate_match else '', before=before, after=after, flags=flags ) ) cmd.append(path) return __salt__['cmd.run_all'](cmd, python_shell=False) def sed_contains(path, text, limit='', flags='g'): ''' .. deprecated:: 0.17.0 Use :func:`search` instead. Return True if the file at ``path`` contains ``text``. Utilizes sed to perform the search (line-wise search). Note: the ``p`` flag will be added to any flags you pass in. CLI Example: .. code-block:: bash salt '*' file.contains /etc/crontab 'mymaintenance.sh' ''' # Largely inspired by Fabric's contrib.files.contains() path = os.path.expanduser(path) if not os.path.exists(path): return False before = _sed_esc(str(text), False) limit = _sed_esc(str(limit), False) options = '-n -r -e' if sys.platform == 'darwin': options = options.replace('-r', '-E') cmd = ['sed'] cmd.extend(salt.utils.shlex_split(options)) cmd.append( r'{limit}s/{before}/$/{flags}'.format( limit='/{0}/ '.format(limit) if limit else '', before=before, flags='p{0}'.format(flags) ) ) cmd.append(path) result = __salt__['cmd.run'](cmd, python_shell=False) return bool(result) def psed(path, before, after, limit='', backup='.bak', flags='gMS', escape_all=False, multi=False): ''' .. 
deprecated:: 0.17.0 Use :py:func:`~salt.modules.file.replace` instead. Make a simple edit to a file (pure Python version) Equivalent to: .. code-block:: bash sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>" path The full path to the file to be edited before A pattern to find in order to replace with ``after`` after Text that will replace ``before`` limit : ``''`` An initial pattern to search for before searching for ``before`` backup : ``.bak`` The file will be backed up before edit with this file extension; **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will overwrite this backup flags : ``gMS`` Flags to modify the search. Valid values are: - ``g``: Replace all occurrences of the pattern, not just the first. - ``I``: Ignore case. - ``L``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\s`` and ``\\S`` dependent on the locale. - ``M``: Treat multiple lines as a single line. - ``S``: Make `.` match all characters, including newlines. - ``U``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\d``, ``\\D``, ``\\s`` and ``\\S`` dependent on Unicode. - ``X``: Verbose (whitespace is ignored). multi: ``False`` If True, treat the entire file as a single line Forward slashes and single quotes will be escaped automatically in the ``before`` and ``after`` patterns. CLI Example: .. code-block:: bash salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info' ''' # Largely inspired by Fabric's contrib.files.sed() # XXX:dc: Do we really want to always force escaping? # # Mandate that before and after are strings path = os.path.expanduser(path) multi = bool(multi) before = str(before) after = str(after) before = _sed_esc(before, escape_all) # The pattern to replace with does not need to be escaped!!! 
#after = _sed_esc(after, escape_all) limit = _sed_esc(limit, escape_all) shutil.copy2(path, '{0}{1}'.format(path, backup)) with salt.utils.fopen(path, 'w') as ofile: with salt.utils.fopen('{0}{1}'.format(path, backup), 'r') as ifile: if multi is True: for line in ifile.readline(): ofile.write(_psed(line, before, after, limit, flags)) else: ofile.write(_psed(ifile.read(), before, after, limit, flags)) RE_FLAG_TABLE = {'I': re.I, 'L': re.L, 'M': re.M, 'S': re.S, 'U': re.U, 'X': re.X} def _psed(text, before, after, limit, flags): ''' Does the actual work for file.psed, so that single lines can be passed in ''' atext = text if limit: limit = re.compile(limit) comps = text.split(limit) atext = ''.join(comps[1:]) count = 1 if 'g' in flags: count = 0 flags = flags.replace('g', '') aflags = 0 for flag in flags: aflags |= RE_FLAG_TABLE[flag] before = re.compile(before, flags=aflags) text = re.sub(before, after, atext, count=count) return text def uncomment(path, regex, char='#', backup='.bak'): ''' .. deprecated:: 0.17.0 Use :py:func:`~salt.modules.file.replace` instead. Uncomment specified commented lines in a file path The full path to the file to be edited regex A regular expression used to find the lines that are to be uncommented. This regex should not include the comment character. A leading ``^`` character will be stripped for convenience (for easily switching between comment() and uncomment()). char : ``#`` The character to remove in order to uncomment a line backup : ``.bak`` The file will be backed up before edit with this file extension; **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will overwrite this backup CLI Example: .. code-block:: bash salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID' ''' return comment_line(path=path, regex=regex, char=char, cmnt=False, backup=backup) def comment(path, regex, char='#', backup='.bak'): ''' .. deprecated:: 0.17.0 Use :py:func:`~salt.modules.file.replace` instead. 
    Comment out specified lines in a file

    path
        The full path to the file to be edited

    regex
        A regular expression used to find the lines that are to be
        commented; this pattern will be wrapped in parenthesis and will move
        any preceding/trailing ``^`` or ``$`` characters outside the
        parenthesis (e.g., the pattern ``^foo$`` will be rewritten as
        ``^(foo)$``)

    char : ``#``
        The character to be inserted at the beginning of a line in order to
        comment it out

    backup : ``.bak``
        The file will be backed up before edit with this file extension

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment``
            / ``uncomment`` is called. Meaning the backup will only be
            useful after the first invocation.

    CLI Example:

    .. code-block:: bash

        salt '*' file.comment /etc/modules pcspkr
    '''
    # Delegate to comment_line() with cmnt=True (prepend the comment char).
    return comment_line(path=path,
                        regex=regex,
                        char=char,
                        cmnt=True,
                        backup=backup)


def comment_line(path,
                 regex,
                 char='#',
                 cmnt=True,
                 backup='.bak'):
    r'''
    Comment or Uncomment a line in a text file.

    :param path: string
        The full path to the text file.

    :param regex: string
        A regex expression that begins with ``^`` that will find the line
        you wish to comment. Can be as simple as ``^color =``

    :param char: string
        The character used to comment a line in the type of file you're
        referencing. Default is ``#``

    :param cmnt: boolean
        True to comment the line. False to uncomment the line. Default is
        True.

    :param backup: string
        The file extension to give the backup file. Default is ``.bak``
        Set to False/None to not keep a backup.

    :return: boolean
        Returns True if successful, False if not

    CLI Example:

    The following example will comment out the ``pcspkr`` line in the
    ``/etc/modules`` file using the default ``#`` character and create a
    backup file named ``modules.bak``

    ..
    code-block:: bash

        salt '*' file.comment_line '/etc/modules' '^pcspkr'

    CLI Example:

    The following example will uncomment the ``log_level`` setting in
    ``minion`` config file if it is set to either ``warning``, ``info``, or
    ``debug`` using the ``#`` character and create a backup file named
    ``minion.bk``

    .. code-block:: bash

        salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk'
    '''
    # Get the regex for comment or uncomment
    if cmnt:
        # Wrap the user pattern in a group, keeping any anchors outside it.
        regex = '{0}({1}){2}'.format(
                '^' if regex.startswith('^') else '',
                regex.lstrip('^').rstrip('$'),
                '$' if regex.endswith('$') else '')
    else:
        # Match the comment char (plus whitespace) ahead of the pattern.
        regex = r'^{0}\s*({1}){2}'.format(
                char,
                regex.lstrip('^').rstrip('$'),
                '$' if regex.endswith('$') else '')

    # Load the real path to the file
    path = os.path.realpath(os.path.expanduser(path))

    # Make sure the file exists
    if not os.path.isfile(path):
        raise SaltInvocationError('File not found: {0}'.format(path))

    # Make sure it is a text file
    if not salt.utils.istextfile(path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'.format(path))

    # First check the whole file, determine whether to make the replacement
    # Searching first avoids modifying the time stamp if there are no changes
    found = False
    # Dictionaries for comparing changes
    orig_file = []
    new_file = []
    # Buffer size for fopen
    bufsize = os.path.getsize(path)
    try:
        # Use a read-only handle to open the file
        with salt.utils.fopen(path,
                              mode='rb',
                              buffering=bufsize) as r_file:
            # Loop through each line of the file and look for a match
            for line in r_file:
                # Is it in this line
                if re.match(regex, line):
                    # Load lines into dictionaries, set found to True
                    orig_file.append(line)
                    if cmnt:
                        new_file.append('{0}{1}'.format(char, line))
                    else:
                        new_file.append(line.lstrip(char))
                    found = True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )

    # We've searched the whole file. If we didn't find anything, return False
    if not found:
        return False

    if not salt.utils.is_windows():
        # Remember ownership/mode so they can be restored after the rewrite.
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.normalize_mode(get_mode(path))

    # Create a copy to read from and to use as a backup later
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))

    try:
        # Open the file in write mode
        with salt.utils.fopen(path,
                              mode='wb',
                              buffering=bufsize) as w_file:
            try:
                # Open the temp file in read mode
                with salt.utils.fopen(temp_file,
                                      mode='rb',
                                      buffering=bufsize) as r_file:
                    # Loop through each line of the file and look for a match
                    for line in r_file:
                        try:
                            # Is it in this line
                            if re.match(regex, line):
                                # Write the new line
                                if cmnt:
                                    w_file.write('{0}{1}'.format(char, line))
                                else:
                                    w_file.write(line.lstrip(char))
                            else:
                                # Write the existing line (no change)
                                w_file.write(line)
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))

    if backup:
        # Move the backup file to the original directory
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)

    if not salt.utils.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)

    # Return a diff using the two dictionaries
    return ''.join(difflib.unified_diff(orig_file, new_file))


def _get_flags(flags):
    '''
    Return an integer appropriate for use as a flag for the re module from a
    list of human-readable strings

    .. code-block:: python

        >>> _get_flags(['MULTILINE', 'IGNORECASE'])
        10
        >>> _get_flags('MULTILINE')
        8
        >>> _get_flags(2)
        2
    '''
    if isinstance(flags, six.string_types):
        # Normalize a single flag name to a one-element list.
        flags = [flags]

    if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
        _flags_acc = []
        for flag in flags:
            # Look the name up on the re module (e.g. 'MULTILINE' -> re.M).
            _flag = getattr(re, str(flag).upper())

            if not isinstance(_flag, six.integer_types):
                raise SaltInvocationError(
                    'Invalid re flag given: {0}'.format(flag)
                )

            _flags_acc.append(_flag)

        # OR all the flag bits together.
        return reduce(operator.__or__, _flags_acc)
    elif isinstance(flags, six.integer_types):
        return flags
    else:
        raise SaltInvocationError(
            'Invalid re flags: "{0}", must be given either as a single flag '
            'string, a list of strings, or as an integer'.format(flags)
        )


def _add_flags(flags, new_flags):
    '''
    Combine ``flags`` and ``new_flags``
    '''
    flags = _get_flags(flags)
    new_flags = _get_flags(new_flags)
    return flags | new_flags


def _mkstemp_copy(path,
                  preserve_inode=True):
    '''
    Create a temp file and move/copy the contents of ``path`` to the temp
    file. Return the path to the temp file.

    path
        The full path to the file whose contents will be moved/copied to a
        temp file. Whether it's moved or copied depends on the value of
        ``preserve_inode``.

    preserve_inode
        Preserve the inode of the file, so that any hard links continue to
        share the inode with the original filename. This works by *copying*
        the file, reading from the copy, and writing to the file at the
        original inode. If ``False``, the file will be *moved* rather than
        copied, and a new file will be written to a new inode, but using the
        original filename.
        Hard links will then share an inode with the backup, instead (if
        using ``backup`` to create a backup copy).
        Default is ``True``.
    '''
    temp_file = None
    # Create the temp file
    try:
        temp_file = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to create temp file. "
            "Exception: {0}".format(exc)
        )
    # use `copy` to preserve the inode of the
    # original file, and thus preserve hardlinks
    # to the inode. otherwise, use `move` to
    # preserve prior behavior, which results in
    # writing the file to a new inode.
    if preserve_inode:
        try:
            shutil.copy2(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to copy file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        try:
            shutil.move(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )

    return temp_file


def _starts_till(src, probe, strip_comments=True):
    '''
    Returns True if src and probe at least begins till some point.
    '''
    # Returns -1 (no_match), 0 (equal), or the index at which a shared word
    # prefix between src and probe was found.
    def _strip_comments(txt):
        '''
        Strip possible comments.
        Usually commends are one or two symbols
        '''
        buff = txt.split(" ", 1)
        # Only treat the first token as a comment marker if it is 1 char.
        return len(buff) == 2 and len(buff[0]) < 2 and buff[1] or txt

    def _to_words(txt):
        '''
        Split by words
        '''
        return txt and [w for w in txt.strip().split(" ") if w.strip()] or txt

    no_match = -1
    equal = 0
    if not src or not probe:
        return no_match

    if src == probe:
        return equal

    src = _to_words(strip_comments and _strip_comments(src) or src)
    probe = _to_words(strip_comments and _strip_comments(probe) or probe)

    # Compare the shorter word list against the longer, joined as a string.
    a_buff, b_buff = len(src) < len(probe) and (src, probe) or (probe, src)
    b_buff = ' '.join(b_buff)
    for idx in range(len(a_buff)):
        # Try progressively shorter word prefixes of the shorter sequence.
        prb = ' '.join(a_buff[:-(idx + 1)])
        if prb and b_buff.startswith(prb):
            return idx

    return no_match


def _regex_to_static(src, regex):
    '''
    Expand regular expression to static match.
    '''
    if not src or not regex:
        return None

    try:
        src = re.search(regex, src)
    except Exception as ex:
        raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))

    # Return the matched text, or the regex itself if nothing matched.
    return src and src.group() or regex


def _assert_occurrence(src, probe, target, amount=1):
    '''
    Raise an exception, if there are different amount of specified
    occurrences in src.
    '''
    occ = src.count(probe)
    if occ > amount:
        msg = 'more than'
    elif occ < amount:
        msg = 'less than'
    elif not occ:
        msg = 'no'
    else:
        msg = None

    if msg:
        raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target))


def _get_line_indent(src, line, indent):
    '''
    Indent the line with the source line.
    '''
    if not indent:
        return line

    # Copy the leading whitespace (tabs/spaces) from the source line.
    idt = []
    for c in src:
        if c not in ['\t', ' ']:
            break
        idt.append(c)

    return ''.join(idt) + line.strip()


def line(path, content, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True):
    '''
    .. versionadded:: 2015.8.0

    Edit a line in the configuration file.

    path
        Filesystem path to the file to be edited.

    content
        Content of the line.

    match
        Match the target line for an action by a fragment of a string or
        regular expression. If neither ``before`` nor ``after`` are
        provided, and ``match`` is also ``None``, match becomes the
        ``content`` value.

    mode
        Defines how to edit a line. One of the following options is
        required:

        - ensure
            If line does not exist, it will be added. This is based on the
            ``content`` argument.
        - replace
            If line already exists, it will be replaced.
        - delete
            Delete the line, once found.
        - insert
            Insert a line.

        .. note::

            If ``mode=insert`` is used, at least one of the following
            options must also be defined: ``location``, ``before``, or
            ``after``. If ``location`` is used, it takes precedence over the
            other two options.

    location
        Defines where to place content in the line. Note this option is
        only used when ``mode=insert`` is specified. If a location is passed
        in, it takes precedence over both the ``before`` and ``after``
        kwargs. Valid locations are:

        - start
            Place the content at the beginning of the file.
        - end
            Place the content at the end of the file.

    before
        Regular expression or an exact case-sensitive fragment of the
        string. This option is only used when either the ``ensure`` or
        ``insert`` mode is defined.

    after
        Regular expression or an exact case-sensitive fragment of the
        string. This option is only used when either the ``ensure`` or
        ``insert`` mode is defined.

    show_changes
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Default is ``True``

        .. note::
            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to
            generate the diff.

    backup
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".

    quiet
        Do not raise any exceptions. E.g. ignore the fact that the file that
        is tried to be edited does not exist and nothing really happened.

    indent
        Keep indentation with the previous line. This option is not
        considered when the ``delete`` mode is specified.

    CLI Example:

    .. code-block:: bash

        salt '*' file.line /etc/nsswitch.conf "networks:\tfiles dns" after="hosts:.*?" mode='ensure'

    .. note::

        If an equal sign (``=``) appears in an argument to a Salt command,
        it is interpreted as a keyword argument in the format of
        ``key=val``. That processing can be bypassed in order to pass an
        equal sign through to the remote shell command by manually
        specifying the kwarg:

    ..
    code-block:: bash

        salt '*' file.line /path/to/file content="CREATEMAIL_SPOOL=no" match="CREATE_MAIL_SPOOL=yes" mode="replace"
    '''
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError('File "{0}" does not exists or is not a file.'.format(path))
        return False  # No changes had happened

    mode = mode and mode.lower() or mode
    if mode not in ['insert', 'ensure', 'delete', 'replace']:
        if mode is None:
            raise CommandExecutionError('Mode was not defined. How to process the file?')
        else:
            raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))

    # Before/after has privilege. If nothing defined, match is used by content.
    if before is None and after is None and not match:
        match = content

    with salt.utils.fopen(path, mode='r') as fp_:
        body = fp_.read()
    # Hash of the original content, used later to detect whether anything
    # actually changed.
    body_before = hashlib.sha256(salt.utils.to_bytes(body)).hexdigest()
    # Resolve regex arguments to the literal text they match in the body.
    after = _regex_to_static(body, after)
    before = _regex_to_static(body, before)
    match = _regex_to_static(body, match)

    if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
        log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
        body = ''
    elif mode == 'delete':
        # Drop every line containing the match fragment.
        body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
    elif mode == 'replace':
        # Swap matching lines for the (re-indented) content.
        body = os.linesep.join([(_get_line_indent(file_line, content, indent)
                                if (file_line.find(match) > -1 and not file_line == content) else file_line)
                                for file_line in body.split(os.linesep)])
    elif mode == 'insert':
        if not location and not before and not after:
            raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')

        if not location:
            if before and after:
                # Insert between the "after" line and the "before" line.
                _assert_occurrence(body, before, 'before')
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1 and idx <= len(lines) and lines[idx - 1].find(after) > -1:
                        out.append(_get_line_indent(_line, content, indent))
                        out.append(_line)
                    else:
                        out.append(_line)
                body = os.linesep.join(out)

            if before and not after:
                _assert_occurrence(body, before, 'before')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1:
                        cnd = _get_line_indent(_line, content, indent)
                        if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0):  # Job for replace instead
                            out.append(cnd)
                    out.append(_line)
                body = os.linesep.join(out)

            elif after and not before:
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    out.append(_line)
                    cnd = _get_line_indent(_line, content, indent)
                    if _line.find(after) > -1:
                        # No dupes or append, if "after" is the last line
                        if (idx < len(lines) and _starts_till(lines[idx + 1], cnd) < 0) or idx + 1 == len(lines):
                            out.append(cnd)
                body = os.linesep.join(out)

        else:
            if location == 'start':
                body = ''.join([content, body])
            elif location == 'end':
                body = ''.join([body, _get_line_indent(body[-1], content, indent) if body else content])

    elif mode == 'ensure':
        after = 
after and after.strip() before = before and before.strip() if before and after: _assert_occurrence(body, before, 'before') _assert_occurrence(body, after, 'after') a_idx = b_idx = -1 idx = 0 body = body.split(os.linesep) for _line in body: idx += 1 if _line.find(before) > -1 and b_idx < 0: b_idx = idx if _line.find(after) > -1 and a_idx < 0: a_idx = idx # Add if not b_idx - a_idx - 1: body = body[:a_idx] + [content] + body[b_idx - 1:] elif b_idx - a_idx - 1 == 1: if _starts_till(body[a_idx:b_idx - 1][0], content) > -1: body[a_idx] = _get_line_indent(body[a_idx - 1], content, indent) else: raise CommandExecutionError('Found more than one line between boundaries "before" and "after".') body = os.linesep.join(body) elif before and not after: _assert_occurrence(body, before, 'before') body = body.split(os.linesep) out = [] for idx in range(len(body)): if body[idx].find(before) > -1: prev = (idx > 0 and idx or 1) - 1 out.append(_get_line_indent(body[prev], content, indent)) if _starts_till(out[prev], content) > -1: del out[prev] out.append(body[idx]) body = os.linesep.join(out) elif not before and after: _assert_occurrence(body, after, 'after') body = body.split(os.linesep) skip = None out = [] for idx in range(len(body)): if skip != body[idx]: out.append(body[idx]) if body[idx].find(after) > -1: next_line = idx + 1 < len(body) and body[idx + 1] or None if next_line is not None and _starts_till(next_line, content) > -1: skip = next_line out.append(_get_line_indent(body[idx], content, indent)) body = os.linesep.join(out) else: raise CommandExecutionError("Wrong conditions? 
" "Unable to ensure line without knowing " "where to put it before and/or after.") changed = body_before != hashlib.sha256(salt.utils.to_bytes(body)).hexdigest() if backup and changed and __opts__['test'] is False: try: temp_file = _mkstemp_copy(path=path, preserve_inode=True) shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()))) except (OSError, IOError) as exc: raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc)) changes_diff = None if changed: if show_changes: with salt.utils.fopen(path, 'r') as fp_: path_content = _splitlines_preserving_trailing_newline( fp_.read()) changes_diff = ''.join(difflib.unified_diff( path_content, _splitlines_preserving_trailing_newline(body))) if __opts__['test'] is False: fh_ = None try: fh_ = salt.utils.atomicfile.atomic_open(path, 'w') fh_.write(body) finally: if fh_: fh_.close() return show_changes and changes_diff or changed def replace(path, pattern, repl, count=0, flags=8, bufsize=1, append_if_not_found=False, prepend_if_not_found=False, not_found_content=None, backup='.bak', dry_run=False, search_only=False, show_changes=True, ignore_if_missing=False, preserve_inode=True, ): ''' .. versionadded:: 0.17.0 Replace occurrences of a pattern in a file. If ``show_changes`` is ``True``, then a diff of what changed will be returned, otherwise a ``True`` will be returned when changes are made, and ``False`` when no changes are made. This is a pure Python implementation that wraps Python's :py:func:`~re.sub`. path Filesystem path to the file to be edited. If a symlink is specified, it will be resolved to its target. pattern A regular expression, to be matched using Python's :py:func:`~re.search`. repl The replacement text count : 0 Maximum number of pattern occurrences to be replaced. If count is a positive integer ``n``, only ``n`` occurrences will be replaced, otherwise all occurrences will be replaced. 
flags (list or int) A list of flags defined in the :ref:`re module documentation <contents-of-module-re>`. Each list item should be a string that will correlate to the human-friendly flag name. E.g., ``['IGNORECASE', 'MULTILINE']``. Optionally, ``flags`` may be an int, with a value corresponding to the XOR (``|``) of all the desired flags. Defaults to 8 (which supports 'MULTILINE'). bufsize (int or str) How much of the file to buffer into memory at once. The default value ``1`` processes one line at a time. The special value ``file`` may be specified which will read the entire file into memory before processing. append_if_not_found : False .. versionadded:: 2014.7.0 If set to ``True``, and pattern is not found, then the content will be appended to the file. prepend_if_not_found : False .. versionadded:: 2014.7.0 If set to ``True`` and pattern is not found, then the content will be prepended to the file. not_found_content .. versionadded:: 2014.7.0 Content to use for append/prepend if not found. If None (default), uses ``repl``. Useful when ``repl`` uses references to group in pattern. backup : .bak The file extension to use for a backup of the file before editing. Set to ``False`` to skip making a backup. dry_run : False If set to ``True``, no changes will be made to the file, the function will just return the changes that would have been made (or a ``True``/``False`` value if ``show_changes`` is set to ``False``). search_only : False If set to true, this no changes will be performed on the file, and this function will simply return ``True`` if the pattern was matched, and ``False`` if not. show_changes : True If ``True``, return a diff of changes made. Otherwise, return ``True`` if changes were made, and ``False`` if not. .. note:: Using this option will store two copies of the file in memory (the original version and the edited version) in order to generate the diff. This may not normally be a concern, but could impact performance if used with large files. 
ignore_if_missing : False .. versionadded:: 2015.8.0 If set to ``True``, this function will simply return ``False`` if the file doesn't exist. Otherwise, an error will be thrown. preserve_inode : True .. versionadded:: 2015.8.0 Preserve the inode of the file, so that any hard links continue to share the inode with the original filename. This works by *copying* the file, reading from the copy, and writing to the file at the original inode. If ``False``, the file will be *moved* rather than copied, and a new file will be written to a new inode, but using the original filename. Hard links will then share an inode with the backup, instead (if using ``backup`` to create a backup copy). If an equal sign (``=``) appears in an argument to a Salt command it is interpreted as a keyword argument in the format ``key=val``. That processing can be bypassed in order to pass an equal sign through to the remote shell command by manually specifying the kwarg: .. code-block:: bash salt '*' file.replace /path/to/file pattern='=' repl=':' salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:' CLI Examples: .. 
code-block:: bash salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info' salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]' ''' symlink = False if is_link(path): symlink = True target_path = os.readlink(path) given_path = os.path.expanduser(path) path = os.path.realpath(os.path.expanduser(path)) if not os.path.exists(path): if ignore_if_missing: return False else: raise SaltInvocationError('File not found: {0}'.format(path)) if not salt.utils.istextfile(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) ) if search_only and (append_if_not_found or prepend_if_not_found): raise SaltInvocationError( 'search_only cannot be used with append/prepend_if_not_found' ) if append_if_not_found and prepend_if_not_found: raise SaltInvocationError( 'Only one of append and prepend_if_not_found is permitted' ) flags_num = _get_flags(flags) cpattern = re.compile(salt.utils.to_bytes(pattern), flags_num) filesize = os.path.getsize(path) if bufsize == 'file': bufsize = filesize # Search the file; track if any changes have been made for the return val has_changes = False orig_file = [] # used if show_changes new_file = [] # used if show_changes if not salt.utils.is_windows(): pre_user = get_user(path) pre_group = get_group(path) pre_mode = salt.utils.normalize_mode(get_mode(path)) # Avoid TypeErrors by forcing repl to be bytearray related to mmap # Replacement text may contains integer: 123 for example repl = salt.utils.to_bytes(str(repl)) if not_found_content: not_found_content = salt.utils.to_bytes(not_found_content) found = False temp_file = None content = salt.utils.to_str(not_found_content) if not_found_content and \ (prepend_if_not_found or append_if_not_found) \ else salt.utils.to_str(repl) try: # First check the whole file, determine whether to make the replacement # Searching first avoids modifying the time stamp if there are no changes r_data = 
None # Use a read-only handle to open the file with salt.utils.fopen(path, mode='rb', buffering=bufsize) as r_file: try: # mmap throws a ValueError if the file is empty. r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) except (ValueError, mmap.error): # size of file in /proc is 0, but contains data r_data = salt.utils.to_bytes("".join(r_file)) if search_only: # Just search; bail as early as a match is found if re.search(cpattern, r_data): return True # `with` block handles file closure else: result, nrepl = re.subn(cpattern, repl, r_data, count) # found anything? (even if no change) if nrepl > 0: found = True # Identity check the potential change has_changes = True if pattern != repl else has_changes if prepend_if_not_found or append_if_not_found: # Search for content, to avoid pre/appending the # content if it was pre/appended in a previous run. if re.search(salt.utils.to_bytes('^{0}$'.format(re.escape(content))), r_data, flags=flags_num): # Content was found, so set found. found = True # Keep track of show_changes here, in case the file isn't # modified if show_changes or append_if_not_found or \ prepend_if_not_found: orig_file = r_data.read(filesize).splitlines(True) \ if isinstance(r_data, mmap.mmap) \ else r_data.splitlines(True) new_file = result.splitlines(True) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to open file '{0}'. " "Exception: {1}".format(path, exc) ) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() if has_changes and not dry_run: # Write the replacement text in this block. 
try: # Create a copy to read from and to use as a backup later temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) r_data = None try: # Open the file in write mode with salt.utils.fopen(path, mode='w', buffering=bufsize) as w_file: try: # Open the temp file in read mode with salt.utils.fopen(temp_file, mode='r', buffering=bufsize) as r_file: r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ) result, nrepl = re.subn(cpattern, repl, r_data, count) try: w_file.write(salt.utils.to_str(result)) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to write file '{0}'. Contents may " "be truncated. Temporary file contains copy " "at '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) finally: if r_data and isinstance(r_data, mmap.mmap): r_data.close() except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) if not found and (append_if_not_found or prepend_if_not_found): if not_found_content is None: not_found_content = repl if prepend_if_not_found: new_file.insert(0, not_found_content + b'\n') else: # append_if_not_found # Make sure we have a newline at the end of the file if 0 != len(new_file): if not new_file[-1].endswith(b'\n'): new_file[-1] += b'\n' new_file.append(not_found_content + b'\n') has_changes = True if not dry_run: try: # Create a copy to read from and for later use as a backup temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode) except (OSError, IOError) as exc: raise CommandExecutionError("Exception: {0}".format(exc)) # write new content in the file while avoiding partial reads try: fh_ = salt.utils.atomicfile.atomic_open(path, 'w') for line in new_file: fh_.write(salt.utils.to_str(line)) finally: fh_.close() if backup and has_changes and not dry_run: # keep the backup only 
if it was requested # and only if there were any changes backup_name = '{0}{1}'.format(path, backup) try: shutil.move(temp_file, backup_name) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to move the temp file '{0}' to the " "backup file '{1}'. " "Exception: {2}".format(path, temp_file, exc) ) if symlink: symlink_backup = '{0}{1}'.format(given_path, backup) target_backup = '{0}{1}'.format(target_path, backup) # Always clobber any existing symlink backup # to match the behaviour of the 'backup' option try: os.symlink(target_backup, symlink_backup) except OSError: os.remove(symlink_backup) os.symlink(target_backup, symlink_backup) except: raise CommandExecutionError( "Unable create backup symlink '{0}'. " "Target was '{1}'. " "Exception: {2}".format(symlink_backup, target_backup, exc) ) elif temp_file: try: os.remove(temp_file) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to delete temp file '{0}'. " "Exception: {1}".format(temp_file, exc) ) if not dry_run and not salt.utils.is_windows(): check_perms(path, None, pre_user, pre_group, pre_mode) if show_changes: orig_file_as_str = ''.join([salt.utils.to_str(x) for x in orig_file]) new_file_as_str = ''.join([salt.utils.to_str(x) for x in new_file]) return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str)) return has_changes def blockreplace(path, marker_start='#-- start managed zone --', marker_end='#-- end managed zone --', content='', append_if_not_found=False, prepend_if_not_found=False, backup='.bak', dry_run=False, show_changes=True, append_newline=False, ): ''' .. versionadded:: 2014.1.0 Replace content of a text block in a file, delimited by line markers A block of content delimited by comments can help you manage several lines entries without worrying about old entries removal. .. 
note:: This function will store two copies of the file in-memory (the original version and the edited version) in order to detect changes and only edit the targeted file if necessary. path Filesystem path to the file to be edited marker_start The line content identifying a line as the start of the content block. Note that the whole line containing this marker will be considered, so whitespace or extra content before or after the marker is included in final output marker_end The line content identifying a line as the end of the content block. Note that the whole line containing this marker will be considered, so whitespace or extra content before or after the marker is included in final output content The content to be used between the two lines identified by marker_start and marker_stop. append_if_not_found : False If markers are not found and set to ``True`` then, the markers and content will be appended to the file. prepend_if_not_found : False If markers are not found and set to ``True`` then, the markers and content will be prepended to the file. backup The file extension to use for a backup of the file if any edit is made. Set to ``False`` to skip making a backup. dry_run Don't make any edits to the file. show_changes Output a unified diff of the old file and the new file. If ``False``, return a boolean if any changes were made. append_newline: Append a newline to the content block. For more information see: https://github.com/saltstack/salt/issues/33686 .. versionadded:: 2016.3.4 CLI Example: .. 
code-block:: bash salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\ '#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True ''' path = os.path.expanduser(path) if not os.path.exists(path): raise SaltInvocationError('File not found: {0}'.format(path)) if append_if_not_found and prepend_if_not_found: raise SaltInvocationError( 'Only one of append and prepend_if_not_found is permitted' ) if not salt.utils.istextfile(path): raise SaltInvocationError( 'Cannot perform string replacements on a binary file: {0}' .format(path) ) # Search the file; track if any changes have been made for the return val has_changes = False orig_file = [] new_file = [] in_block = False old_content = '' done = False # we do not use in_place editing to avoid file attrs modifications when # no changes are required and to avoid any file access on a partially # written file. # we could also use salt.utils.filebuffer.BufferedReader try: fi_file = fileinput.input(path, inplace=False, backup=False, bufsize=1, mode='r') for line in fi_file: result = line if marker_start in line: # managed block start found, start recording in_block = True else: if in_block: if marker_end in line: # end of block detected in_block = False # Check for multi-line '\n' terminated content as split will # introduce an unwanted additional new line. if content and content[-1] == '\n': content = content[:-1] # push new block content in file for cline in content.split('\n'): new_file.append(cline + '\n') done = True else: # remove old content, but keep a trace old_content += line result = None # else: we are not in the marked block, keep saving things orig_file.append(line) if result is not None: new_file.append(result) # end for. 
If we are here without block management we maybe have some problems, # or we need to initialise the marked block finally: fi_file.close() if in_block: # unterminated block => bad, always fail raise CommandExecutionError( 'Unterminated marked block. End of file reached before marker_end.' ) if not done: if prepend_if_not_found: # add the markers and content at the beginning of file new_file.insert(0, marker_end + '\n') if append_newline is True: new_file.insert(0, content + '\n') else: new_file.insert(0, content) new_file.insert(0, marker_start + '\n') done = True elif append_if_not_found: # Make sure we have a newline at the end of the file if 0 != len(new_file): if not new_file[-1].endswith('\n'): new_file[-1] += '\n' # add the markers and content at the end of file new_file.append(marker_start + '\n') if append_newline is True: new_file.append(content + '\n') else: new_file.append(content) new_file.append(marker_end + '\n') done = True else: raise CommandExecutionError( 'Cannot edit marked block. Markers were not found in file.' 
) if done: diff = ''.join(difflib.unified_diff(orig_file, new_file)) has_changes = diff is not '' if has_changes and not dry_run: # changes detected # backup file attrs perms = {} perms['user'] = get_user(path) perms['group'] = get_group(path) perms['mode'] = salt.utils.normalize_mode(get_mode(path)) # backup old content if backup is not False: backup_path = '{0}{1}'.format(path, backup) shutil.copy2(path, backup_path) # copy2 does not preserve ownership check_perms(backup_path, None, perms['user'], perms['group'], perms['mode']) # write new content in the file while avoiding partial reads try: fh_ = salt.utils.atomicfile.atomic_open(path, 'w') for line in new_file: fh_.write(line) finally: fh_.close() # this may have overwritten file attrs check_perms(path, None, perms['user'], perms['group'], perms['mode']) if show_changes: return diff return has_changes def search(path, pattern, flags=8, bufsize=1, ignore_if_missing=False, multiline=False ): ''' .. versionadded:: 0.17.0 Search for occurrences of a pattern in a file Except for multiline, params are identical to :py:func:`~salt.modules.file.replace`. multiline If true, inserts 'MULTILINE' into ``flags`` and sets ``bufsize`` to 'file'. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' file.search /etc/crontab 'mymaintenance.sh' ''' if multiline: flags = _add_flags(flags, 'MULTILINE') bufsize = 'file' # This function wraps file.replace on purpose in order to enforce # consistent usage, compatible regex's, expected behavior, *and* bugs. :) # Any enhancements or fixes to one should affect the other. return replace(path, pattern, '', flags=flags, bufsize=bufsize, dry_run=True, search_only=True, show_changes=False, ignore_if_missing=ignore_if_missing) def patch(originalfile, patchfile, options='', dry_run=False): ''' .. versionadded:: 0.10.4 Apply a patch to a file or directory. Equivalent to: .. code-block:: bash patch <options> -i <patchfile> <originalfile> Or, when a directory is patched: .. 
    code-block:: bash

        patch <options> -i <patchfile> -d <originalfile> -p0

    originalfile
        The full path to the file or directory to be patched

    patchfile
        A patch file to apply to ``originalfile``

    options
        Options to pass to patch.

    CLI Example:

    .. code-block:: bash

        salt '*' file.patch /opt/file.txt /tmp/file.txt.patch
    '''
    patchpath = salt.utils.which('patch')
    if not patchpath:
        raise CommandExecutionError(
            'patch executable not found. Is the distribution\'s patch '
            'package installed?'
        )

    # Build the patch(1) argument vector; passed as a list with
    # python_shell=False, so no shell-quoting issues arise.
    cmd = [patchpath]
    cmd.extend(salt.utils.shlex_split(options))
    if dry_run:
        # BSD patch spells --dry-run as -C
        if __grains__['kernel'] in ('FreeBSD', 'OpenBSD'):
            cmd.append('-C')
        else:
            cmd.append('--dry-run')

    # this argument prevents interactive prompts when the patch fails to apply.
    # the exit code will still be greater than 0 if that is the case.
    if '-N' not in cmd and '--forward' not in cmd:
        cmd.append('--forward')

    has_rejectfile_option = False
    for option in cmd:
        if option == '-r' or option.startswith('-r ') \
                or option.startswith('--reject-file'):
            has_rejectfile_option = True
            break

    # by default, patch will write rejected patch files to <filename>.rej.
    # this option prevents that.
    if not has_rejectfile_option:
        cmd.append('--reject-file=-')

    cmd.extend(['-i', patchfile])

    if os.path.isdir(originalfile):
        cmd.extend(['-d', originalfile])

        has_strip_option = False
        for option in cmd:
            if option.startswith('-p') or option.startswith('--strip='):
                has_strip_option = True
                break

        # default to stripping no path components when patching a directory
        if not has_strip_option:
            cmd.append('--strip=0')
    else:
        cmd.append(originalfile)

    return __salt__['cmd.run_all'](cmd, python_shell=False)


def contains(path, text):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the file at ``path`` contains ``text``

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    stripped_text = str(text).strip()
    try:
        # Chunked scan via BufferedReader; best-effort, so I/O errors
        # simply report "not found".
        with salt.utils.filebuffer.BufferedReader(path) as breader:
            for chunk in breader:
                if stripped_text in chunk:
                    return True
        return False
    except (IOError, OSError):
        return False


def contains_regex(path, regex, lchar=''):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return True if the given regular expression matches on any line in the text
    of a given file.

    If the lchar argument (leading char) is specified, it will strip `lchar`
    from the left side of each line before trying to match

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_regex /etc/crontab
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    try:
        # Line-by-line regex search; best-effort on I/O failure.
        with salt.utils.fopen(path, 'r') as target:
            for line in target:
                if lchar:
                    line = line.lstrip(lchar)
                if re.search(regex, line):
                    return True
            return False
    except (IOError, OSError):
        return False


def contains_glob(path, glob_expr):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the given glob matches a string in the named file

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_glob /etc/foobar '*cheese*'
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    try:
        with salt.utils.filebuffer.BufferedReader(path) as breader:
            for chunk in breader:
                if fnmatch.fnmatch(chunk, glob_expr):
                    return True
        return False
    except (IOError, OSError):
        return False


def append(path, *args, **kwargs):
    '''
    .. versionadded:: 0.9.5

    Append text to the end of a file

    path
        path to file

    `*args`
        strings to append to file

    CLI Example:

    .. code-block:: bash

        salt '*' file.append /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    ..
    admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.append /etc/motd args='cheese=spam'

            salt '*' file.append /etc/motd args="['cheese=spam','spam=cheese']"

    '''
    path = os.path.expanduser(path)

    # Largely inspired by Fabric's contrib.files.append()

    # Lines may arrive either as positional args or as an explicit
    # 'args' kwarg (string or list) to allow '=' in the content.
    if 'args' in kwargs:
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]

    # Make sure we have a newline at the end of the file. Do this in binary
    # mode so SEEK_END with nonzero offset will work.
    with salt.utils.fopen(path, 'rb+') as ofile:
        linesep = salt.utils.to_bytes(os.linesep)
        try:
            ofile.seek(-len(linesep), os.SEEK_END)
        except IOError as exc:
            if exc.errno in (errno.EINVAL, errno.ESPIPE):
                # Empty file, simply append lines at the beginning of the file
                pass
            else:
                raise
        else:
            # Only add the separator if the file does not already end with one
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)

    # Append lines in text mode
    with salt.utils.fopen(path, 'a') as ofile:
        for new_line in args:
            ofile.write('{0}{1}'.format(new_line, os.linesep))

    return 'Wrote {0} lines to "{1}"'.format(len(args), path)


def prepend(path, *args, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    Prepend text to the beginning of a file

    path
        path to file

    `*args`
        strings to prepend to the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.prepend /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.prepend /etc/motd args='cheese=spam'

            salt '*' file.prepend /etc/motd args="['cheese=spam','spam=cheese']"

    '''
    path = os.path.expanduser(path)

    if 'args' in kwargs:
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]

    try:
        # Read existing contents; a missing file is treated as empty
        with salt.utils.fopen(path) as fhr:
            contents = fhr.readlines()
    except IOError:
        contents = []

    preface = []
    for line in args:
        preface.append('{0}\n'.format(line))

    # Rewrite the file with the new lines first
    with salt.utils.fopen(path, "w") as ofile:
        contents = preface + contents
        ofile.write(''.join(contents))
    return 'Prepended {0} lines to "{1}"'.format(len(args), path)


def write(path, *args, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    Write text to a file, overwriting any existing contents.

    path
        path to file

    `*args`
        strings to write to the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.write /etc/motd \\
                "With all thine offerings thou shalt offer salt."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.write /etc/motd args='cheese=spam'

            salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']"

    '''
    path = os.path.expanduser(path)

    if 'args' in kwargs:
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]

    contents = []
    for line in args:
        contents.append('{0}\n'.format(line))
    # Truncate and rewrite the whole file
    with salt.utils.fopen(path, "w") as ofile:
        ofile.write(''.join(contents))
    return 'Wrote {0} lines to "{1}"'.format(len(contents), path)


def touch(name, atime=None, mtime=None):
    '''
    .. versionadded:: 0.9.5

    Just like the ``touch`` command, create a file if it doesn't exist or
    simply update the atime and mtime if it already does.

    atime:
        Access time in Unix epoch time

    mtime:
        Last modification in Unix epoch time

    CLI Example:

    ..
code-block:: bash salt '*' file.touch /var/log/emptyfile ''' name = os.path.expanduser(name) if atime and atime.isdigit(): atime = int(atime) if mtime and mtime.isdigit(): mtime = int(mtime) try: if not os.path.exists(name): with salt.utils.fopen(name, 'a') as fhw: fhw.write('') if not atime and not mtime: times = None elif not mtime and atime: times = (atime, time.time()) elif not atime and mtime: times = (time.time(), mtime) else: times = (atime, mtime) os.utime(name, times) except TypeError: raise SaltInvocationError('atime and mtime must be integers') except (IOError, OSError) as exc: raise CommandExecutionError(exc.strerror) return os.path.exists(name) def seek_read(path, size, offset): ''' .. versionadded:: 2014.1.0 Seek to a position on a file and read it path path to file seek amount to read at once offset offset to start into the file CLI Example: .. code-block:: bash salt '*' file.seek_read /path/to/file 4096 0 ''' path = os.path.expanduser(path) try: seek_fh = os.open(path, os.O_RDONLY) os.lseek(seek_fh, int(offset), 0) data = os.read(seek_fh, int(size)) finally: os.close(seek_fh) return data def seek_write(path, data, offset): ''' .. versionadded:: 2014.1.0 Seek to a position on a file and write to it path path to file data data to write to file offset position in file to start writing CLI Example: .. code-block:: bash salt '*' file.seek_write /path/to/file 'some data' 4096 ''' path = os.path.expanduser(path) try: seek_fh = os.open(path, os.O_WRONLY) os.lseek(seek_fh, int(offset), 0) ret = os.write(seek_fh, data) os.fsync(seek_fh) finally: os.close(seek_fh) return ret def truncate(path, length): ''' .. versionadded:: 2014.1.0 Seek to a position on a file and delete everything after that point path path to file length offset into file to truncate CLI Example: .. 
def link(src, path):
    '''
    .. versionadded:: 2014.1.0

    Create a hard link to a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.link /path/to/file /path/to/link
    '''
    src = os.path.expanduser(src)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.link(src, path)
        return True
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    # NOTE: the unreachable ``return False`` that followed the raise has
    # been removed; every path out of the try block either returns True
    # or raises.


def is_link(path):
    '''
    Check if the path is a symbolic link

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_link /path/to/link
    '''
    # This function exists because os.path.islink does not support Windows,
    # therefore a custom function will need to be called. This function
    # therefore helps API consistency by providing a single function to call
    # for both operating systems.
    return os.path.islink(os.path.expanduser(path))


def symlink(src, path):
    '''
    Create a symbolic link (symlink, soft link) to a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.symlink /path/to/file /path/to/link
    '''
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.symlink(src, path)
        return True
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))


def rename(src, dst):
    '''
    Rename a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.rename /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.rename(src, dst)
        return True
    except OSError:
        raise CommandExecutionError(
            'Could not rename \'{0}\' to \'{1}\''.format(src, dst)
        )


def copy(src, dst, recurse=False, remove_existing=False):
    '''
    Copy a file or directory from source to dst

    In order to copy a directory, the recurse flag is required, and
    will by default overwrite files in the destination with the same path,
    and retain all other existing files. (similar to cp -r on unix)

    remove_existing will remove all files in the target directory,
    and then copy files from the source.

    .. note::
        The copy function accepts paths that are local to the Salt minion.
        This function does not support salt://, http://, or the other
        additional file paths that are supported by :mod:`states.file.managed
        <salt.states.file.managed>` and :mod:`states.file.recurse
        <salt.states.file.recurse>`.

    CLI Example:

    .. code-block:: bash

        salt '*' file.copy /path/to/src /path/to/dst
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    if not os.path.exists(src):
        raise CommandExecutionError('No such file or directory \'{0}\''.format(src))

    # Record ownership/mode so the copy can be given the same permissions
    # afterwards (POSIX only; Windows ACLs are not handled here).
    if not salt.utils.is_windows():
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = salt.utils.normalize_mode(get_mode(src))

    try:
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
            if not recurse:
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to true!")
            if remove_existing:
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not copy \'{0}\' to \'{1}\''.format(src, dst)
        )

    if not salt.utils.is_windows():
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
code-block:: bash salt '*' file.copy /path/to/src /path/to/dst salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True ''' src = os.path.expanduser(src) dst = os.path.expanduser(dst) if not os.path.isabs(src): raise SaltInvocationError('File path must be absolute.') if not os.path.exists(src): raise CommandExecutionError('No such file or directory \'{0}\''.format(src)) if not salt.utils.is_windows(): pre_user = get_user(src) pre_group = get_group(src) pre_mode = salt.utils.normalize_mode(get_mode(src)) try: if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src): if not recurse: raise SaltInvocationError( "Cannot copy overwriting a directory without recurse flag set to true!") if remove_existing: if os.path.exists(dst): shutil.rmtree(dst) shutil.copytree(src, dst) else: salt.utils.files.recursive_copy(src, dst) else: shutil.copyfile(src, dst) except OSError: raise CommandExecutionError( 'Could not copy \'{0}\' to \'{1}\''.format(src, dst) ) if not salt.utils.is_windows(): check_perms(dst, None, pre_user, pre_group, pre_mode) return True def lstat(path): ''' .. versionadded:: 2014.1.0 Returns the lstat attributes for the given file or dir. Does not support symbolic links. CLI Example: .. code-block:: bash salt '*' file.lstat /path/to/file ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Path to file must be absolute.') try: lst = os.lstat(path) return dict((key, getattr(lst, key)) for key in ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid')) except Exception: return {} def access(path, mode): ''' .. versionadded:: 2014.1.0 Test whether the Salt process has the specified access to the file. One of the following modes must be specified: .. 
code-block::text f: Test the existence of the path r: Test the readability of the path w: Test the writability of the path x: Test whether the path can be executed CLI Example: .. code-block:: bash salt '*' file.access /path/to/file f salt '*' file.access /path/to/file x ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Path to link must be absolute.') modes = {'f': os.F_OK, 'r': os.R_OK, 'w': os.W_OK, 'x': os.X_OK} if mode in modes: return os.access(path, modes[mode]) elif mode in six.itervalues(modes): return os.access(path, mode) else: raise SaltInvocationError('Invalid mode specified.') def readlink(path, canonicalize=False): ''' .. versionadded:: 2014.1.0 Return the path that a symlink points to If canonicalize is set to True, then it return the final target CLI Example: .. code-block:: bash salt '*' file.readlink /path/to/link ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Path to link must be absolute.') if not os.path.islink(path): raise SaltInvocationError('A valid link was not specified.') if canonicalize: return os.path.realpath(path) else: return os.readlink(path) def readdir(path): ''' .. versionadded:: 2014.1.0 Return a list containing the contents of a directory CLI Example: .. code-block:: bash salt '*' file.readdir /path/to/dir/ ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('Dir path must be absolute.') if not os.path.isdir(path): raise SaltInvocationError('A valid directory was not specified.') dirents = ['.', '..'] dirents.extend(os.listdir(path)) return dirents def statvfs(path): ''' .. versionadded:: 2014.1.0 Perform a statvfs call against the filesystem that the file resides on CLI Example: .. 
code-block:: bash salt '*' file.statvfs /path/to/file ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute.') try: stv = os.statvfs(path) return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')) except (OSError, IOError): raise CommandExecutionError('Could not statvfs \'{0}\''.format(path)) return False def stats(path, hash_type=None, follow_symlinks=True): ''' Return a dict containing the stats for a given file CLI Example: .. code-block:: bash salt '*' file.stats /etc/passwd ''' path = os.path.expanduser(path) ret = {} if not os.path.exists(path): try: # Broken symlinks will return False for os.path.exists(), but still # have a uid and gid pstat = os.lstat(path) except OSError: # Not a broken symlink, just a nonexistent path return ret else: if follow_symlinks: pstat = os.stat(path) else: pstat = os.lstat(path) ret['inode'] = pstat.st_ino ret['uid'] = pstat.st_uid ret['gid'] = pstat.st_gid ret['group'] = gid_to_group(pstat.st_gid) ret['user'] = uid_to_user(pstat.st_uid) ret['atime'] = pstat.st_atime ret['mtime'] = pstat.st_mtime ret['ctime'] = pstat.st_ctime ret['size'] = pstat.st_size ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode))) if hash_type: ret['sum'] = get_hash(path, hash_type) ret['type'] = 'file' if stat.S_ISDIR(pstat.st_mode): ret['type'] = 'dir' if stat.S_ISCHR(pstat.st_mode): ret['type'] = 'char' if stat.S_ISBLK(pstat.st_mode): ret['type'] = 'block' if stat.S_ISREG(pstat.st_mode): ret['type'] = 'file' if stat.S_ISLNK(pstat.st_mode): ret['type'] = 'link' if stat.S_ISFIFO(pstat.st_mode): ret['type'] = 'pipe' if stat.S_ISSOCK(pstat.st_mode): ret['type'] = 'socket' ret['target'] = os.path.realpath(path) return ret def rmdir(path): ''' .. versionadded:: 2014.1.0 Remove the specified directory. Fails if a directory is not empty. CLI Example: .. 
code-block:: bash salt '*' file.rmdir /tmp/foo/ ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute.') if not os.path.isdir(path): raise SaltInvocationError('A valid directory was not specified.') try: os.rmdir(path) return True except OSError as exc: return exc.strerror def remove(path): ''' Remove the named file. If a directory is supplied, it will be recursively deleted. CLI Example: .. code-block:: bash salt '*' file.remove /tmp/foo ''' path = os.path.expanduser(path) if not os.path.isabs(path): raise SaltInvocationError('File path must be absolute: {0}'.format(path)) try: if os.path.isfile(path) or os.path.islink(path): os.remove(path) return True elif os.path.isdir(path): shutil.rmtree(path) return True except (OSError, IOError) as exc: raise CommandExecutionError( 'Could not remove \'{0}\': {1}'.format(path, exc) ) return False def directory_exists(path): ''' Tests to see if path is a valid directory. Returns True/False. CLI Example: .. code-block:: bash salt '*' file.directory_exists /etc ''' return os.path.isdir(os.path.expanduser(path)) def file_exists(path): ''' Tests to see if path is a valid file. Returns True/False. CLI Example: .. code-block:: bash salt '*' file.file_exists /etc/passwd ''' return os.path.isfile(os.path.expanduser(path)) def path_exists_glob(path): ''' Tests to see if path after expansion is a valid path (file or directory). Expansion allows usage of ? * and character ranges []. Tilde expansion is not supported. Returns True/False. .. versionadded:: Hellium CLI Example: .. code-block:: bash salt '*' file.path_exists_glob /etc/pam*/pass* ''' return True if glob.glob(os.path.expanduser(path)) else False def restorecon(path, recursive=False): ''' Reset the SELinux context on a given path CLI Example: .. 
def get_selinux_context(path):
    '''
    Get an SELinux context from a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_selinux_context /etc/hosts
    '''
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)

    try:
        # The context is the colon-separated user:role:type:level field of
        # ``ls -Z`` output.
        ret = re.search(r'\w+:\w+:\w+:\w+', out).group(0)
    except AttributeError:
        # re.search returned None -> no context field in the output.
        ret = (
            'No selinux context information is available for {0}'.format(path)
        )

    return ret


def set_selinux_context(path,
                        user=None,
                        role=None,
                        type=None,    # pylint: disable=W0622
                        range=None):  # pylint: disable=W0622
    '''
    Set a specific SELinux label on a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.set_selinux_context path <role> <type> <range>
    '''
    # Nothing to do when no context component was requested.
    if not any((user, role, type, range)):
        return False

    cmd = ['chcon']
    if user:
        cmd.extend(['-u', user])
    if role:
        cmd.extend(['-r', role])
    if type:
        cmd.extend(['-t', type])
    if range:
        cmd.extend(['-l', range])

    cmd.append(path)
    ret = not __salt__['cmd.retcode'](cmd, python_shell=False)
    if ret:
        # On success report the context that is now in effect.
        return get_selinux_context(path)
    else:
        return ret


def source_list(source, source_hash, saltenv):
    '''
    Check the source list and return the source to use

    CLI Example:

    .. code-block:: bash

        salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
    '''
    # Results are memoized per (source, source_hash, saltenv) in __context__
    # for the duration of this run.
    contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
    if contextkey in __context__:
        return __context__[contextkey]

    # get the master file list
    if isinstance(source, list):
        mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
        # Collect master listings for any saltenv override embedded in a
        # source URL (e.g. salt://file?saltenv=foo).
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))
            path, senv = salt.utils.url.parse(single)
            if senv:
                mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
                mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]

        ret = None
        for single in source:
            if isinstance(single, dict):
                # check the proto, if it is http or ftp then download the file
                # to check, if it is salt then check the master list
                # if it is a local file, check if the file exists
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                # A per-source hash (dict value) takes precedence over the
                # function-level source_hash.
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = _urlparse(single_src)
                proto = urlparsed_single_src.scheme
                if proto == 'salt':
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    if (path, saltenv) in mfiles or (path, saltenv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith('http') or proto == 'ftp':
                    try:
                        if __salt__['cp.cache_file'](single_src):
                            ret = (single_src, single_hash)
                            break
                    except MinionError as exc:
                        # Error downloading file. Log the caught exception and
                        # continue on to the next source.
                        log.exception(exc)
                elif proto == 'file' and os.path.exists(urlparsed_single_src.path):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith('/') and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, six.string_types):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
                urlparsed_src = _urlparse(single)
                proto = urlparsed_src.scheme
                if proto == 'file' and os.path.exists(urlparsed_src.path):
                    ret = (single, source_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
                    if __salt__['cp.cache_file'](single):
                        ret = (single, source_hash)
                        break
                elif single.startswith('/') and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the list items matched
            raise CommandExecutionError(
                'none of the specified sources were found'
            )
    else:
        # A single (non-list) source is passed through unchanged.
        ret = (source, source_hash)

    __context__[contextkey] = ret
    return ret


def apply_template_on_contents(
        contents,
        template,
        context,
        defaults,
        saltenv):
    '''
    Return the contents after applying the templating engine

    contents
        template string

    template
        template format

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    CLI Example:

    .. code-block:: bash

        salt '*' file.apply_template_on_contents \\
            contents='This is a {{ template }} string.' \\
            template=jinja \\
            "context={}" "defaults={'template': 'cool'}" \\
            saltenv=base
    '''
    if template in salt.utils.templates.TEMPLATE_REGISTRY:
        # context overrides defaults when both define the same key.
        context_dict = defaults if defaults else {}
        if context:
            context_dict.update(context)
        # Apply templating
        contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
            contents,
            from_str=True,
            to_str=True,
            context=context_dict,
            saltenv=saltenv,
            grains=__opts__['grains'],
            pillar=__pillar__,
            salt=__salt__,
            opts=__opts__)['data']
        if six.PY2:
            contents = contents.encode('utf-8')
    else:
        # NOTE(review): unsupported template engines return a state-style
        # result dict instead of raising — callers must handle both shapes.
        ret = {}
        ret['result'] = False
        ret['comment'] = ('Specified template format {0} is not supported'
                          ).format(template)
        return ret
    return contents
def get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        saltenv,
        context,
        defaults,
        skip_verify=False,
        **kwargs):
    '''
    Return the managed file data for file.managed

    name
        location where the file lives on the server

    template
        template format

    source
        managed source file

    source_hash
        hash of the source file

    source_hash_name
        When ``source_hash`` refers to a remote file, this specifies the
        filename to look for in that file.

        .. versionadded:: 2016.3.5

    user
        Owner of file

    group
        Group owner of file

    mode
        Permissions of file

    context
        Variables to add to the template context

    defaults
        Default values of for context_dict

    skip_verify
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
    '''
    # Copy the file to the minion and templatize it
    sfn = ''
    source_sum = {}

    def _get_local_file_source_sum(path):
        '''
        DRY helper for getting the source_sum value from a locally cached path.
        '''
        return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}

    # If we have a source defined, let's figure out what the hash is
    if source:
        urlparsed_source = _urlparse(source)
        parsed_scheme = urlparsed_source.scheme
        parsed_path = os.path.join(
                urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)

        # A single-letter scheme is really a Windows drive letter
        # (c:\path); reassemble the path and treat it as a local file.
        if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
            parsed_path = ':'.join([parsed_scheme, parsed_path])
            parsed_scheme = 'file'

        if parsed_scheme == 'salt':
            source_sum = __salt__['cp.hash_file'](source, saltenv)
            if not source_sum:
                return '', {}, 'Source file {0} not found'.format(source)
        elif not source_hash and parsed_scheme == 'file':
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            source_sum = _get_local_file_source_sum(source)
        else:
            # Remote (http/https/ftp) source: a hash is mandatory unless
            # verification is explicitly skipped.
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(name,
                                                    source,
                                                    source_hash,
                                                    source_hash_name,
                                                    saltenv)
                    except CommandExecutionError as exc:
                        return '', {}, exc.strerror
                else:
                    msg = (
                        'Unable to verify upstream hash of source file {0}, '
                        'please set source_hash or set skip_verify to True'
                        .format(source)
                    )
                    return '', {}, msg

    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__['cp.is_cached'](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get('hash_type', 'sha256')
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                sfn = cached_dest
                source_sum = {'hsum': cached_sum, 'hash_type': htype}
            elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
                cache_refetch = True

        # If we didn't have the template or remote file, let's get it
        # Similarly when the file has been updated and the cache has to be refreshed
        if not sfn or cache_refetch:
            try:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            except Exception as exc:
                # A 404 or other error code may raise an exception, catch it
                # and return a comment that will fail the calling state.
                return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)

        # If cache failed, sfn will be False, so do a truth check on sfn first
        # as invoking os.path.exists() on a bool raises a TypeError.
        if not sfn or not os.path.exists(sfn):
            return sfn, {}, 'Source file \'{0}\' not found'.format(source)
        if sfn == name:
            raise SaltInvocationError(
                'Source file cannot be the same as destination'
            )

        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__['grains'],
                    opts=__opts__,
                    **kwargs)
            else:
                return sfn, {}, ('Specified template format {0} is not supported'
                                 ).format(template)

            if data['result']:
                # Rendering succeeded: the rendered tempfile becomes the new
                # source and its checksum replaces the upstream one.
                sfn = data['data']
                hsum = get_hash(sfn, form='sha256')
                source_sum = {'hash_type': 'sha256',
                              'hsum': hsum}
            else:
                __clean_tmp(sfn)
                return sfn, {}, data['data']

    return sfn, source_sum, ''


def extract_hash(hash_fn,
                 hash_type='sha256',
                 file_name='',
                 source='',
                 source_hash_name=None):
    '''
    .. versionchanged:: 2016.3.5
        Prior to this version, only the ``file_name`` argument was considered
        for filename matches in the hash file. This would be problematic for
        cases in which the user was relying on a remote checksum file that
        they do not control, and they wished to use a different name for that
        file on the minion from the filename on the remote server (and in the
        checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the
        remote file was at ``https://mydomain.tld/different_name.tar.gz``. The
        :py:func:`file.managed <salt.states.file.managed>` state now also
        passes this function the source URI as well as the ``source_hash_name``
        (if specified). In cases where ``source_hash_name`` is specified, it
        takes precedence over both the ``file_name`` and ``source``. When it is
        not specified, ``file_name`` takes precedence over ``source``. This
        allows for better capability for matching hashes.
    .. versionchanged:: 2016.11.0
        File name and source URI matches are no longer disregarded when
        ``source_hash_name`` is specified. They will be used as fallback
        matches if there is no match to the ``source_hash_name`` value.

    This routine is called from the :mod:`file.managed
    <salt.states.file.managed>` state to pull a hash from a remote file.
    Regular expressions are used line by line on the ``source_hash`` file, to
    find a potential candidate of the indicated hash type. This avoids many
    problems of arbitrary file layout rules. It specifically permits pulling
    hash codes from debian ``*.dsc`` files.

    If no exact match of a hash and filename are found, then the first hash
    found (if any) will be returned. If no hashes at all are found, then
    ``None`` will be returned.

    CLI Example:

    .. code-block:: bash

        salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
    '''
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                'file.extract_hash: Unsupported hash_type \'%s\', falling '
                'back to matching any supported hash_type', hash_type
            )
            hash_type = ''
        # Accept any supported digest length (regex {min,max} quantifier).
        hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = str(hash_len)

    filename_separators = string.whitespace + r'\/'

    # For each candidate name, precompute the negative index of the char
    # just before where the name would start at the end of a line.
    if source_hash_name:
        if not isinstance(source_hash_name, six.string_types):
            source_hash_name = str(source_hash_name)
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            'file.extract_hash: Extracting %s hash for file matching '
            'source_hash_name \'%s\'',
            'any supported' if not hash_type else hash_type,
            source_hash_name
        )
    if file_name:
        if not isinstance(file_name, six.string_types):
            file_name = str(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, six.string_types):
            source = str(source)
        urlparsed_source = _urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1

    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            'file.extract_hash: %s %s hash for file matching%s: %s',
            'If no source_hash_name match found, will extract'
                if source_hash_name
                else 'Extracting',
            'any supported' if not hash_type else hash_type,
            '' if len(basename_searches) == 1 else ' either of the following',
            ', '.join(basename_searches)
        )

    # partial = first hash seen anywhere (fallback); found = hashes that
    # also matched one of the requested names, keyed by match type.
    partial = None
    found = {}

    with salt.utils.fopen(hash_fn, 'r') as fp_:
        for line in fp_:
            line = line.strip()
            hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
            hash_match = re.search(hash_re, line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # There was a match, but it's not of the correct length
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {'hsum': matched_hsum,
                                   'hash_type': matched_type}

            if matched is None:
                log.debug(
                    'file.extract_hash: In line \'%s\', no %shash found',
                    line,
                    '' if not hash_type else hash_type + ' '
                )
                continue

            if partial is None:
                partial = matched

            def _add_to_matches(found, line, match_type, value, matched):
                log.debug(
                    'file.extract_hash: Line \'%s\' matches %s \'%s\'',
                    line, match_type, value
                )
                found.setdefault(match_type, []).append(matched)

            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(found, line, 'source_hash_name',
                                            source_hash_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(source_hash_name.replace('.', r'\.') + r'\s+',
                              line):
                    _add_to_matches(found, line, 'source_hash_name',
                                    source_hash_name, matched)
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(found, line, 'file_name',
                                            file_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(file_name.replace('.', r'\.') + r'\s+', line):
                    _add_to_matches(found, line, 'file_name',
                                    file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, 'source',
                                            source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(source.replace('.', r'\.') + r'\s+', line):
                    _add_to_matches(found, line, 'source',
                                    source, matched)
                    hash_matched = True

            if not hash_matched:
                log.debug(
                    'file.extract_hash: Line \'%s\' contains %s hash '
                    '\'%s\', but line did not meet the search criteria',
                    line, matched['hash_type'], matched['hsum']
                )

    # Priority order: source_hash_name, then file_name, then source; the
    # first hash recorded for the highest-priority type wins.
    for found_type, found_str in (('source_hash_name', source_hash_name),
                                  ('file_name', file_name),
                                  ('source', source)):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    'file.extract_hash: Multiple %s matches for %s: %s',
                    found_type,
                    found_str,
                    ', '.join(
                        ['{0} ({1})'.format(x['hsum'], x['hash_type'])
                         for x in found[found_type]]
                    )
                )
            ret = found[found_type][0]
            log.debug(
                'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
                ret['hash_type'], ret['hsum'], found_str
            )
            return ret

    if partial:
        log.debug(
            'file.extract_hash: Returning the partially identified %s hash '
            '\'%s\'', partial['hash_type'], partial['hsum']
        )
        return partial

    log.debug('file.extract_hash: No matches, returning None')
    return None
def check_perms(name, ret, user, group, mode, follow_symlinks=False):
    '''
    Check the permissions on files and chown if needed

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_perms /etc/sudoers '{}' root root 400

    .. versionchanged:: 2014.1.3
        ``follow_symlinks`` option added
    '''
    name = os.path.expanduser(name)

    # ``ret`` is a state-style result dict; create one when not supplied.
    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': [],
               'result': True}
        orig_comment = ''
    else:
        orig_comment = ret['comment']
        ret['comment'] = []

    # Check permissions
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    if not cur:
        # NOTE: The file.directory state checks the content of the error
        # message in this exception. Any changes made to the message for this
        # exception will reflect the file.directory state as well, and will
        # likely require changes there.
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = salt.utils.normalize_mode(cur['mode'])

    # Mode changes if needed
    if mode is not None:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.normalize_mode(mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    # Re-read the mode to verify the change took effect.
                    if mode != salt.utils.normalize_mode(get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
                        )
                    else:
                        ret['changes']['mode'] = mode

    # user/group changes if needed, then check if it worked
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        # On Windows, compare resolved uids (names may alias); elsewhere
        # compare names directly.
        if (salt.utils.is_windows() and
                user_to_uid(user) != user_to_uid(perms['luser'])
            ) or (
            not salt.utils.is_windows() and user != perms['luser']
        ):
            perms['cuser'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.is_windows() and
                group_to_gid(group) != group_to_gid(perms['lgroup'])
            ) or (
            not salt.utils.is_windows() and group != perms['lgroup']
        ):
            perms['cgroup'] = group

    if 'cuser' in perms or 'cgroup' in perms:
        if not __opts__['test']:
            # lchown operates on the link itself when not following symlinks.
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms['luser']
            if group is None:
                group = perms['lgroup']
            try:
                chown_func(name, user, group)
            except OSError:
                ret['result'] = False

    # Verify the ownership change actually happened.
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.is_windows() and
                user_to_uid(user) != user_to_uid(
                    get_user(name, follow_symlinks=follow_symlinks)) and
                user != ''
            ) or (
            not salt.utils.is_windows() and
                user != get_user(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['user'] = user
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change user to {0}'
                                      .format(user))
        elif 'cuser' in perms and user != '':
            ret['changes']['user'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        # NOTE(review): this branch tests ``user != ''`` rather than
        # ``group != ''`` in three places — this looks like a copy/paste
        # bug from the user branch above; confirm against upstream before
        # changing, as callers may depend on the current behavior.
        if (salt.utils.is_windows() and
                group_to_gid(group) != group_to_gid(
                    get_group(name, follow_symlinks=follow_symlinks)) and
                user != '') or (
            not salt.utils.is_windows() and
                group != get_group(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['group'] = group
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change group to {0}'
                                      .format(group))
        elif 'cgroup' in perms and user != '':
            ret['changes']['group'] = group

    # When a pre-existing ret dict was passed in, fold its comment back in
    # and flatten the comment list to a single string.
    if isinstance(orig_comment, six.string_types):
        if orig_comment:
            ret['comment'].insert(0, orig_comment)
        ret['comment'] = '; '.join(ret['comment'])
    if __opts__['test'] is True and ret['changes']:
        ret['result'] = None

    return ret, perms
def check_managed(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        **kwargs):
    '''
    Check to see what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,           # pylint: disable=W0633
                                      source_hash,
                                      saltenv)

    sfn = ''
    source_sum = None

    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
    changes = check_file_meta(name, sfn, source, source_sum,
                              user, group, mode, saltenv, contents)
    # Ignore permission for files written temporary directories
    # Files in any path will still be set correctly using get_managed()
    if name.startswith(tempfile.gettempdir()):
        for key in ['user', 'group', 'mode']:
            changes.pop(key, None)
    __clean_tmp(sfn)
    if changes:
        log.info(changes)
        comments = ['The following values are set to be changed:\n']
        comments.extend('{0}: {1}\n'.format(key, val)
                        for key, val in six.iteritems(changes))
        # Returns (None, comments) = "changes pending" in state semantics.
        return None, ''.join(comments)
    return True, 'The file {0} is in the correct state'.format(name)


def check_managed_changes(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        **kwargs):
    '''
    Return a dictionary of what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # If the source is a list then find which file exists
    source, source_hash = source_list(source,           # pylint: disable=W0633
                                      source_hash,
                                      saltenv)

    sfn = ''
    source_sum = None

    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments

        # keep_mode: inherit the mode from the (local or salt://) source
        # file instead of the ``mode`` argument.
        if sfn and source and keep_mode:
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = salt.utils.st_mode_to_octal(os.stat(sfn).st_mode)
                except Exception as exc:
                    log.warning('Unable to stat %s: %s', sfn, exc)
    changes = check_file_meta(name, sfn, source, source_sum,
                              user, group, mode, saltenv, contents)
    __clean_tmp(sfn)
    return changes
code-block:: yaml {hash_type: md5, hsum: <md5sum>} user Destination file user owner group Destination file group owner mode Destination file permissions mode saltenv Salt environment used to resolve source files contents File contents ''' changes = {} if not source_sum: source_sum = dict() lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False) if not lstats: changes['newfile'] = name return changes if 'hsum' in source_sum: if source_sum['hsum'] != lstats['sum']: if not sfn and source: sfn = __salt__['cp.cache_file'](source, saltenv) if sfn: if __salt__['config.option']('obfuscate_templates'): changes['diff'] = '<Obfuscated Template>' else: # Check to see if the files are bins bdiff = _binary_replace(name, sfn) if bdiff: changes['diff'] = bdiff else: with salt.utils.fopen(sfn, 'r') as src: slines = src.readlines() with salt.utils.fopen(name, 'r') as name_: nlines = name_.readlines() changes['diff'] = \ ''.join(difflib.unified_diff(nlines, slines)) else: changes['sum'] = 'Checksum differs' if contents is not None: # Write a tempfile with the static contents tmp = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) if salt.utils.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.fopen(tmp, 'w') as tmp_: tmp_.write(str(contents)) # Compare the static contents with the named file with salt.utils.fopen(tmp, 'r') as src: slines = src.readlines() with salt.utils.fopen(name, 'r') as name_: nlines = name_.readlines() __clean_tmp(tmp) if ''.join(nlines) != ''.join(slines): if __salt__['config.option']('obfuscate_templates'): changes['diff'] = '<Obfuscated Template>' else: if salt.utils.istextfile(name): changes['diff'] = \ ''.join(difflib.unified_diff(nlines, slines)) else: changes['diff'] = 'Replace binary file with text file' if (user is not None and user != lstats['user'] and user != lstats['uid']): changes['user'] = user if (group is not None and group != 
lstats['group'] and group != lstats['gid']): changes['group'] = group # Normalize the file mode smode = salt.utils.normalize_mode(lstats['mode']) mode = salt.utils.normalize_mode(mode) if mode is not None and mode != smode: changes['mode'] = mode return changes def get_diff( minionfile, masterfile, saltenv='base'): ''' Return unified diff of file compared to file on master CLI Example: .. code-block:: bash salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc ''' minionfile = os.path.expanduser(minionfile) ret = '' if not os.path.exists(minionfile): ret = 'File {0} does not exist on the minion'.format(minionfile) return ret sfn = __salt__['cp.cache_file'](masterfile, saltenv) if sfn: with salt.utils.fopen(sfn, 'r') as src: slines = src.readlines() with salt.utils.fopen(minionfile, 'r') as name_: nlines = name_.readlines() if ''.join(nlines) != ''.join(slines): bdiff = _binary_replace(minionfile, sfn) if bdiff: ret += bdiff else: ret += ''.join(difflib.unified_diff(nlines, slines, minionfile, masterfile)) else: ret = 'Failed to copy file from master' return ret def manage_file(name, sfn, ret, source, source_sum, user, group, mode, saltenv, backup, makedirs=False, template=None, # pylint: disable=W0613 show_changes=True, contents=None, dir_mode=None, follow_symlinks=True, skip_verify=False, keep_mode=False, **kwargs): ''' Checks the destination against what was retrieved with get_managed and makes the appropriate modifications (if necessary). name location to place the file sfn location of cached file on the minion This is the path to the file stored on the minion. This file is placed on the minion using cp.cache_file. If the hash sum of that file matches the source_sum, we do not transfer the file to the minion again. This file is then grabbed and if it has template set, it renders the file to be placed into the correct place on the system using salt.files.utils.copyfile() ret The initial state return data structure. 
Pass in ``None`` to use the default structure. source file reference on the master source_hash sum hash for source user user owner group group owner backup backup_mode makedirs make directories if they do not exist template format of templating show_changes Include diff in state return contents: contents to be placed in the file dir_mode mode for directories created with makedirs skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 keep_mode : False If ``True``, and the ``source`` is a file from the Salt fileserver (or a local file on the minion), the mode of the destination file will be set to the mode of the source file. CLI Example: .. code-block:: bash salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base '' .. versionchanged:: 2014.7.0 ``follow_symlinks`` option added ''' name = os.path.expanduser(name) if not ret: ret = {'name': name, 'changes': {}, 'comment': '', 'result': True} if source and not sfn: # File is not present, cache it sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) htype = source_sum.get('hash_type', __opts__['hash_type']) # Recalculate source sum now that file has been cached source_sum = { 'hash_type': htype, 'hsum': get_hash(sfn, form=htype) } if keep_mode: if _urlparse(source).scheme in ('salt', 'file') \ or source.startswith('/'): try: mode = salt.utils.st_mode_to_octal(os.stat(sfn).st_mode) except Exception as exc: log.warning('Unable to stat %s: %s', sfn, exc) # Check changes if the target file exists if os.path.isfile(name) or os.path.islink(name): if os.path.islink(name) and follow_symlinks: real_name = os.path.realpath(name) else: real_name = name # Only test the checksums on files with managed contents if 
source and not (not follow_symlinks and os.path.islink(real_name)): name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type'])) else: name_sum = None # Check if file needs to be replaced if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum): if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server or local # source, and we are not skipping checksum verification, then # verify that it matches the specified checksum. if not skip_verify \ and _urlparse(source).scheme not in ('salt', ''): dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3}). If the \'source_hash\' value ' 'refers to a remote file with multiple possible ' 'matches, then it may be necessary to set ' '\'source_hash_name\'.'.format( source_sum['hash_type'], source, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret # Print a diff equivalent to diff -u old new if __salt__['config.option']('obfuscate_templates'): ret['changes']['diff'] = '<Obfuscated Template>' elif not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: # Check to see if the files are bins bdiff = _binary_replace(real_name, sfn) if bdiff: ret['changes']['diff'] = bdiff else: with salt.utils.fopen(sfn, 'r') as src: slines = src.readlines() with salt.utils.fopen(real_name, 'r') as name_: nlines = name_.readlines() sndiff = ''.join(difflib.unified_diff(nlines, slines)) if sndiff: ret['changes']['diff'] = sndiff # Pre requisites are met, and the file needs to be replaced, do it try: salt.utils.files.copyfile(sfn, real_name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(sfn) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) 
if contents is not None: # Write the static contents to a temporary file tmp = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) if salt.utils.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.fopen(tmp, 'w') as tmp_: tmp_.write(str(contents)) # Compare contents of files to know if we need to replace with salt.utils.fopen(tmp, 'r') as src: slines = src.readlines() with salt.utils.fopen(real_name, 'r') as name_: nlines = name_.readlines() different = ''.join(slines) != ''.join(nlines) if different: if __salt__['config.option']('obfuscate_templates'): ret['changes']['diff'] = '<Obfuscated Template>' elif not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: if salt.utils.istextfile(real_name): ret['changes']['diff'] = \ ''.join(difflib.unified_diff(nlines, slines)) else: ret['changes']['diff'] = \ 'Replace binary file with text file' # Pre requisites are met, the file needs to be replaced, do it try: salt.utils.files.copyfile(tmp, real_name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(tmp) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) __clean_tmp(tmp) # Check for changing symlink to regular file here if os.path.islink(name) and not follow_symlinks: if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server source verify # that it matches the intended sum value if not skip_verify and _urlparse(source).scheme != 'salt': dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3})'.format( source_sum['hash_type'], name, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret try: salt.utils.files.copyfile(sfn, name, 
__salt__['config.backup_mode'](backup), __opts__['cachedir']) except IOError as io_error: __clean_tmp(sfn) return _error( ret, 'Failed to commit change: {0}'.format(io_error)) ret['changes']['diff'] = \ 'Replace symbolic link with regular file' ret, _ = check_perms(name, ret, user, group, mode, follow_symlinks) if ret['changes']: ret['comment'] = 'File {0} updated'.format(name) elif not ret['changes'] and ret['result']: ret['comment'] = u'File {0} is in the correct state'.format( salt.utils.locales.sdecode(name) ) if sfn: __clean_tmp(sfn) return ret else: # target file does not exist contain_dir = os.path.dirname(name) def _set_mode_and_make_dirs(name, dir_mode, mode, user, group): # check for existence of windows drive letter if salt.utils.is_windows(): drive, _ = os.path.splitdrive(name) if drive and not os.path.exists(drive): __clean_tmp(sfn) return _error(ret, '{0} drive not present'.format(drive)) if dir_mode is None and mode is not None: # Add execute bit to each nonzero digit in the mode, if # dir_mode was not specified. Otherwise, any # directories created with makedirs_() below can't be # listed via a shell. 
mode_list = [x for x in str(mode)][-3:] for idx in range(len(mode_list)): if mode_list[idx] != '0': mode_list[idx] = str(int(mode_list[idx]) | 1) dir_mode = ''.join(mode_list) makedirs_(name, user=user, group=group, mode=dir_mode) if source: # It is a new file, set the diff accordingly ret['changes']['diff'] = 'New file' # Apply the new file if not sfn: sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: return _error( ret, 'Source file \'{0}\' not found'.format(source)) # If the downloaded file came from a non salt server source verify # that it matches the intended sum value if not skip_verify \ and _urlparse(source).scheme != 'salt': dl_sum = get_hash(sfn, source_sum['hash_type']) if dl_sum != source_sum['hsum']: ret['comment'] = ( 'Specified {0} checksum for {1} ({2}) does not match ' 'actual checksum ({3})'.format( source_sum['hash_type'], name, source_sum['hsum'], dl_sum ) ) ret['result'] = False return ret if not os.path.isdir(contain_dir): if makedirs: _set_mode_and_make_dirs(name, dir_mode, mode, user, group) else: __clean_tmp(sfn) # No changes actually made ret['changes'].pop('diff', None) return _error(ret, 'Parent directory not present') else: # source != True if not os.path.isdir(contain_dir): if makedirs: _set_mode_and_make_dirs(name, dir_mode, mode, user, group) else: __clean_tmp(sfn) # No changes actually made ret['changes'].pop('diff', None) return _error(ret, 'Parent directory not present') # Create the file, user rw-only if mode will be set to prevent # a small security race problem before the permissions are set if mode: current_umask = os.umask(0o77) # Create a new file when test is False and source is None if contents is None: if not __opts__['test']: if touch(name): ret['changes']['new'] = 'file {0} created'.format(name) ret['comment'] = 'Empty file' else: return _error( ret, 'Empty file {0} not created'.format(name) ) else: if not __opts__['test']: if touch(name): ret['changes']['diff'] = 'New file' else: return _error( ret, 'File 
{0} not created'.format(name) ) if mode: os.umask(current_umask) if contents is not None: # Write the static contents to a temporary file tmp = salt.utils.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX, text=True) if salt.utils.is_windows(): contents = os.linesep.join( _splitlines_preserving_trailing_newline(contents)) with salt.utils.fopen(tmp, 'w') as tmp_: tmp_.write(str(contents)) # Copy into place salt.utils.files.copyfile(tmp, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) __clean_tmp(tmp) # Now copy the file contents if there is a source file elif sfn: salt.utils.files.copyfile(sfn, name, __salt__['config.backup_mode'](backup), __opts__['cachedir']) __clean_tmp(sfn) # This is a new file, if no mode specified, use the umask to figure # out what mode to use for the new file. if mode is None and not salt.utils.is_windows(): # Get current umask mask = os.umask(0) os.umask(mask) # Calculate the mode value that results from the umask mode = oct((0o777 ^ mask) & 0o666) ret, _ = check_perms(name, ret, user, group, mode) if not ret['comment']: ret['comment'] = 'File ' + name + ' updated' if __opts__['test']: ret['comment'] = 'File ' + name + ' not updated' elif not ret['changes'] and ret['result']: ret['comment'] = 'File ' + name + ' is in the correct state' if sfn: __clean_tmp(sfn) return ret def mkdir(dir_path, user=None, group=None, mode=None): ''' Ensure that a directory is available. CLI Example: .. code-block:: bash salt '*' file.mkdir /opt/jetty/context ''' dir_path = os.path.expanduser(dir_path) directory = os.path.normpath(dir_path) if not os.path.isdir(directory): # If a caller such as managed() is invoked with makedirs=True, make # sure that any created dirs are created with the same user and group # to follow the principal of least surprise method. makedirs_perms(directory, user, group, mode) return True def makedirs_(path, user=None, group=None, mode=None): ''' Ensure that the directory containing this path is available. .. 
note:: The path must end with a trailing slash otherwise the directory/directories will be created up to the parent directory. For example if path is ``/opt/code``, then it would be treated as ``/opt/`` but if the path ends with a trailing slash like ``/opt/code/``, then it would be treated as ``/opt/code/``. CLI Example: .. code-block:: bash salt '*' file.makedirs /opt/code/ ''' path = os.path.expanduser(path) # walk up the directory structure until we find the first existing # directory dirname = os.path.normpath(os.path.dirname(path)) if os.path.isdir(dirname): # There's nothing for us to do msg = 'Directory \'{0}\' already exists'.format(dirname) log.debug(msg) return msg if os.path.exists(dirname): msg = 'The path \'{0}\' already exists and is not a directory'.format( dirname ) log.debug(msg) return msg directories_to_create = [] while True: if os.path.isdir(dirname): break directories_to_create.append(dirname) current_dirname = dirname dirname = os.path.dirname(dirname) if current_dirname == dirname: raise SaltInvocationError( 'Recursive creation for path \'{0}\' would result in an ' 'infinite loop. Please use an absolute path.'.format(dirname) ) # create parent directories from the topmost to the most deeply nested one directories_to_create.reverse() for directory_to_create in directories_to_create: # all directories have the user, group and mode set!! log.debug('Creating directory: %s', directory_to_create) mkdir(directory_to_create, user=user, group=group, mode=mode) def makedirs_perms(name, user=None, group=None, mode='0755'): ''' Taken and modified from os.makedirs to set user, group and mode for each directory created. CLI Example: .. 
code-block:: bash

        salt '*' file.makedirs_perms /opt/code
    '''
    name = os.path.expanduser(name)

    # Recursive directory creation adapted from os.makedirs(): peel off the
    # last path component and create the missing parents first, applying the
    # same user/group/mode to each directory as it is made.
    path = os.path
    head, tail = path.split(name)
    if not tail:
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # be happy if someone already created the path
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:  # xxx/newdir/. exists if xxx/newdir exists
            return
    os.mkdir(name)
    # Apply ownership and permissions to the directory just created.
    # NOTE(review): int('{0}'.format(mode)) parses the mode as base-10
    # (e.g. '0755' -> 755), not octal -- check_perms() appears to be
    # responsible for normalizing it; confirm against check_perms().
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)


def get_devmm(name):
    '''
    Get major/minor info from a device

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_devmm /dev/chr
    '''
    name = os.path.expanduser(name)

    if is_chrdev(name) or is_blkdev(name):
        # st_rdev carries the device number for device special files
        stat_structure = os.stat(name)
        return (
                os.major(stat_structure.st_rdev),
                os.minor(stat_structure.st_rdev))
    else:
        # Not a character or block device: report (0, 0)
        return (0, 0)


def is_chrdev(name):
    '''
    Check if a file exists and is a character device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_chrdev /dev/chr
    '''
    name = os.path.expanduser(name)

    stat_structure = None
    try:
        stat_structure = os.stat(name)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # If the character device does not exist in the first place
            return False
        else:
            # Any other stat failure (e.g. permission denied) is propagated
            raise
    return stat.S_ISCHR(stat_structure.st_mode)


def mknod_chrdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a character device.

    CLI Example:

    .. 
code-block:: bash salt '*' file.mknod_chrdev /dev/chr 180 31 ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} log.debug('Creating character device name:{0} major:{1} minor:{2} mode:{3}' .format(name, major, minor, mode)) try: if __opts__['test']: ret['changes'] = {'new': 'Character device {0} created.'.format(name)} ret['result'] = None else: if os.mknod(name, int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR, os.makedev(major, minor)) is None: ret['changes'] = {'new': 'Character device {0} created.'.format(name)} ret['result'] = True except OSError as exc: # be happy it is already there....however, if you are trying to change the # major/minor, you will need to unlink it first as os.mknod will not overwrite if exc.errno != errno.EEXIST: raise else: ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name) # quick pass at verifying the permissions of the newly created character device check_perms(name, None, user, group, int('{0}'.format(mode)) if mode else None) return ret def is_blkdev(name): ''' Check if a file exists and is a block device. CLI Example: .. code-block:: bash salt '*' file.is_blkdev /dev/blk ''' name = os.path.expanduser(name) stat_structure = None try: stat_structure = os.stat(name) except OSError as exc: if exc.errno == errno.ENOENT: # If the block device does not exist in the first place return False else: raise return stat.S_ISBLK(stat_structure.st_mode) def mknod_blkdev(name, major, minor, user=None, group=None, mode='0660'): ''' .. versionadded:: 0.17.0 Create a block device. CLI Example: .. 
code-block:: bash salt '*' file.mknod_blkdev /dev/blk 8 999 ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} log.debug('Creating block device name:{0} major:{1} minor:{2} mode:{3}' .format(name, major, minor, mode)) try: if __opts__['test']: ret['changes'] = {'new': 'Block device {0} created.'.format(name)} ret['result'] = None else: if os.mknod(name, int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK, os.makedev(major, minor)) is None: ret['changes'] = {'new': 'Block device {0} created.'.format(name)} ret['result'] = True except OSError as exc: # be happy it is already there....however, if you are trying to change the # major/minor, you will need to unlink it first as os.mknod will not overwrite if exc.errno != errno.EEXIST: raise else: ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name) # quick pass at verifying the permissions of the newly created block device check_perms(name, None, user, group, int('{0}'.format(mode)) if mode else None) return ret def is_fifo(name): ''' Check if a file exists and is a FIFO. CLI Example: .. code-block:: bash salt '*' file.is_fifo /dev/fifo ''' name = os.path.expanduser(name) stat_structure = None try: stat_structure = os.stat(name) except OSError as exc: if exc.errno == errno.ENOENT: # If the fifo does not exist in the first place return False else: raise return stat.S_ISFIFO(stat_structure.st_mode) def mknod_fifo(name, user=None, group=None, mode='0660'): ''' .. versionadded:: 0.17.0 Create a FIFO pipe. CLI Example: .. 
code-block:: bash salt '*' file.mknod_fifo /dev/fifo ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'comment': '', 'result': False} log.debug('Creating FIFO name: {0}'.format(name)) try: if __opts__['test']: ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)} ret['result'] = None else: if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None: ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)} ret['result'] = True except OSError as exc: # be happy it is already there if exc.errno != errno.EEXIST: raise else: ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name) # quick pass at verifying the permissions of the newly created fifo check_perms(name, None, user, group, int('{0}'.format(mode)) if mode else None) return ret def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): ''' .. versionadded:: 0.17.0 Create a block device, character device, or fifo pipe. Identical to the gnu mknod. CLI Examples: .. code-block:: bash salt '*' file.mknod /dev/chr c 180 31 salt '*' file.mknod /dev/blk b 8 999 salt '*' file.nknod /dev/fifo p ''' ret = False makedirs_(name, user, group) if ntype == 'c': ret = mknod_chrdev(name, major, minor, user, group, mode) elif ntype == 'b': ret = mknod_blkdev(name, major, minor, user, group, mode) elif ntype == 'p': ret = mknod_fifo(name, user, group, mode) else: raise SaltInvocationError( 'Node type unavailable: \'{0}\'. Available node types are ' 'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype) ) return ret def list_backups(path, limit=None): ''' .. versionadded:: 0.17.0 Lists the previous versions of a file backed up using Salt's :ref:`file state backup <file-state-backups>` system. path The path on the minion to check for backups limit Limit the number of results to the most recent N backups CLI Example: .. 
code-block:: bash salt '*' file.list_backups /foo/bar/baz.txt ''' path = os.path.expanduser(path) try: limit = int(limit) except TypeError: pass except ValueError: log.error('file.list_backups: \'limit\' value must be numeric') limit = None bkroot = _get_bkroot() parent_dir, basename = os.path.split(path) if salt.utils.is_windows(): # ':' is an illegal filesystem path character on Windows src_dir = parent_dir.replace(':', '_') else: src_dir = parent_dir[1:] # Figure out full path of location of backup file in minion cache bkdir = os.path.join(bkroot, src_dir) if not os.path.isdir(bkdir): return {} files = {} for fname in [x for x in os.listdir(bkdir) if os.path.isfile(os.path.join(bkdir, x))]: if salt.utils.is_windows(): # ':' is an illegal filesystem path character on Windows strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename) else: strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename) try: timestamp = datetime.datetime.strptime(fname, strpfmt) except ValueError: # File didn't match the strp format string, so it's not a backup # for this file. Move on to the next one. continue if salt.utils.is_windows(): str_format = '%a %b %d %Y %H-%M-%S.%f' else: str_format = '%a %b %d %Y %H:%M:%S.%f' files.setdefault(timestamp, {})['Backup Time'] = \ timestamp.strftime(str_format) location = os.path.join(bkdir, fname) files[timestamp]['Size'] = os.stat(location).st_size files[timestamp]['Location'] = location return dict(list(zip( list(range(len(files))), [files[x] for x in sorted(files, reverse=True)[:limit]] ))) list_backup = salt.utils.alias_function(list_backups, 'list_backup') def list_backups_dir(path, limit=None): ''' Lists the previous versions of a directory backed up using Salt's :ref:`file state backup <file-state-backups>` system. path The directory on the minion to check for backups limit Limit the number of results to the most recent N backups CLI Example: .. 
code-block:: bash salt '*' file.list_backups_dir /foo/bar/baz/ ''' path = os.path.expanduser(path) try: limit = int(limit) except TypeError: pass except ValueError: log.error('file.list_backups_dir: \'limit\' value must be numeric') limit = None bkroot = _get_bkroot() parent_dir, basename = os.path.split(path) # Figure out full path of location of backup folder in minion cache bkdir = os.path.join(bkroot, parent_dir[1:]) if not os.path.isdir(bkdir): return {} files = {} f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])]) ff = os.listdir(bkdir) for i, n in six.iteritems(f): ssfile = {} for x in sorted(ff): basename = x.split('_')[0] if i == basename: strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename) try: timestamp = datetime.datetime.strptime(x, strpfmt) except ValueError: # Folder didn't match the strp format string, so it's not a backup # for this folder. Move on to the next one. continue ssfile.setdefault(timestamp, {})['Backup Time'] = \ timestamp.strftime('%a %b %d %Y %H:%M:%S.%f') location = os.path.join(bkdir, x) ssfile[timestamp]['Size'] = os.stat(location).st_size ssfile[timestamp]['Location'] = location sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]]))) sefiles = {i: sfiles} files.update(sefiles) return files def restore_backup(path, backup_id): ''' .. versionadded:: 0.17.0 Restore a previous version of a file that was backed up using Salt's :ref:`file state backup <file-state-backups>` system. path The path on the minion to check for backups backup_id The numeric id for the backup you wish to restore, as found using :mod:`file.list_backups <salt.modules.file.list_backups>` CLI Example: .. code-block:: bash salt '*' file.restore_backup /foo/bar/baz.txt 0 ''' path = os.path.expanduser(path) # Note: This only supports minion backups, so this function will need to be # modified if/when master backups are implemented. 
ret = {'result': False, 'comment': 'Invalid backup_id \'{0}\''.format(backup_id)} try: if len(str(backup_id)) == len(str(int(backup_id))): backup = list_backups(path)[int(backup_id)] else: return ret except ValueError: return ret except KeyError: ret['comment'] = 'backup_id \'{0}\' does not exist for ' \ '{1}'.format(backup_id, path) return ret salt.utils.backup_minion(path, _get_bkroot()) try: shutil.copyfile(backup['Location'], path) except IOError as exc: ret['comment'] = \ 'Unable to restore {0} to {1}: ' \ '{2}'.format(backup['Location'], path, exc) return ret else: ret['result'] = True ret['comment'] = 'Successfully restored {0} to ' \ '{1}'.format(backup['Location'], path) # Try to set proper ownership if not salt.utils.is_windows(): try: fstat = os.stat(path) except (OSError, IOError): ret['comment'] += ', but was unable to set ownership' else: os.chown(path, fstat.st_uid, fstat.st_gid) return ret def delete_backup(path, backup_id): ''' .. versionadded:: 0.17.0 Delete a previous version of a file that was backed up using Salt's :ref:`file state backup <file-state-backups>` system. path The path on the minion to check for backups backup_id The numeric id for the backup you wish to delete, as found using :mod:`file.list_backups <salt.modules.file.list_backups>` CLI Example: .. 
code-block:: bash

        salt '*' file.delete_backup /var/cache/salt/minion/file_backup/home/foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        # Only accept ids whose int() round-trip preserves the textual form
        # (rejects things like '01'); non-numeric ids raise ValueError below
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        # backup_id was not a number at all
        return ret
    except KeyError:
        # Numeric id, but no backup with that index exists for this path
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret

    try:
        os.remove(backup['Location'])
    except IOError as exc:
        ret['comment'] = 'Unable to remove {0}: {1}'.format(backup['Location'],
                                                            exc)
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully removed {0}'.format(backup['Location'])

    return ret

# Backwards-compatible alias
remove_backup = salt.utils.alias_function(delete_backup, 'remove_backup')


def grep(path,
         pattern,
         *opts):
    '''
    Grep for a string in the specified file

    .. note::
        This function's return value is slated for refinement in future
        versions of Salt

    path
        Path to the file to be searched

        .. note::
            Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
            is being used then the path should be quoted to keep the shell from
            attempting to expand the glob expression.

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    opts
        Additional command-line flags to pass to the grep command. For example:
        ``-v``, or ``-i -B2``

        .. note::
            The options should come after a double-dash (as shown in the
            examples below) to keep Salt's own argument parser from
            interpreting them.

    CLI Example:

    .. 
code-block:: bash salt '*' file.grep /etc/passwd nobody salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2 salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l ''' path = os.path.expanduser(path) split_opts = [] for opt in opts: try: split = salt.utils.shlex_split(opt) except AttributeError: split = salt.utils.shlex_split(str(opt)) if len(split) > 1: raise SaltInvocationError( 'Passing multiple command line arguments in a single string ' 'is not supported, please pass the following arguments ' 'separately: {0}'.format(opt) ) split_opts.extend(split) cmd = ['grep'] + split_opts + [pattern, path] try: ret = __salt__['cmd.run_all'](cmd, python_shell=False) except (IOError, OSError) as exc: raise CommandExecutionError(exc.strerror) return ret def open_files(by_pid=False): ''' Return a list of all physical open files on the system. CLI Examples: .. code-block:: bash salt '*' file.open_files salt '*' file.open_files by_pid=True ''' # First we collect valid PIDs pids = {} procfs = os.listdir('/proc/') for pfile in procfs: try: pids[int(pfile)] = [] except ValueError: # Not a valid PID, move on pass # Then we look at the open files for each PID files = {} for pid in pids: ppath = '/proc/{0}'.format(pid) try: tids = os.listdir('{0}/task'.format(ppath)) except OSError: continue # Collect the names of all of the file descriptors fd_ = [] #try: # fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid))) #except: # pass for fpath in os.listdir('{0}/fd'.format(ppath)): fd_.append('{0}/fd/{1}'.format(ppath, fpath)) for tid in tids: try: fd_.append( os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid)) ) except OSError: continue for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)): fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath)) fd_ = sorted(set(fd_)) # Loop through file descriptors and return useful data for each file for fdpath in 
fd_: # Sometimes PIDs and TIDs disappear before we can query them try: name = os.path.realpath(fdpath) # Running stat on the file cuts out all of the sockets and # deleted files from the list os.stat(name) except OSError: continue if name not in files: files[name] = [pid] else: # We still want to know which PIDs are using each file files[name].append(pid) files[name] = sorted(set(files[name])) pids[pid].append(name) pids[pid] = sorted(set(pids[pid])) if by_pid: return pids return files def pardir(): ''' Return the relative parent directory path symbol for underlying OS .. versionadded:: 2014.7.0 This can be useful when constructing Salt Formulas. .. code-block:: jinja {% set pardir = salt['file.pardir']() %} {% set final_path = salt['file.join']('subdir', pardir, 'confdir') %} CLI Example: .. code-block:: bash salt '*' file.pardir ''' return os.path.pardir def normpath(path): ''' Returns Normalize path, eliminating double slashes, etc. .. versionadded:: 2015.5.0 This can be useful at the CLI but is frequently useful when scripting. .. code-block:: jinja {%- from salt['file.normpath'](tpldir + '/../vars.jinja') import parent_vars %} CLI Example: .. code-block:: bash salt '*' file.normpath 'a/b/c/..' ''' return os.path.normpath(path) def basename(path): ''' Returns the final component of a pathname .. versionadded:: 2015.5.0 This can be useful at the CLI but is frequently useful when scripting. .. code-block:: jinja {%- set filename = salt['file.basename'](source_file) %} CLI Example: .. code-block:: bash salt '*' file.basename 'test/test.config' ''' return os.path.basename(path) def dirname(path): ''' Returns the directory component of a pathname .. versionadded:: 2015.5.0 This can be useful at the CLI but is frequently useful when scripting. .. code-block:: jinja {%- from salt['file.dirname'](tpldir) + '/vars.jinja' import parent_vars %} CLI Example: .. 
code-block:: bash salt '*' file.dirname 'test/path/filename.config' ''' return os.path.dirname(path) def join(*args): ''' Return a normalized file system path for the underlying OS .. versionadded:: 2014.7.0 This can be useful at the CLI but is frequently useful when scripting combining path variables: .. code-block:: jinja {% set www_root = '/var' %} {% set app_dir = 'myapp' %} myapp_config: file: - managed - name: {{ salt['file.join'](www_root, app_dir, 'config.yaml') }} CLI Example: .. code-block:: bash salt '*' file.join '/' 'usr' 'local' 'bin' ''' return os.path.join(*args) def move(src, dst): ''' Move a file or directory CLI Example: .. code-block:: bash salt '*' file.move /path/to/src /path/to/dst ''' src = os.path.expanduser(src) dst = os.path.expanduser(dst) if not os.path.isabs(src): raise SaltInvocationError('Source path must be absolute.') if not os.path.isabs(dst): raise SaltInvocationError('Destination path must be absolute.') ret = { 'result': True, 'comment': "'{0}' moved to '{1}'".format(src, dst), } try: shutil.move(src, dst) except (OSError, IOError) as exc: raise CommandExecutionError( "Unable to move '{0}' to '{1}': {2}".format(src, dst, exc) ) return ret def diskusage(path): ''' Recursively calculate disk usage of path and return it in bytes CLI Example: .. code-block:: bash salt '*' file.diskusage /path/to/check ''' total_size = 0 seen = set() if os.path.isfile(path): stat_structure = os.stat(path) ret = stat_structure.st_size return ret for dirpath, dirnames, filenames in os.walk(path): for f in filenames: fp = os.path.join(dirpath, f) try: stat_structure = os.stat(fp) except OSError: continue if stat_structure.st_ino in seen: continue seen.add(stat_structure.st_ino) total_size += stat_structure.st_size ret = total_size return ret
./CrossVul/dataset_final_sorted/CWE-200/py/bad_3325_2
crossvul-python_data_good_610_0
import warnings import six from django.http import HttpResponse from django.utils.crypto import constant_time_compare from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from django.views.generic import View from ..exceptions import AnymailInsecureWebhookWarning, AnymailWebhookValidationFailure from ..utils import get_anymail_setting, collect_all_methods, get_request_basic_auth class AnymailBasicAuthMixin(object): """Implements webhook basic auth as mixin to AnymailBaseWebhookView.""" # Whether to warn if basic auth is not configured. # For most ESPs, basic auth is the only webhook security, # so the default is True. Subclasses can set False if # they enforce other security (like signed webhooks). warn_if_no_basic_auth = True # List of allowable HTTP basic-auth 'user:pass' strings. basic_auth = None # (Declaring class attr allows override by kwargs in View.as_view.) def __init__(self, **kwargs): self.basic_auth = get_anymail_setting('webhook_authorization', default=[], kwargs=kwargs) # no esp_name -- auth is shared between ESPs # Allow a single string: if isinstance(self.basic_auth, six.string_types): self.basic_auth = [self.basic_auth] if self.warn_if_no_basic_auth and len(self.basic_auth) < 1: warnings.warn( "Your Anymail webhooks are insecure and open to anyone on the web. " "You should set WEBHOOK_AUTHORIZATION in your ANYMAIL settings. " "See 'Securing webhooks' in the Anymail docs.", AnymailInsecureWebhookWarning) # noinspection PyArgumentList super(AnymailBasicAuthMixin, self).__init__(**kwargs) def validate_request(self, request): """If configured for webhook basic auth, validate request has correct auth.""" if self.basic_auth: request_auth = get_request_basic_auth(request) # Use constant_time_compare to avoid timing attack on basic auth. (It's OK that any() # can terminate early: we're not trying to protect how many auth strings are allowed, # just the contents of each individual auth string.) 
auth_ok = any(constant_time_compare(request_auth, allowed_auth) for allowed_auth in self.basic_auth) if not auth_ok: # noinspection PyUnresolvedReferences raise AnymailWebhookValidationFailure( "Missing or invalid basic auth in Anymail %s webhook" % self.esp_name) # Mixin note: Django's View.__init__ doesn't cooperate with chaining, # so all mixins that need __init__ must appear before View in MRO. class AnymailBaseWebhookView(AnymailBasicAuthMixin, View): """Base view for processing ESP event webhooks ESP-specific implementations should subclass and implement parse_events. They may also want to implement validate_request if additional security is available. """ def __init__(self, **kwargs): super(AnymailBaseWebhookView, self).__init__(**kwargs) self.validators = collect_all_methods(self.__class__, 'validate_request') # Subclass implementation: # Where to send events: either ..signals.inbound or ..signals.tracking signal = None def validate_request(self, request): """Check validity of webhook post, or raise AnymailWebhookValidationFailure. AnymailBaseWebhookView includes basic auth validation. Subclasses can implement (or provide via mixins) if the ESP supports additional validation (such as signature checking). *All* definitions of this method in the class chain (including mixins) will be called. There is no need to chain to the superclass. (See self.run_validators and collect_all_methods.) Security note: use django.utils.crypto.constant_time_compare for string comparisons, to avoid exposing your validation to a timing attack. """ # if not constant_time_compare(request.POST['signature'], expected_signature): # raise AnymailWebhookValidationFailure("...message...") # (else just do nothing) pass def parse_events(self, request): """Return a list of normalized AnymailWebhookEvent extracted from ESP post data. Subclasses must implement. 
""" raise NotImplementedError() # HTTP handlers (subclasses shouldn't need to override): http_method_names = ["post", "head", "options"] @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(AnymailBaseWebhookView, self).dispatch(request, *args, **kwargs) def head(self, request, *args, **kwargs): # Some ESPs verify the webhook with a HEAD request at configuration time return HttpResponse() def post(self, request, *args, **kwargs): # Normal Django exception handling will do the right thing: # - AnymailWebhookValidationFailure will turn into an HTTP 400 response # (via Django SuspiciousOperation handling) # - Any other errors (e.g., in signal dispatch) will turn into HTTP 500 # responses (via normal Django error handling). ESPs generally # treat that as "try again later". self.run_validators(request) events = self.parse_events(request) esp_name = self.esp_name for event in events: self.signal.send(sender=self.__class__, event=event, esp_name=esp_name) return HttpResponse() # Request validation (subclasses shouldn't need to override): def run_validators(self, request): for validator in self.validators: validator(self, request) @property def esp_name(self): """ Read-only name of the ESP for this webhook view. Subclasses must override with class attr. E.g.: esp_name = "Postmark" esp_name = "SendGrid" # (use ESP's preferred capitalization) """ raise NotImplementedError("%s.%s must declare esp_name class attr" % (self.__class__.__module__, self.__class__.__name__))
./CrossVul/dataset_final_sorted/CWE-200/py/good_610_0
crossvul-python_data_bad_610_0
import warnings import six from django.http import HttpResponse from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from django.views.generic import View from ..exceptions import AnymailInsecureWebhookWarning, AnymailWebhookValidationFailure from ..utils import get_anymail_setting, collect_all_methods, get_request_basic_auth class AnymailBasicAuthMixin(object): """Implements webhook basic auth as mixin to AnymailBaseWebhookView.""" # Whether to warn if basic auth is not configured. # For most ESPs, basic auth is the only webhook security, # so the default is True. Subclasses can set False if # they enforce other security (like signed webhooks). warn_if_no_basic_auth = True # List of allowable HTTP basic-auth 'user:pass' strings. basic_auth = None # (Declaring class attr allows override by kwargs in View.as_view.) def __init__(self, **kwargs): self.basic_auth = get_anymail_setting('webhook_authorization', default=[], kwargs=kwargs) # no esp_name -- auth is shared between ESPs # Allow a single string: if isinstance(self.basic_auth, six.string_types): self.basic_auth = [self.basic_auth] if self.warn_if_no_basic_auth and len(self.basic_auth) < 1: warnings.warn( "Your Anymail webhooks are insecure and open to anyone on the web. " "You should set WEBHOOK_AUTHORIZATION in your ANYMAIL settings. 
" "See 'Securing webhooks' in the Anymail docs.", AnymailInsecureWebhookWarning) # noinspection PyArgumentList super(AnymailBasicAuthMixin, self).__init__(**kwargs) def validate_request(self, request): """If configured for webhook basic auth, validate request has correct auth.""" if self.basic_auth: basic_auth = get_request_basic_auth(request) if basic_auth is None or basic_auth not in self.basic_auth: # noinspection PyUnresolvedReferences raise AnymailWebhookValidationFailure( "Missing or invalid basic auth in Anymail %s webhook" % self.esp_name) # Mixin note: Django's View.__init__ doesn't cooperate with chaining, # so all mixins that need __init__ must appear before View in MRO. class AnymailBaseWebhookView(AnymailBasicAuthMixin, View): """Base view for processing ESP event webhooks ESP-specific implementations should subclass and implement parse_events. They may also want to implement validate_request if additional security is available. """ def __init__(self, **kwargs): super(AnymailBaseWebhookView, self).__init__(**kwargs) self.validators = collect_all_methods(self.__class__, 'validate_request') # Subclass implementation: # Where to send events: either ..signals.inbound or ..signals.tracking signal = None def validate_request(self, request): """Check validity of webhook post, or raise AnymailWebhookValidationFailure. AnymailBaseWebhookView includes basic auth validation. Subclasses can implement (or provide via mixins) if the ESP supports additional validation (such as signature checking). *All* definitions of this method in the class chain (including mixins) will be called. There is no need to chain to the superclass. (See self.run_validators and collect_all_methods.) """ # if request.POST['signature'] != expected_signature: # raise AnymailWebhookValidationFailure("...message...") # (else just do nothing) pass def parse_events(self, request): """Return a list of normalized AnymailWebhookEvent extracted from ESP post data. Subclasses must implement. 
""" raise NotImplementedError() # HTTP handlers (subclasses shouldn't need to override): http_method_names = ["post", "head", "options"] @method_decorator(csrf_exempt) def dispatch(self, request, *args, **kwargs): return super(AnymailBaseWebhookView, self).dispatch(request, *args, **kwargs) def head(self, request, *args, **kwargs): # Some ESPs verify the webhook with a HEAD request at configuration time return HttpResponse() def post(self, request, *args, **kwargs): # Normal Django exception handling will do the right thing: # - AnymailWebhookValidationFailure will turn into an HTTP 400 response # (via Django SuspiciousOperation handling) # - Any other errors (e.g., in signal dispatch) will turn into HTTP 500 # responses (via normal Django error handling). ESPs generally # treat that as "try again later". self.run_validators(request) events = self.parse_events(request) esp_name = self.esp_name for event in events: self.signal.send(sender=self.__class__, event=event, esp_name=esp_name) return HttpResponse() # Request validation (subclasses shouldn't need to override): def run_validators(self, request): for validator in self.validators: validator(self, request) @property def esp_name(self): """ Read-only name of the ESP for this webhook view. Subclasses must override with class attr. E.g.: esp_name = "Postmark" esp_name = "SendGrid" # (use ESP's preferred capitalization) """ raise NotImplementedError("%s.%s must declare esp_name class attr" % (self.__class__.__module__, self.__class__.__name__))
./CrossVul/dataset_final_sorted/CWE-200/py/bad_610_0
crossvul-python_data_bad_3325_1
# -*- coding: utf-8 -*- ''' Minion side functions for salt-cp ''' # Import python libs from __future__ import absolute_import import os import logging import fnmatch # Import salt libs import salt.minion import salt.fileclient import salt.utils import salt.utils.url import salt.crypt import salt.transport from salt.exceptions import CommandExecutionError from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=import-error,no-name-in-module # Import 3rd-party libs import salt.ext.six as six log = logging.getLogger(__name__) __proxyenabled__ = ['*'] def _auth(): ''' Return the auth object ''' if 'auth' not in __context__: __context__['auth'] = salt.crypt.SAuth(__opts__) return __context__['auth'] def _gather_pillar(pillarenv, pillar_override): ''' Whenever a state run starts, gather the pillar data fresh ''' pillar = salt.pillar.get_pillar( __opts__, __grains__, __opts__['id'], __opts__['environment'], pillar=pillar_override, pillarenv=pillarenv ) ret = pillar.compile_pillar() if pillar_override and isinstance(pillar_override, dict): ret.update(pillar_override) return ret def recv(files, dest): ''' Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI. ''' ret = {} for path, data in six.iteritems(files): if os.path.basename(path) == os.path.basename(dest) \ and not os.path.isdir(dest): final = dest elif os.path.isdir(dest): final = os.path.join(dest, os.path.basename(path)) elif os.path.isdir(os.path.dirname(dest)): final = dest else: return 'Destination unavailable' try: with salt.utils.fopen(final, 'w+') as fp_: fp_.write(data) ret[final] = True except IOError: ret[final] = False return ret def _mk_client(): ''' Create a file client and add it to the context. 
Each file client needs to correspond to a unique copy of the opts dictionary, therefore it's hashed by the id of the __opts__ dict ''' if 'cp.fileclient_{0}'.format(id(__opts__)) not in __context__: __context__['cp.fileclient_{0}'.format(id(__opts__))] = \ salt.fileclient.get_file_client(__opts__) def _client(): ''' Return a client, hashed by the list of masters ''' _mk_client() return __context__['cp.fileclient_{0}'.format(id(__opts__))] def _render_filenames(path, dest, saltenv, template, **kw): ''' Process markup in the :param:`path` and :param:`dest` variables (NOT the files under the paths they ultimately point to) according to the markup format provided by :param:`template`. ''' if not template: return (path, dest) # render the path as a template using path_template_engine as the engine if template not in salt.utils.templates.TEMPLATE_REGISTRY: raise CommandExecutionError( 'Attempted to render file paths with unavailable engine ' '{0}'.format(template) ) kwargs = {} kwargs['salt'] = __salt__ if 'pillarenv' in kw or 'pillar' in kw: pillarenv = kw.get('pillarenv', __opts__.get('pillarenv')) kwargs['pillar'] = _gather_pillar(pillarenv, kw.get('pillar')) else: kwargs['pillar'] = __pillar__ kwargs['grains'] = __grains__ kwargs['opts'] = __opts__ kwargs['saltenv'] = saltenv def _render(contents): ''' Render :param:`contents` into a literal pathname by writing it to a temp file, rendering that file, and returning the result. 
''' # write out path to temp file tmp_path_fn = salt.utils.mkstemp() with salt.utils.fopen(tmp_path_fn, 'w+') as fp_: fp_.write(contents) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( tmp_path_fn, to_str=True, **kwargs ) salt.utils.safe_rm(tmp_path_fn) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: {0}'.format( data['data'] ) ) else: return data['data'] path = _render(path) dest = _render(dest) return (path, dest) def get_file(path, dest, saltenv='base', makedirs=False, template=None, gzip=None, **kwargs): ''' Used to get a single file from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_file salt://path/to/file /minion/dest Template rendering can be enabled on both the source and destination file names like so: .. code-block:: bash salt '*' cp.get_file "salt://{{grains.os}}/vimrc" /etc/vimrc template=jinja This example would instruct all Salt minions to download the vimrc from a directory with the same name as their os grain and copy it to /etc/vimrc For larger files, the cp.get_file module also supports gzip compression. Because gzip is CPU-intensive, this should only be used in scenarios where the compression ratio is very high (e.g. pretty-printed JSON or YAML files). Use the *gzip* named argument to enable it. Valid values are 1..9, where 1 is the lightest compression and 9 the heaviest. 1 uses the least CPU on the master (and minion), 9 uses the most. There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to retrieve the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.get_file salt://foo/bar.conf /etc/foo/bar.conf saltenv=config salt '*' cp.get_file salt://foo/bar.conf?saltenv=config /etc/foo/bar.conf .. 
note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv if not hash_file(path, saltenv): return '' else: return _client().get_file( path, dest, makedirs, saltenv, gzip) def get_template(path, dest, template='jinja', saltenv='base', makedirs=False, **kwargs): ''' Render a file as a template before setting it down. Warning, order is not the same as in fileclient.cp for non breaking old API. CLI Example: .. code-block:: bash salt '*' cp.get_template salt://path/to/template /minion/dest ''' if 'salt' not in kwargs: kwargs['salt'] = __salt__ if 'pillar' not in kwargs: kwargs['pillar'] = __pillar__ if 'grains' not in kwargs: kwargs['grains'] = __grains__ if 'opts' not in kwargs: kwargs['opts'] = __opts__ return _client().get_template( path, dest, template, makedirs, saltenv, **kwargs) def get_dir(path, dest, saltenv='base', template=None, gzip=None, **kwargs): ''' Used to recursively copy a directory from the salt master CLI Example: .. code-block:: bash salt '*' cp.get_dir salt://path/to/dir/ /minion/dest get_dir supports the same template and gzip arguments as get_file. ''' (path, dest) = _render_filenames(path, dest, saltenv, template, **kwargs) return _client().get_dir(path, dest, saltenv, gzip) def get_url(path, dest='', saltenv='base', makedirs=False): ''' Used to get a single file from a URL. path A URL to download a file from. Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. 
dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. CLI Example: .. code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url(path, dest, makedirs, saltenv) else: result = _client().get_url(path, None, makedirs, saltenv, no_cache=True) if not result: log.error( 'Unable to fetch file {0} from saltenv {1}.'.format( path, saltenv ) ) return result def get_file_str(path, saltenv='base'): ''' Download a file from a URL to the Minion cache directory and return the contents of that file Returns ``False`` if Salt was unable to cache a file from a URL. CLI Example: .. code-block:: bash salt '*' cp.get_file_str salt://my/file ''' fn_ = cache_file(path, saltenv) if isinstance(fn_, six.string_types): with salt.utils.fopen(fn_, 'r') as fp_: data = fp_.read() return data return fn_ def cache_file(path, saltenv='base'): ''' Used to cache a single file on the Minion Returns the location of the new cached file on the Minion. CLI Example: .. code-block:: bash salt '*' cp.cache_file salt://path/to/file There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the file. One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. 
code-block:: bash salt '*' cp.cache_file salt://foo/bar.conf saltenv=config salt '*' cp.cache_file salt://foo/bar.conf?saltenv=config If the path being cached is a ``salt://`` URI, and the path does not exist, then ``False`` will be returned. .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' contextkey = '{0}_|-{1}_|-{2}'.format('cp.cache_file', path, saltenv) path_is_remote = _urlparse(path).scheme in ('http', 'https', 'ftp') try: if path_is_remote and contextkey in __context__: # Prevent multiple caches in the same salt run. Affects remote URLs # since the master won't know their hash, so the fileclient # wouldn't be able to prevent multiple caches if we try to cache # the remote URL more than once. if os.path.isfile(__context__[contextkey]): return __context__[contextkey] else: # File is in __context__ but no longer exists in the minion # cache, get rid of the context key and re-cache below. # Accounts for corner case where file is removed from minion # cache between cp.cache_file calls in the same salt-run. __context__.pop(contextkey) except AttributeError: pass path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv result = _client().cache_file(path, saltenv) if not result: log.error( 'Unable to cache file \'{0}\' from saltenv \'{1}\'.'.format( path, saltenv ) ) if path_is_remote: # Cache was successful, store the result in __context__ to prevent # multiple caches (see above). __context__[contextkey] = result return result def cache_files(paths, saltenv='base'): ''' Used to gather many files from the Master, the gathered files will be saved in the minion cachedir reflective to the paths retrieved from the Master CLI Example: .. code-block:: bash salt '*' cp.cache_files salt://pathto/file1,salt://pathto/file1 There are two ways of defining the fileserver environment (a.k.a. ``saltenv``) from which to cache the files. 
One is to use the ``saltenv`` parameter, and the other is to use a querystring syntax in the ``salt://`` URL. The below two examples are equivalent: .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf,salt://foo/baz.conf saltenv=config salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config,salt://foo/baz.conf?saltenv=config The querystring method is less useful when all files are being cached from the same environment, but is a good way of caching files from multiple different environments in the same command. For example, the below command will cache the first file from the ``config1`` environment, and the second one from the ``config2`` environment. .. code-block:: bash salt '*' cp.cache_files salt://foo/bar.conf?saltenv=config1,salt://foo/bar.conf?saltenv=config2 .. note:: It may be necessary to quote the URL when using the querystring method, depending on the shell being used to run the command. ''' return _client().cache_files(paths, saltenv) def cache_dir(path, saltenv='base', include_empty=False, include_pat=None, exclude_pat=None): ''' Download and cache everything under a directory from the master include_pat : None Glob or regex to narrow down the files cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. versionadded:: 2014.7.0 exclude_pat : None Glob or regex to exclude certain files from being cached from the given path. If matching with a regex, the regex must be prefixed with ``E@``, otherwise the expression will be interpreted as a glob. .. note:: If used with ``include_pat``, files matching this pattern will be excluded from the subset of files defined by ``include_pat``. .. versionadded:: 2014.7.0 CLI Examples: .. 
code-block:: bash salt '*' cp.cache_dir salt://path/to/dir salt '*' cp.cache_dir salt://path/to/dir include_pat='E@*.py$' ''' return _client().cache_dir( path, saltenv, include_empty, include_pat, exclude_pat ) def cache_master(saltenv='base'): ''' Retrieve all of the files on the master and cache them locally CLI Example: .. code-block:: bash salt '*' cp.cache_master ''' return _client().cache_master(saltenv) def cache_local_file(path): ''' Cache a local file on the minion in the localfiles cache CLI Example: .. code-block:: bash salt '*' cp.cache_local_file /etc/hosts ''' if not os.path.exists(path): return '' path_cached = is_cached(path) # If the file has already been cached, return the path if path_cached: path_hash = hash_file(path) path_cached_hash = hash_file(path_cached) if path_hash['hsum'] == path_cached_hash['hsum']: return path_cached # The file hasn't been cached or has changed; cache it return _client().cache_local_file(path) def list_states(saltenv='base'): ''' List all of the available state modules in an environment CLI Example: .. code-block:: bash salt '*' cp.list_states ''' return _client().list_states(saltenv) def list_master(saltenv='base', prefix=''): ''' List all of the files stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master ''' return _client().file_list(saltenv, prefix) def list_master_dirs(saltenv='base', prefix=''): ''' List all of the directories stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_dirs ''' return _client().dir_list(saltenv, prefix) def list_master_symlinks(saltenv='base', prefix=''): ''' List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks ''' return _client().symlink_list(saltenv, prefix) def list_minion(saltenv='base'): ''' List all of the files cached on the minion CLI Example: .. 
code-block:: bash salt '*' cp.list_minion ''' return _client().file_local_list(saltenv) def is_cached(path, saltenv='base'): ''' Return a boolean if the given path on the master has been cached on the minion CLI Example: .. code-block:: bash salt '*' cp.is_cached salt://path/to/file ''' return _client().is_cached(path, saltenv) def hash_file(path, saltenv='base'): ''' Return the hash of a file, to get the hash of a file on the salt master file server prepend the path with salt://<file on server> otherwise, prepend the file with / for a local file. CLI Example: .. code-block:: bash salt '*' cp.hash_file salt://path/to/file ''' path, senv = salt.utils.url.split_env(path) if senv: saltenv = senv return _client().hash_file(path, saltenv) def push(path, keep_symlinks=False, upload_path=None, remove_source=False): ''' WARNING Files pushed to the master will have global read permissions.. Push a file from the minion up to the master, the file will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``) Since this feature allows a minion to push a file up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. keep_symlinks Keep the path value without resolving its canonical form upload_path Provide a different path inside the master's minion files cachedir remove_source Remove the source file on the minion .. versionadded:: 2016.3.0 CLI Example: .. 
code-block:: bash salt '*' cp.push /etc/fstab salt '*' cp.push /etc/system-release keep_symlinks=True salt '*' cp.push /etc/fstab upload_path='/new/path/fstab' salt '*' cp.push /tmp/filename remove_source=True ''' log.debug('Trying to copy \'{0}\' to master'.format(path)) if '../' in path or not os.path.isabs(path): log.debug('Path must be absolute, returning False') return False if not keep_symlinks: path = os.path.realpath(path) if not os.path.isfile(path): log.debug('Path failed os.path.isfile check, returning False') return False auth = _auth() if upload_path: if '../' in upload_path: log.debug('Path must be absolute, returning False') log.debug('Bad path: {0}'.format(upload_path)) return False load_path = upload_path.lstrip(os.sep) else: load_path = path.lstrip(os.sep) # Normalize the path. This does not eliminate # the possibility that relative entries will still be present load_path_normal = os.path.normpath(load_path) # If this is Windows and a drive letter is present, remove it load_path_split_drive = os.path.splitdrive(load_path_normal)[1] # Finally, split the remaining path into a list for delivery to the master load_path_list = [_f for _f in load_path_split_drive.split(os.sep) if _f] load = {'cmd': '_file_recv', 'id': __opts__['id'], 'path': load_path_list, 'tok': auth.gen_token('salt')} channel = salt.transport.Channel.factory(__opts__) with salt.utils.fopen(path, 'rb') as fp_: init_send = False while True: load['loc'] = fp_.tell() load['data'] = fp_.read(__opts__['file_buffer_size']) if not load['data'] and init_send: if remove_source: try: salt.utils.rm_rf(path) log.debug('Removing source file \'{0}\''.format(path)) except IOError: log.error('cp.push failed to remove file \ \'{0}\''.format(path)) return False return True ret = channel.send(load) if not ret: log.error('cp.push Failed transfer failed. 
Ensure master has ' '\'file_recv\' set to \'True\' and that the file ' 'is not larger than the \'file_recv_size_max\' ' 'setting on the master.') return ret init_send = True def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in os.walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
./CrossVul/dataset_final_sorted/CWE-200/py/bad_3325_1
crossvul-python_data_good_1559_1
import errno import logging import os import uuid import struct import time import base64 import socket from ceph_deploy.cliutil import priority from ceph_deploy import conf, hosts, exc from ceph_deploy.util import arg_validators, ssh, net from ceph_deploy.misc import mon_hosts from ceph_deploy.lib import remoto from ceph_deploy.connection import get_local_connection LOG = logging.getLogger(__name__) def generate_auth_key(): key = os.urandom(16) header = struct.pack( '<hiih', 1, # le16 type: CEPH_CRYPTO_AES int(time.time()), # le32 created: seconds 0, # le32 created: nanoseconds, len(key), # le16: len(key) ) return base64.b64encode(header + key) def ssh_copy_keys(hostname, username=None): LOG.info('making sure passwordless SSH succeeds') if ssh.can_connect_passwordless(hostname): return LOG.warning('could not connect via SSH') # Create the key if it doesn't exist: id_rsa_pub_file = os.path.expanduser(u'~/.ssh/id_rsa.pub') id_rsa_file = id_rsa_pub_file.split('.pub')[0] if not os.path.exists(id_rsa_file): LOG.info('creating a passwordless id_rsa.pub key file') with get_local_connection(LOG) as conn: remoto.process.run( conn, [ 'ssh-keygen', '-t', 'rsa', '-N', "", '-f', id_rsa_file, ] ) # Get the contents of id_rsa.pub and push it to the host LOG.info('will connect again with password prompt') distro = hosts.get(hostname, username, detect_sudo=False) auth_keys_path = '.ssh/authorized_keys' if not distro.conn.remote_module.path_exists(auth_keys_path): distro.conn.logger.warning( '.ssh/authorized_keys does not exist, will skip adding keys' ) else: LOG.info('adding public keys to authorized_keys') with open(os.path.expanduser('~/.ssh/id_rsa.pub'), 'r') as id_rsa: contents = id_rsa.read() distro.conn.remote_module.append_to_file( auth_keys_path, contents ) distro.conn.exit() def validate_host_ip(ips, subnets): """ Make sure that a given host all subnets specified will have at least one IP in that range. 
""" # Make sure we prune ``None`` arguments subnets = [s for s in subnets if s is not None] validate_one_subnet = len(subnets) == 1 def ip_in_one_subnet(ips, subnet): """ ensure an ip exists in at least one subnet """ for ip in ips: if net.ip_in_subnet(ip, subnet): return True return False for subnet in subnets: if ip_in_one_subnet(ips, subnet): if validate_one_subnet: return else: # keep going to make sure the other subnets are ok continue else: msg = "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips)) raise RuntimeError(msg) def get_public_network_ip(ips, public_subnet): """ Given a public subnet, chose the one IP from the remote host that exists within the subnet range. """ for ip in ips: if net.ip_in_subnet(ip, public_subnet): return ip msg = "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet)) raise RuntimeError(msg) def new(args): if args.ceph_conf: raise RuntimeError('will not create a ceph conf file if attemtping to re-use with `--ceph-conf` flag') LOG.debug('Creating new cluster named %s', args.cluster) cfg = conf.ceph.CephConf() cfg.add_section('global') fsid = args.fsid or uuid.uuid4() cfg.set('global', 'fsid', str(fsid)) # if networks were passed in, lets set them in the # global section if args.public_network: cfg.set('global', 'public network', str(args.public_network)) if args.cluster_network: cfg.set('global', 'cluster network', str(args.cluster_network)) mon_initial_members = [] mon_host = [] for (name, host) in mon_hosts(args.mon): # Try to ensure we can ssh in properly before anything else if args.ssh_copykey: ssh_copy_keys(host, args.username) # Now get the non-local IPs from the remote node distro = hosts.get(host, username=args.username) remote_ips = net.ip_addresses(distro.conn) distro.conn.exit() # Validate subnets if we received any if args.public_network or args.cluster_network: validate_host_ip(remote_ips, [args.public_network, args.cluster_network]) # Pick the IP that matches 
the public cluster (if we were told to do # so) otherwise pick the first, non-local IP LOG.debug('Resolving host %s', host) if args.public_network: ip = get_public_network_ip(remote_ips, args.public_network) else: ip = net.get_nonlocal_ip(host) LOG.debug('Monitor %s at %s', name, ip) mon_initial_members.append(name) try: socket.inet_pton(socket.AF_INET6, ip) mon_host.append("[" + ip + "]") LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6') cfg.set('global', 'ms bind ipv6', 'true') except socket.error: mon_host.append(ip) LOG.debug('Monitor initial members are %s', mon_initial_members) LOG.debug('Monitor addrs are %s', mon_host) cfg.set('global', 'mon initial members', ', '.join(mon_initial_members)) # no spaces here, see http://tracker.newdream.net/issues/3145 cfg.set('global', 'mon host', ','.join(mon_host)) # override undesirable defaults, needed until bobtail # http://tracker.ceph.com/issues/6788 cfg.set('global', 'auth cluster required', 'cephx') cfg.set('global', 'auth service required', 'cephx') cfg.set('global', 'auth client required', 'cephx') # http://tracker.newdream.net/issues/3138 cfg.set('global', 'filestore xattr use omap', 'true') path = '{name}.conf'.format( name=args.cluster, ) new_mon_keyring(args) LOG.debug('Writing initial config to %s...', path) tmp = '%s.tmp' % path with file(tmp, 'w') as f: cfg.write(f) try: os.rename(tmp, path) except OSError as e: if e.errno == errno.EEXIST: raise exc.ClusterExistsError(path) else: raise def new_mon_keyring(args): LOG.debug('Creating a random mon key...') mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key() keypath = '{name}.mon.keyring'.format( name=args.cluster, ) oldmask = os.umask(077) LOG.debug('Writing monitor keyring to %s...', keypath) try: tmp = '%s.tmp' % keypath with open(tmp, 'w', 0600) as f: f.write(mon_keyring) try: os.rename(tmp, keypath) except OSError as e: if e.errno == errno.EEXIST: raise exc.ClusterExistsError(keypath) else: raise finally: 
os.umask(oldmask) @priority(10) def make(parser): """ Start deploying a new cluster, and write a CLUSTER.conf and keyring for it. """ parser.add_argument( 'mon', metavar='MON', nargs='+', help='initial monitor hostname, fqdn, or hostname:fqdn pair', type=arg_validators.Hostname(), ) parser.add_argument( '--no-ssh-copykey', dest='ssh_copykey', action='store_false', default=True, help='do not attempt to copy SSH keys', ) parser.add_argument( '--fsid', dest='fsid', help='provide an alternate FSID for ceph.conf generation', ) parser.add_argument( '--cluster-network', help='specify the (internal) cluster network', type=arg_validators.Subnet(), ) parser.add_argument( '--public-network', help='specify the public network for a cluster', type=arg_validators.Subnet(), ) parser.set_defaults( func=new, )
./CrossVul/dataset_final_sorted/CWE-200/py/good_1559_1
crossvul-python_data_bad_4177_1
from rest_framework.status import HTTP_400_BAD_REQUEST from rest_framework.views import APIView from backend.response import FormattedResponse from config import config from backend.permissions import AdminOrAnonymousReadOnly class ConfigView(APIView): throttle_scope = "config" permission_classes = (AdminOrAnonymousReadOnly,) def get(self, request, name=None): if name is None: if request.user.is_staff: return FormattedResponse(config.get_all()) return FormattedResponse(config.get_all_non_sensitive()) return FormattedResponse(config.get(name)) def post(self, request, name): if "value" not in request.data: return FormattedResponse(status=HTTP_400_BAD_REQUEST) config.set(name, request.data.get("value")) return FormattedResponse() def patch(self, request, name): if "value" not in request.data: return FormattedResponse(status=HTTP_400_BAD_REQUEST) if config.get(name) is not None and isinstance(config.get(name), list): config.set("name", config.get(name).append(request.data["value"])) return FormattedResponse() config.set(name, request.data.get("value")) return FormattedResponse()
./CrossVul/dataset_final_sorted/CWE-200/py/bad_4177_1
crossvul-python_data_bad_5622_0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import gettext import os import sys from oslo.config import cfg from keystone.common import logging gettext.install('keystone', unicode=1) _DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" _DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" _DEFAULT_AUTH_METHODS = ['password', 'token'] COMMON_CLI_OPTS = [ cfg.BoolOpt('debug', short='d', default=False, help='Print debugging output (set logging level to ' 'DEBUG instead of default WARNING level).'), cfg.BoolOpt('verbose', short='v', default=False, help='Print more verbose output (set logging level to ' 'INFO instead of default WARNING level).'), ] LOGGING_CLI_OPTS = [ cfg.StrOpt('log-config', metavar='PATH', help='If this option is specified, the logging configuration ' 'file specified is used and overrides any other logging ' 'options specified. Please see the Python logging module ' 'documentation for details on logging configuration ' 'files.'), cfg.StrOpt('log-format', default=_DEFAULT_LOG_FORMAT, metavar='FORMAT', help='A logging.Formatter log message format string which may ' 'use any of the available logging.LogRecord attributes.'), cfg.StrOpt('log-date-format', default=_DEFAULT_LOG_DATE_FORMAT, metavar='DATE_FORMAT', help='Format string for %%(asctime)s in log records.'), cfg.StrOpt('log-file', metavar='PATH', help='Name of log file to output. 
' 'If not set, logging will go to stdout.'), cfg.StrOpt('log-dir', help='The directory in which to store log files. ' '(will be prepended to --log-file)'), cfg.BoolOpt('use-syslog', default=False, help='Use syslog for logging.'), cfg.StrOpt('syslog-log-facility', default='LOG_USER', help='syslog facility to receive log lines.') ] CONF = cfg.CONF def setup_logging(conf): """ Sets up the logging options for a log with supplied name :param conf: a cfg.ConfOpts object """ if conf.log_config: # Use a logging configuration file for all settings... if os.path.exists(conf.log_config): logging.config.fileConfig(conf.log_config) return else: raise RuntimeError(_('Unable to locate specified logging ' 'config file: %s') % conf.log_config) root_logger = logging.root if conf.debug: root_logger.setLevel(logging.DEBUG) elif conf.verbose: root_logger.setLevel(logging.INFO) else: root_logger.setLevel(logging.WARNING) formatter = logging.Formatter(conf.log_format, conf.log_date_format) if conf.use_syslog: try: facility = getattr(logging.SysLogHandler, conf.syslog_log_facility) except AttributeError: raise ValueError(_('Invalid syslog facility')) handler = logging.SysLogHandler(address='/dev/log', facility=facility) elif conf.log_file: logfile = conf.log_file if conf.log_dir: logfile = os.path.join(conf.log_dir, logfile) handler = logging.WatchedFileHandler(logfile) else: handler = logging.StreamHandler(sys.stdout) handler.setFormatter(formatter) root_logger.addHandler(handler) def setup_authentication(): # register any non-default auth methods here (used by extensions, etc) for method_name in CONF.auth.methods: if method_name not in _DEFAULT_AUTH_METHODS: register_str(method_name, group="auth") def register_str(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_opt(cfg.StrOpt(*args, **kw), group=group) def register_cli_str(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_cli_opt(cfg.StrOpt(*args, 
**kw), group=group) def register_list(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_opt(cfg.ListOpt(*args, **kw), group=group) def register_cli_list(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_cli_opt(cfg.ListOpt(*args, **kw), group=group) def register_bool(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_opt(cfg.BoolOpt(*args, **kw), group=group) def register_cli_bool(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_cli_opt(cfg.BoolOpt(*args, **kw), group=group) def register_int(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_opt(cfg.IntOpt(*args, **kw), group=group) def register_cli_int(*args, **kw): conf = kw.pop('conf', CONF) group = kw.pop('group', None) return conf.register_cli_opt(cfg.IntOpt(*args, **kw), group=group) def configure(): CONF.register_cli_opts(COMMON_CLI_OPTS) CONF.register_cli_opts(LOGGING_CLI_OPTS) register_cli_bool('standard-threads', default=False) register_cli_str('pydev-debug-host', default=None) register_cli_int('pydev-debug-port', default=None) register_str('admin_token', default='ADMIN') register_str('bind_host', default='0.0.0.0') register_int('compute_port', default=8774) register_int('admin_port', default=35357) register_int('public_port', default=5000) register_str( 'public_endpoint', default='http://localhost:%(public_port)d/') register_str('admin_endpoint', default='http://localhost:%(admin_port)d/') register_str('onready') register_str('auth_admin_prefix', default='') register_str('policy_file', default='policy.json') register_str('policy_default_rule', default=None) # default max request size is 112k register_int('max_request_body_size', default=114688) register_int('max_param_size', default=64) # we allow tokens to be a bit larger to accommodate PKI register_int('max_token_size', default=8192) 
register_str( 'member_role_id', default='9fe2ff9ee4384b1894a90878d3e92bab') register_str('member_role_name', default='_member_') # identity register_str('default_domain_id', group='identity', default='default') # trust register_bool('enabled', group='trust', default=True) # ssl register_bool('enable', group='ssl', default=False) register_str('certfile', group='ssl', default=None) register_str('keyfile', group='ssl', default=None) register_str('ca_certs', group='ssl', default=None) register_bool('cert_required', group='ssl', default=False) # signing register_str( 'token_format', group='signing', default="PKI") register_str( 'certfile', group='signing', default="/etc/keystone/ssl/certs/signing_cert.pem") register_str( 'keyfile', group='signing', default="/etc/keystone/ssl/private/signing_key.pem") register_str( 'ca_certs', group='signing', default="/etc/keystone/ssl/certs/ca.pem") register_int('key_size', group='signing', default=1024) register_int('valid_days', group='signing', default=3650) register_str('ca_password', group='signing', default=None) # sql register_str('connection', group='sql', default='sqlite:///keystone.db') register_int('idle_timeout', group='sql', default=200) register_str( 'driver', group='catalog', default='keystone.catalog.backends.sql.Catalog') register_str( 'driver', group='identity', default='keystone.identity.backends.sql.Identity') register_str( 'driver', group='policy', default='keystone.policy.backends.sql.Policy') register_str( 'driver', group='token', default='keystone.token.backends.kvs.Token') register_str( 'driver', group='trust', default='keystone.trust.backends.sql.Trust') register_str( 'driver', group='ec2', default='keystone.contrib.ec2.backends.kvs.Ec2') register_str( 'driver', group='stats', default='keystone.contrib.stats.backends.kvs.Stats') # ldap register_str('url', group='ldap', default='ldap://localhost') register_str('user', group='ldap', default=None) register_str('password', group='ldap', default=None) 
register_str('suffix', group='ldap', default='cn=example,cn=com') register_bool('use_dumb_member', group='ldap', default=False) register_str('dumb_member', group='ldap', default='cn=dumb,dc=nonexistent') register_bool('allow_subtree_delete', group='ldap', default=False) register_str('query_scope', group='ldap', default='one') register_int('page_size', group='ldap', default=0) register_str('alias_dereferencing', group='ldap', default='default') register_str('user_tree_dn', group='ldap', default=None) register_str('user_filter', group='ldap', default=None) register_str('user_objectclass', group='ldap', default='inetOrgPerson') register_str('user_id_attribute', group='ldap', default='cn') register_str('user_name_attribute', group='ldap', default='sn') register_str('user_mail_attribute', group='ldap', default='email') register_str('user_pass_attribute', group='ldap', default='userPassword') register_str('user_enabled_attribute', group='ldap', default='enabled') register_str( 'user_domain_id_attribute', group='ldap', default='businessCategory') register_int('user_enabled_mask', group='ldap', default=0) register_str('user_enabled_default', group='ldap', default='True') register_list( 'user_attribute_ignore', group='ldap', default='tenant_id,tenants') register_bool('user_allow_create', group='ldap', default=True) register_bool('user_allow_update', group='ldap', default=True) register_bool('user_allow_delete', group='ldap', default=True) register_bool('user_enabled_emulation', group='ldap', default=False) register_str('user_enabled_emulation_dn', group='ldap', default=None) register_str('tenant_tree_dn', group='ldap', default=None) register_str('tenant_filter', group='ldap', default=None) register_str('tenant_objectclass', group='ldap', default='groupOfNames') register_str('tenant_id_attribute', group='ldap', default='cn') register_str('tenant_member_attribute', group='ldap', default='member') register_str('tenant_name_attribute', group='ldap', default='ou') 
register_str('tenant_desc_attribute', group='ldap', default='description') register_str('tenant_enabled_attribute', group='ldap', default='enabled') register_str( 'tenant_domain_id_attribute', group='ldap', default='businessCategory') register_list('tenant_attribute_ignore', group='ldap', default='') register_bool('tenant_allow_create', group='ldap', default=True) register_bool('tenant_allow_update', group='ldap', default=True) register_bool('tenant_allow_delete', group='ldap', default=True) register_bool('tenant_enabled_emulation', group='ldap', default=False) register_str('tenant_enabled_emulation_dn', group='ldap', default=None) register_str('role_tree_dn', group='ldap', default=None) register_str('role_filter', group='ldap', default=None) register_str( 'role_objectclass', group='ldap', default='organizationalRole') register_str('role_id_attribute', group='ldap', default='cn') register_str('role_name_attribute', group='ldap', default='ou') register_str('role_member_attribute', group='ldap', default='roleOccupant') register_list('role_attribute_ignore', group='ldap', default='') register_bool('role_allow_create', group='ldap', default=True) register_bool('role_allow_update', group='ldap', default=True) register_bool('role_allow_delete', group='ldap', default=True) register_str('group_tree_dn', group='ldap', default=None) register_str('group_filter', group='ldap', default=None) register_str('group_objectclass', group='ldap', default='groupOfNames') register_str('group_id_attribute', group='ldap', default='cn') register_str('group_name_attribute', group='ldap', default='ou') register_str('group_member_attribute', group='ldap', default='member') register_str('group_desc_attribute', group='ldap', default='description') register_str( 'group_domain_id_attribute', group='ldap', default='businessCategory') register_list('group_attribute_ignore', group='ldap', default='') register_bool('group_allow_create', group='ldap', default=True) register_bool('group_allow_update', 
group='ldap', default=True) register_bool('group_allow_delete', group='ldap', default=True) register_str('domain_tree_dn', group='ldap', default=None) register_str('domain_filter', group='ldap', default=None) register_str('domain_objectclass', group='ldap', default='groupOfNames') register_str('domain_id_attribute', group='ldap', default='cn') register_str('domain_name_attribute', group='ldap', default='ou') register_str('domain_member_attribute', group='ldap', default='member') register_str('domain_desc_attribute', group='ldap', default='description') register_str('domain_enabled_attribute', group='ldap', default='enabled') register_list('domain_attribute_ignore', group='ldap', default='') register_bool('domain_allow_create', group='ldap', default=True) register_bool('domain_allow_update', group='ldap', default=True) register_bool('domain_allow_delete', group='ldap', default=True) register_bool('domain_enabled_emulation', group='ldap', default=False) register_str('domain_enabled_emulation_dn', group='ldap', default=None) # pam register_str('url', group='pam', default=None) register_str('userid', group='pam', default=None) register_str('password', group='pam', default=None) # default authentication methods register_list('methods', group='auth', default=_DEFAULT_AUTH_METHODS) register_str( 'password', group='auth', default='keystone.auth.plugins.token.Token') register_str( 'token', group='auth', default='keystone.auth.plugins.password.Password') # register any non-default auth methods here (used by extensions, etc) for method_name in CONF.auth.methods: if method_name not in _DEFAULT_AUTH_METHODS: register_str(method_name, group='auth')
./CrossVul/dataset_final_sorted/CWE-200/py/bad_5622_0
crossvul-python_data_bad_3325_3
# -*- coding: utf-8 -*- ''' Operations on regular files, special files, directories, and symlinks ===================================================================== Salt States can aggressively manipulate files on a system. There are a number of ways in which files can be managed. Regular files can be enforced with the :mod:`file.managed <salt.states.file.managed>` state. This state downloads files from the salt master and places them on the target system. Managed files can be rendered as a jinja, mako, or wempy template, adding a dynamic component to file management. An example of :mod:`file.managed <salt.states.file.managed>` which makes use of the jinja templating system would look like this: .. code-block:: yaml /etc/http/conf/http.conf: file.managed: - source: salt://apache/http.conf - user: root - group: root - mode: 644 - template: jinja - defaults: custom_var: "default value" other_var: 123 {% if grains['os'] == 'Ubuntu' %} - context: custom_var: "override" {% endif %} It is also possible to use the :mod:`py renderer <salt.renderers.py>` as a templating option. The template would be a Python script which would need to contain a function called ``run()``, which returns a string. All arguments to the state will be made available to the Python script as globals. The returned string will be the contents of the managed file. For example: .. code-block:: python def run(): lines = ['foo', 'bar', 'baz'] lines.extend([source, name, user, context]) # Arguments as globals return '\\n\\n'.join(lines) .. note:: The ``defaults`` and ``context`` arguments require extra indentation (four spaces instead of the normal two) in order to create a nested dictionary. :ref:`More information <nested-dict-indentation>`. If using a template, any user-defined template variables in the file defined in ``source`` must be passed in using the ``defaults`` and/or ``context`` arguments. 
The general best practice is to place default values in ``defaults``, with conditional overrides going into ``context``, as seen above. The template will receive a variable ``custom_var``, which would be accessed in the template using ``{{ custom_var }}``. If the operating system is Ubuntu, the value of the variable ``custom_var`` would be *override*, otherwise it is the default *default value* The ``source`` parameter can be specified as a list. If this is done, then the first file to be matched will be the one that is used. This allows you to have a default file on which to fall back if the desired file does not exist on the salt fileserver. Here's an example: .. code-block:: yaml /etc/foo.conf: file.managed: - source: - salt://foo.conf.{{ grains['fqdn'] }} - salt://foo.conf.fallback - user: foo - group: users - mode: 644 - backup: minion .. note:: Salt supports backing up managed files via the backup option. For more details on this functionality please review the :ref:`backup_mode documentation <file-state-backups>`. The ``source`` parameter can also specify a file in another Salt environment. In this example ``foo.conf`` in the ``dev`` environment will be used instead. .. code-block:: yaml /etc/foo.conf: file.managed: - source: - salt://foo.conf?saltenv=dev - user: foo - group: users - mode: '0644' .. warning:: When using a mode that includes a leading zero you must wrap the value in single quotes. If the value is not wrapped in quotes it will be read by YAML as an integer and evaluated as an octal. The ``names`` parameter, which is part of the state compiler, can be used to expand the contents of a single state declaration into multiple, single state declarations. Each item in the ``names`` list receives its own individual state ``name`` and is converted into its own low-data structure. This is a convenient way to manage several files with similar attributes. 
There is more documentation about this feature in the :ref:`Names declaration<names-declaration>` section of the :ref:`Highstate docs<states-highstate>`. Special files can be managed via the ``mknod`` function. This function will create and enforce the permissions on a special file. The function supports the creation of character devices, block devices, and FIFO pipes. The function will create the directory structure up to the special file if it is needed on the minion. The function will not overwrite or operate on (change major/minor numbers) existing special files with the exception of user, group, and permissions. In most cases the creation of some special files require root permissions on the minion. This would require that the minion to be run as the root user. Here is an example of a character device: .. code-block:: yaml /var/named/chroot/dev/random: file.mknod: - ntype: c - major: 1 - minor: 8 - user: named - group: named - mode: 660 Here is an example of a block device: .. code-block:: yaml /var/named/chroot/dev/loop0: file.mknod: - ntype: b - major: 7 - minor: 0 - user: named - group: named - mode: 660 Here is an example of a fifo pipe: .. code-block:: yaml /var/named/chroot/var/log/logfifo: file.mknod: - ntype: p - user: named - group: named - mode: 660 Directories can be managed via the ``directory`` function. This function can create and enforce the permissions on a directory. A directory statement will look like this: .. code-block:: yaml /srv/stuff/substuf: file.directory: - user: fred - group: users - mode: 755 - makedirs: True If you need to enforce user and/or group ownership or permissions recursively on the directory's contents, you can do so by adding a ``recurse`` directive: .. 
code-block:: yaml /srv/stuff/substuf: file.directory: - user: fred - group: users - mode: 755 - makedirs: True - recurse: - user - group - mode As a default, ``mode`` will resolve to ``dir_mode`` and ``file_mode``, to specify both directory and file permissions, use this form: .. code-block:: yaml /srv/stuff/substuf: file.directory: - user: fred - group: users - file_mode: 744 - dir_mode: 755 - makedirs: True - recurse: - user - group - mode Symlinks can be easily created; the symlink function is very simple and only takes a few arguments: .. code-block:: yaml /etc/grub.conf: file.symlink: - target: /boot/grub/grub.conf Recursive directory management can also be set via the ``recurse`` function. Recursive directory management allows for a directory on the salt master to be recursively copied down to the minion. This is a great tool for deploying large code and configuration systems. A state using ``recurse`` would look something like this: .. code-block:: yaml /opt/code/flask: file.recurse: - source: salt://code/flask - include_empty: True A more complex ``recurse`` example: .. code-block:: yaml {% set site_user = 'testuser' %} {% set site_name = 'test_site' %} {% set project_name = 'test_proj' %} {% set sites_dir = 'test_dir' %} django-project: file.recurse: - name: {{ sites_dir }}/{{ site_name }}/{{ project_name }} - user: {{ site_user }} - dir_mode: 2775 - file_mode: '0644' - template: jinja - source: salt://project/templates_dir - include_empty: True Retention scheduling can be applied to manage contents of backup directories. For example: .. 
code-block:: yaml /var/backups/example_directory: file.retention_schedule: - strptime_format: example_name_%Y%m%dT%H%M%S.tar.bz2 - retain: most_recent: 5 first_of_hour: 4 first_of_day: 14 first_of_week: 6 first_of_month: 6 first_of_year: all ''' # Import python libs from __future__ import absolute_import import difflib import itertools import logging import os import shutil import sys import traceback from collections import Iterable, Mapping, defaultdict from datetime import datetime # python3 problem in the making? # Import salt libs import salt.loader import salt.payload import salt.utils import salt.utils.dictupdate import salt.utils.templates import salt.utils.url from salt.utils.locales import sdecode from salt.exceptions import CommandExecutionError, SaltInvocationError # Import 3rd-party libs import salt.ext.six as six from salt.ext.six.moves import zip_longest log = logging.getLogger(__name__) COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?' __NOT_FOUND = object() def _get_accumulator_filepath(): ''' Return accumulator data path. 
''' return os.path.join(salt.utils.get_accumulator_dir(__opts__['cachedir']), __instance_id__) def _load_accumulators(): def _deserialize(path): serial = salt.payload.Serial(__opts__) ret = {'accumulators': {}, 'accumulators_deps': {}} try: with salt.utils.fopen(path, 'rb') as f: loaded = serial.load(f) return loaded if loaded else ret except (IOError, NameError): # NameError is a msgpack error from salt-ssh return ret loaded = _deserialize(_get_accumulator_filepath()) return loaded['accumulators'], loaded['accumulators_deps'] def _persist_accummulators(accumulators, accumulators_deps): accumm_data = {'accumulators': accumulators, 'accumulators_deps': accumulators_deps} serial = salt.payload.Serial(__opts__) try: with salt.utils.fopen(_get_accumulator_filepath(), 'w+b') as f: serial.dump(accumm_data, f) except NameError: # msgpack error from salt-ssh pass def _check_user(user, group): ''' Checks if the named user and group are present on the minion ''' err = '' if user: uid = __salt__['file.user_to_uid'](user) if uid == '': err += 'User {0} is not available '.format(user) if group: gid = __salt__['file.group_to_gid'](group) if gid == '': err += 'Group {0} is not available'.format(group) return err def _gen_keep_files(name, require, walk_d=None): ''' Generate the list of files that need to be kept when a dir based function like directory or recurse has a clean. 
    '''
    def _is_child(path, directory):
        '''
        Check whether ``path`` is child of ``directory``
        '''
        path = os.path.abspath(path)
        directory = os.path.abspath(directory)

        relative = os.path.relpath(path, directory)

        return not relative.startswith(os.pardir)

    def _add_current_path(path):
        # Collect ``path`` plus its immediate children, using the
        # pre-computed ``walk_d`` mapping (root -> (dirs, files)) from the
        # enclosing scope instead of hitting the filesystem again.
        _ret = set()
        if os.path.isdir(path):
            dirs, files = walk_d.get(path, ((), ()))
            _ret.add(path)
            for _name in files:
                _ret.add(os.path.join(path, _name))
            for _name in dirs:
                _ret.add(os.path.join(path, _name))
        return _ret

    def _process_by_walk_d(name, ret):
        # Recursively accumulate every path under ``name`` into the
        # ``walk_ret`` set (bound in the caller's scope) via ``walk_d``.
        if os.path.isdir(name):
            walk_ret.update(_add_current_path(name))
            dirs, _ = walk_d.get(name, ((), ()))
            for _d in dirs:
                p = os.path.join(name, _d)
                walk_ret.update(_add_current_path(p))
                _process_by_walk_d(p, ret)

    def _process(name):
        # Fallback when no ``walk_d`` is supplied: walk the filesystem and
        # return the full set of paths under ``name``.
        ret = set()
        if os.path.isdir(name):
            for root, dirs, files in os.walk(name):
                ret.add(name)
                for name in files:
                    ret.add(os.path.join(root, name))
                for name in dirs:
                    ret.add(os.path.join(root, name))
        return ret

    keep = set()
    if isinstance(require, list):
        # Only file-type requisites are relevant for "clean" protection
        required_files = [comp for comp in require if 'file' in comp]
        for comp in required_files:
            for low in __lowstate__:
                # A requirement should match either the ID and the name of
                # another state.
                if low['name'] == comp['file'] or low['__id__'] == comp['file']:
                    fn = low['name']
                    if os.path.isdir(fn):
                        # Keep an entire required subtree that lives under
                        # the directory being cleaned
                        if _is_child(fn, name):
                            if walk_d:
                                walk_ret = set()
                                _process_by_walk_d(fn, walk_ret)
                                keep.update(walk_ret)
                            else:
                                keep.update(_process(fn))
                    else:
                        keep.add(fn)
    return list(keep)


def _check_file(name):
    '''
    Validate that ``name`` is an absolute, existing path.
    Returns a ``(ok, msg)`` tuple; ``msg`` is empty on success.
    '''
    ret = True
    msg = ''

    if not os.path.isabs(name):
        ret = False
        msg = 'Specified file {0} is not an absolute path'.format(name)
    elif not os.path.exists(name):
        ret = False
        msg = '{0}: file not found'.format(name)

    return ret, msg


def _clean_dir(root, keep, exclude_pat):
    '''
    Clean out all of the files and directories in a directory (root) while
    preserving the files in a list (keep) and part of exclude_pat
    '''
    removed = set()
    real_keep = set()
    real_keep.add(root)
    if isinstance(keep, list):
        for fn_ in keep:
            if not os.path.isabs(fn_):
                continue
            real_keep.add(fn_)
            # Also keep every ancestor directory of a kept path, up to the
            # filesystem (or drive) root, so parents are never removed
            while True:
                fn_ = os.path.dirname(fn_)
                real_keep.add(fn_)
                if fn_ in ['/', ''.join([os.path.splitdrive(fn_)[0], '\\\\'])]:
                    break

    def _delete_not_kept(nfn):
        if nfn not in real_keep:
            # -- check if this is a part of exclude_pat(only).
            # No need to
            # check include_pat
            if not salt.utils.check_include_exclude(
                    os.path.relpath(nfn, root), None, exclude_pat):
                return
            removed.add(nfn)
            if not __opts__['test']:
                try:
                    os.remove(nfn)
                except OSError:
                    # Directories (and anything os.remove cannot delete)
                    # are handed to the file.remove execution module
                    __salt__['file.remove'](nfn)

    for roots, dirs, files in os.walk(root):
        for name in itertools.chain(dirs, files):
            _delete_not_kept(os.path.join(roots, name))

    return list(removed)


def _error(ret, err_msg):
    '''
    Mark the state return dict as failed with ``err_msg`` and return it.
    '''
    ret['result'] = False
    ret['comment'] = err_msg
    return ret


def _check_directory(name,
                     user,
                     group,
                     recurse,
                     mode,
                     clean,
                     require,
                     exclude_pat,
                     max_depth=None):
    '''
    Check what changes need to be made on a directory
    '''
    changes = {}
    if recurse or clean:
        # clean=True requires a full (unlimited-depth) walk
        assert max_depth is None or not clean
        # walk path only once and store the result
        walk_l = list(_depth_limited_walk(name, max_depth))
        # root: (dirs, files) structure, compatible for python2.6
        walk_d = {}
        for i in walk_l:
            walk_d[i[0]] = (i[1], i[2])

    if recurse:
        try:
            recurse_set = _get_recurse_set(recurse)
        except (TypeError, ValueError) as exc:
            return False, '{0}'.format(exc), changes
        # Only enforce attributes explicitly requested via ``recurse``
        if 'user' not in recurse_set:
            user = None
        if 'group' not in recurse_set:
            group = None
        if 'mode' not in recurse_set:
            mode = None
        check_files = 'ignore_files' not in recurse_set
        check_dirs = 'ignore_dirs' not in recurse_set
        for root, dirs, files in walk_l:
            if check_files:
                for fname in files:
                    fchange = {}
                    path = os.path.join(root, fname)
                    stats = __salt__['file.stats'](
                        path, None, follow_symlinks=False
                    )
                    if user is not None and user != stats.get('user'):
                        fchange['user'] = user
                    if group is not None and group != stats.get('group'):
                        fchange['group'] = group
                    if fchange:
                        changes[path] = fchange
            if check_dirs:
                for name_ in dirs:
                    path = os.path.join(root, name_)
                    fchange = _check_dir_meta(path, user, group, mode)
                    if fchange:
                        changes[path] = fchange
        # Recurse skips root (we always do dirs, not root), so always check root:
        fchange = _check_dir_meta(name, user, group, mode)
        if fchange:
            changes[name] = fchange
    if clean:
        keep = _gen_keep_files(name,
                               require, walk_d)

        def _check_changes(fname):
            # ``root`` is bound by the walk loop below; paths protected by
            # ``keep`` or matching ``exclude_pat`` are left alone
            path = os.path.join(root, fname)
            if path in keep:
                return {}
            else:
                if not salt.utils.check_include_exclude(
                        os.path.relpath(path, name), None, exclude_pat):
                    return {}
                else:
                    return {path: {'removed': 'Removed due to clean'}}

        for root, dirs, files in walk_l:
            for fname in files:
                changes.update(_check_changes(fname))
            for name_ in dirs:
                changes.update(_check_changes(name_))

    if not os.path.isdir(name):
        changes[name] = {'directory': 'new'}
    if changes:
        comments = ['The following files will be changed:\n']
        for fn_ in changes:
            for key, val in six.iteritems(changes[fn_]):
                comments.append('{0}: {1} - {2}\n'.format(fn_, key, val))
        # None result => changes pending (test-mode convention)
        return None, ''.join(comments), changes
    return True, 'The directory {0} is in the correct state'.format(name), changes


def _check_dir_meta(name, user, group, mode):
    '''
    Check the changes in directory metadata
    '''
    stats = __salt__['file.stats'](name, follow_symlinks=False)
    changes = {}
    if not stats:
        changes['directory'] = 'new'
        return changes
    # Compare against both the resolved name and the numeric id so either
    # form passed by the user matches
    if (user is not None
            and user != stats['user']
            and user != stats.get('uid')):
        changes['user'] = user
    if (group is not None
            and group != stats['group']
            and group != stats.get('gid')):
        changes['group'] = group
    # Normalize the dir mode
    smode = salt.utils.normalize_mode(stats['mode'])
    mode = salt.utils.normalize_mode(mode)
    if mode is not None and mode != smode:
        changes['mode'] = mode
    return changes


def _check_touch(name, atime, mtime):
    '''
    Check to see if a file needs to be updated or created
    '''
    if not os.path.exists(name):
        return None, 'File {0} is set to be created'.format(name)
    stats = __salt__['file.stats'](name, follow_symlinks=False)
    # String-compare so int/float/str inputs are treated uniformly
    if atime is not None:
        if str(atime) != str(stats['atime']):
            return None, 'Times set to be updated on file {0}'.format(name)
    if mtime is not None:
        if str(mtime) != str(stats['mtime']):
            return None, 'Times set to be updated on file {0}'.format(name)
    return True, 'File {0} exists and has the correct times'.format(name)


def
_get_symlink_ownership(path): return ( __salt__['file.get_user'](path, follow_symlinks=False), __salt__['file.get_group'](path, follow_symlinks=False) ) def _check_symlink_ownership(path, user, group): ''' Check if the symlink ownership matches the specified user and group ''' cur_user, cur_group = _get_symlink_ownership(path) return (cur_user == user) and (cur_group == group) def _set_symlink_ownership(path, user, group): ''' Set the ownership of a symlink and return a boolean indicating success/failure ''' try: __salt__['file.lchown'](path, user, group) except OSError: pass return _check_symlink_ownership(path, user, group) def _symlink_check(name, target, force, user, group): ''' Check the symlink function ''' pchanges = {} if not os.path.exists(name) and not __salt__['file.is_link'](name): pchanges['new'] = name return None, 'Symlink {0} to {1} is set for creation'.format( name, target ), pchanges if __salt__['file.is_link'](name): if __salt__['file.readlink'](name) != target: pchanges['change'] = name return None, 'Link {0} target is set to be changed to {1}'.format( name, target ), pchanges else: result = True msg = 'The symlink {0} is present'.format(name) if not _check_symlink_ownership(name, user, group): result = None pchanges['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name)) msg += ( ', but the ownership of the symlink would be changed ' 'from {2}:{3} to {0}:{1}' ).format(user, group, *_get_symlink_ownership(name)) return result, msg, pchanges else: if force: return None, ('The file or directory {0} is set for removal to ' 'make way for a new symlink targeting {1}' .format(name, target)), pchanges return False, ('File or directory exists where the symlink {0} ' 'should be. Did you mean to use force?'.format(name)), pchanges def _test_owner(kwargs, user=None): ''' Convert owner to user, since other config management tools use owner, no need to punish people coming from other systems. PLEASE DO NOT DOCUMENT THIS! WE USE USER, NOT OWNER!!!! 
    '''
    if user:
        return user
    if 'owner' in kwargs:
        log.warning(
            'Use of argument owner found, "owner" is invalid, please '
            'use "user"'
        )
        return kwargs['owner']

    return user


def _unify_sources_and_hashes(source=None, source_hash=None,
                              sources=None, source_hashes=None):
    '''
    Silly little function to give us a standard tuple list for sources and
    source_hashes
    '''
    if sources is None:
        sources = []

    if source_hashes is None:
        source_hashes = []

    # singular and plural forms are mutually exclusive
    if source and sources:
        return (False,
                "source and sources are mutually exclusive", [])

    if source_hash and source_hashes:
        return (False,
                "source_hash and source_hashes are mutually exclusive", [])

    if source:
        return (True, '', [(source, source_hash)])

    # Make a nice neat list of tuples exactly len(sources) long..
    return True, '', list(zip_longest(sources, source_hashes[:len(sources)]))


def _get_template_texts(source_list=None,
                        template='jinja',
                        defaults=None,
                        context=None,
                        **kwargs):
    '''
    Iterate a list of sources and process them as templates.
    Returns a list of 'chunks' containing the rendered templates.
    '''
    ret = {'name': '_get_template_texts',
           'changes': {},
           'result': True,
           'comment': '',
           'data': []}

    if source_list is None:
        return _error(ret,
                      '_get_template_texts called with empty source_list')

    txtl = []

    for (source, source_hash) in source_list:

        # context overrides defaults when both are supplied
        tmpctx = defaults if defaults else {}
        if context:
            tmpctx.update(context)
        rndrd_templ_fn = __salt__['cp.get_template'](
            source,
            '',
            template=template,
            saltenv=__env__,
            context=tmpctx,
            **kwargs
        )
        msg = 'cp.get_template returned {0} (Called with: {1})'
        log.debug(msg.format(rndrd_templ_fn, source))
        if rndrd_templ_fn:
            tmplines = None
            with salt.utils.fopen(rndrd_templ_fn, 'rb') as fp_:
                tmplines = fp_.readlines()
            if not tmplines:
                msg = 'Failed to read rendered template file {0} ({1})'
                log.debug(msg.format(rndrd_templ_fn, source))
                ret['name'] = source
                return _error(ret, msg.format(rndrd_templ_fn, source))
            txtl.append(''.join(tmplines))
        else:
            msg = 'Failed to load template file {0}'.format(source)
            log.debug(msg)
            ret['name'] = source
            return _error(ret, msg)

    ret['data'] = txtl
    return ret


def _validate_str_list(arg):
    '''
    ensure ``arg`` is a list of strings
    '''
    if isinstance(arg, six.string_types):
        ret = [arg]
    elif isinstance(arg, Iterable) and not isinstance(arg, Mapping):
        ret = []
        for item in arg:
            if isinstance(item, six.string_types):
                ret.append(item)
            else:
                # non-string items are coerced rather than rejected
                ret.append(str(item))
    else:
        ret = [str(arg)]
    return ret


def symlink(
        name,
        target,
        force=False,
        backupname=None,
        makedirs=False,
        user=None,
        group=None,
        mode=None,
        **kwargs):
    '''
    Create a symbolic link (symlink, soft link)

    If the file already exists and is a symlink pointing to any location other
    than the specified target, the symlink will be replaced. If the symlink is
    a regular file or directory then the state will return False. If the
    regular file or directory is desired to be replaced with a symlink pass
    force: True, if it is to be renamed, pass a backupname.
    name
        The location of the symlink to create

    target
        The location that the symlink points to

    force
        If the name of the symlink exists and is not a symlink and force is
        set to False, the state will fail. If force is set to True, the file
        or directory in the way of the symlink file will be deleted to make
        room for the symlink, unless backupname is set, when it will be
        renamed

    backupname
        If the name of the symlink exists and is not a symlink, it will be
        renamed to the backupname. If the backupname already exists and force
        is False, the state will fail. Otherwise, the backupname will be
        removed first.

    makedirs
        If the location of the symlink does not already have a parent
        directory then the state will fail, setting makedirs to True will
        allow Salt to create the parent directory

    user
        The user to own the file, this defaults to the user salt is running as
        on the minion

    group
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored

    mode
        The permissions to set on this file, aka 644, 0775, 4664. Not
        supported on Windows.

        The default mode for new files and directories corresponds umask of
        salt process. For existing files and directories it's not enforced.
    '''
    name = os.path.expanduser(name)

    # Make sure that leading zeros stripped by YAML loader are added back
    mode = salt.utils.normalize_mode(mode)

    user = _test_owner(kwargs, user=user)
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.symlink')

    if user is None:
        user = __opts__['user']

    if salt.utils.is_windows():
        # Make sure the user exists in Windows
        # Salt default is 'root'
        if not __salt__['user.info'](user):
            # User not found, use the account salt is running under
            # If username not found, use System
            user = __salt__['user.current']()
            if not user:
                user = 'SYSTEM'

        if group is not None:
            log.warning(
                'The group argument for {0} has been ignored as this '
                'is a Windows system.'.format(name)
            )
        group = user

    if group is None:
        group = __salt__['file.gid_to_group'](
            __salt__['user.info'](user).get('gid', 0)
        )

    # Validate user/group/path before touching the filesystem
    preflight_errors = []
    uid = __salt__['file.user_to_uid'](user)
    gid = __salt__['file.group_to_gid'](group)

    if uid == '':
        preflight_errors.append('User {0} does not exist'.format(user))

    if gid == '':
        preflight_errors.append('Group {0} does not exist'.format(group))

    if not os.path.isabs(name):
        preflight_errors.append(
            'Specified file {0} is not an absolute path'.format(name)
        )

    if preflight_errors:
        msg = '. '.join(preflight_errors)
        if len(preflight_errors) > 1:
            msg += '.'
        return _error(ret, msg)

    presult, pcomment, ret['pchanges'] = _symlink_check(name, target, force,
                                                        user, group)

    if __opts__['test']:
        ret['result'] = presult
        ret['comment'] = pcomment
        return ret

    if not os.path.isdir(os.path.dirname(name)):
        if makedirs:
            __salt__['file.makedirs'](
                name,
                user=user,
                group=group,
                mode=mode)
        else:
            return _error(
                ret,
                'Directory {0} for symlink is not present'.format(
                    os.path.dirname(name)
                )
            )
    if __salt__['file.is_link'](name):
        # The link exists, verify that it matches the target
        if os.path.normpath(__salt__['file.readlink'](name)) != os.path.normpath(target):
            # The target is wrong, delete the link
            os.remove(name)
        else:
            if _check_symlink_ownership(name, user, group):
                # The link looks good!
                ret['comment'] = ('Symlink {0} is present and owned by '
                                  '{1}:{2}'.format(name, user, group))
            else:
                if _set_symlink_ownership(name, user, group):
                    ret['comment'] = ('Set ownership of symlink {0} to '
                                      '{1}:{2}'.format(name, user, group))
                    ret['changes']['ownership'] = '{0}:{1}'.format(user, group)
                else:
                    ret['result'] = False
                    ret['comment'] += (
                        'Failed to set ownership of symlink {0} to '
                        '{1}:{2}'.format(name, user, group)
                    )
            return ret

    elif os.path.isfile(name) or os.path.isdir(name):
        # It is not a link, but a file or dir
        if backupname is not None:
            # Make a backup first
            if os.path.lexists(backupname):
                if not force:
                    return _error(ret, ((
                        'File exists where the backup target {0} should go'
                    ).format(backupname)))
                else:
                    __salt__['file.remove'](backupname)
            os.rename(name, backupname)
        elif force:
            # Remove whatever is in the way
            if __salt__['file.is_link'](name):
                __salt__['file.remove'](name)
                ret['changes']['forced'] = 'Symlink was forcibly replaced'
            else:
                __salt__['file.remove'](name)
        else:
            # Otherwise throw an error
            if os.path.isfile(name):
                return _error(ret,
                              ('File exists where the symlink {0} should be'
                               .format(name)))
            else:
                return _error(ret, ((
                    'Directory exists where the symlink {0} should be'
                ).format(name)))

    if not os.path.exists(name):
        # The link is not present, make it
        try:
            __salt__['file.symlink'](target, name)
        except OSError as exc:
            ret['result'] = False
            ret['comment'] = ('Unable to create new symlink {0} -> '
                              '{1}: {2}'.format(name, target, exc))
            return ret
        else:
            ret['comment'] = ('Created new symlink {0} -> '
                              '{1}'.format(name, target))
            ret['changes']['new'] = name

            if not _check_symlink_ownership(name, user, group):
                if not _set_symlink_ownership(name, user, group):
                    ret['result'] = False
                    ret['comment'] += (', but was unable to set ownership to '
                                       '{0}:{1}'.format(user, group))
    return ret


def absent(name):
    '''
    Make sure that the named file or directory is absent. If it exists, it
    will be deleted. This will work to reverse any of the functions in the
    file state module. If a directory is supplied, it will be recursively
    deleted.

    name
        The path which should be deleted
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.absent')
    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name)
        )
    # Refuse to recursively delete the filesystem root
    if name == '/':
        return _error(ret, 'Refusing to make "/" absent')
    if os.path.isfile(name) or os.path.islink(name):
        ret['pchanges']['removed'] = name
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'File {0} is set for removal'.format(name)
            return ret
        try:
            __salt__['file.remove'](name)
            ret['comment'] = 'Removed file {0}'.format(name)
            ret['changes']['removed'] = name
            return ret
        except CommandExecutionError as exc:
            return _error(ret, '{0}'.format(exc))

    elif os.path.isdir(name):
        ret['pchanges']['removed'] = name
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Directory {0} is set for removal'.format(name)
            return ret
        try:
            __salt__['file.remove'](name)
            ret['comment'] = 'Removed directory {0}'.format(name)
            ret['changes']['removed'] = name
            return ret
        except (OSError, IOError):
            return _error(ret, 'Failed to remove directory {0}'.format(name))
    # Nothing at the path: already in the desired state
    ret['comment'] = 'File {0} is not present'.format(name)
    return ret


def exists(name):
    '''
    Verify that the named file or directory is present or exists.
    Ensures pre-requisites outside of Salt's purview
    (e.g., keytabs, private keys, etc.) have been previously satisfied before
    deployment.

    name
        Absolute path which must exist
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.exists')
    if not os.path.exists(name):
        return _error(ret, 'Specified path {0} does not exist'.format(name))

    ret['comment'] = 'Path {0} exists'.format(name)
    return ret


def missing(name):
    '''
    Verify that the named file or directory is missing, this returns True
    only if the named file is missing but does not remove the file if it is
    present.

    name
        Absolute path which must NOT exist
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.missing')
    if os.path.exists(name):
        return _error(ret, 'Specified path {0} exists'.format(name))

    ret['comment'] = 'Path {0} is missing'.format(name)
    return ret


def managed(name,
            source=None,
            source_hash='',
            source_hash_name=None,
            user=None,
            group=None,
            mode=None,
            template=None,
            makedirs=False,
            dir_mode=None,
            context=None,
            replace=True,
            defaults=None,
            backup='',
            show_changes=True,
            create=True,
            contents=None,
            tmp_ext='',
            contents_pillar=None,
            contents_grains=None,
            contents_newline=True,
            contents_delimiter=':',
            allow_empty=True,
            follow_symlinks=True,
            check_cmd=None,
            skip_verify=False,
            **kwargs):
    '''
    Manage a given file, this function allows for a file to be downloaded from
    the salt master and potentially run through a templating system.
name The location of the file to manage source The source file to download to the minion, this source file can be hosted on either the salt master server (``salt://``), the salt minion local file system (``/``), or on an HTTP or FTP server (``http(s)://``, ``ftp://``). Both HTTPS and HTTP are supported as well as downloading directly from Amazon S3 compatible URLs with both pre-configured and automatic IAM credentials. (see s3.get state documentation) File retrieval from Openstack Swift object storage is supported via swift://container/object_path URLs, see swift.get documentation. For files hosted on the salt file server, if the file is located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs. If source is left blank or None (use ~ in YAML), the file will be created as an empty file and the content will not be managed. This is also the case when a file already exists and the source is undefined; the contents of the file will not be changed or managed. If the file is hosted on a HTTP or FTP server then the source_hash argument is also required. A list of sources can also be passed in to provide a default source and a set of fallbacks. The first source in the list that is found to exist will be used and subsequent entries in the list will be ignored. Source list functionality only supports local files and remote files hosted on the salt master server or retrievable via HTTP, HTTPS, or FTP. .. code-block:: yaml file_override_example: file.managed: - source: - salt://file_that_does_not_exist - salt://file_that_exists source_hash This can be one of the following: 1. a source hash string 2. the URI of a file that contains source hash strings The function accepts the first encountered long unbroken alphanumeric string of correct length as a valid hash, in order from most secure to least secure: .. 
code-block:: text Type Length ====== ====== sha512 128 sha384 96 sha256 64 sha224 56 sha1 40 md5 32 **Using a Source Hash File** The file can contain several checksums for several files. Each line must contain both the file name and the hash. If no file name is matched, the first hash encountered will be used, otherwise the most secure hash with the correct source file name will be used. When using a source hash file the source_hash argument needs to be a url, the standard download urls are supported, ftp, http, salt etc: Example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash The following lines are all supported formats: .. code-block:: text /etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27 sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf ead48423703509d37c4a90e6a0d53e143b6fc268 Debian file type ``*.dsc`` files are also supported. **Inserting the Source Hash in the SLS Data** The source_hash can be specified as a simple checksum, like so: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: 79eef25f9b0b2c642c62b7f737d4f53f .. note:: Releases prior to 2016.11.0 must also include the hash type, like in the below example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f Known issues: If the remote server URL has the hash file as an apparent sub-directory of the source file, the module will discover that it has already cached a directory where a file should be cached. 
For example: .. code-block:: yaml tomdroid-src-0.7.3.tar.gz: file.managed: - name: /tmp/tomdroid-src-0.7.3.tar.gz - source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz - source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5 source_hash_name When ``source_hash`` refers to a hash file, Salt will try to find the correct hash by matching the filename/URI associated with that hash. By default, Salt will look for the filename being managed. When managing a file at path ``/tmp/foo.txt``, then the following line in a hash file would match: .. code-block:: text acbd18db4cc2f85cedef654fccc4a4d8 foo.txt However, sometimes a hash file will include multiple similar paths: .. code-block:: text 37b51d194a7513e45b56f6524f2d51f2 ./dir1/foo.txt acbd18db4cc2f85cedef654fccc4a4d8 ./dir2/foo.txt 73feffa4b7f6bb68e44cf984c85f6e88 ./dir3/foo.txt In cases like this, Salt may match the incorrect hash. This argument can be used to tell Salt which filename to match, to ensure that the correct hash is identified. For example: .. code-block:: yaml /tmp/foo.txt: file.managed: - source: https://mydomain.tld/dir2/foo.txt - source_hash: https://mydomain.tld/hashes - source_hash_name: ./dir2/foo.txt .. note:: This argument must contain the full filename entry from the checksum file, as this argument is meant to disambiguate matches for multiple files that have the same basename. So, in the example above, simply using ``foo.txt`` would not match. .. versionadded:: 2016.3.5 user The user to own the file, this defaults to the user salt is running as on the minion group The group ownership set for the file, this defaults to the group salt is running as on the minion On Windows, this is ignored mode The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. .. 
note:: This option is **not** supported on Windows. .. versionchanged:: 2016.11.0 This option can be set to ``keep``, and Salt will keep the mode from the Salt fileserver. This is only supported when the ``source`` URL begins with ``salt://``, or for files local to the minion. Because the ``source`` option cannot be used with any of the ``contents`` options, setting the ``mode`` to ``keep`` is also incompatible with the ``contents`` options. template If this setting is applied, the named templating engine will be used to render the downloaded file. The following templates are supported: - :mod:`cheetah<salt.renderers.cheetah>` - :mod:`genshi<salt.renderers.genshi>` - :mod:`jinja<salt.renderers.jinja>` - :mod:`mako<salt.renderers.mako>` - :mod:`py<salt.renderers.py>` - :mod:`wempy<salt.renderers.wempy>` makedirs : False If set to ``True``, then the parent directories will be created to facilitate the creation of the named file. If ``False``, and the parent directory of the destination file doesn't exist, the state will fail. dir_mode If directories are to be created, passing this option specifies the permissions for those directories. If this is not set, directories will be assigned permissions by adding the execute bit to the mode of the files. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. replace : True If set to ``False`` and the file already exists, the file will not be modified even if changes would otherwise be made. Permissions and ownership will still be enforced, however. context Overrides default context variables passed to the template. defaults Default context passed to the template. backup Overrides the default backup mode for this specific file. See :ref:`backup_mode documentation <file-state-backups>` for more details. show_changes Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made. 
create : True If set to ``False``, then the file will only be managed if the file already exists on the system. contents Specify the contents of the file. Cannot be used in combination with ``source``. Ignores hashes and does not use a templating engine. This value can be either a single string, a multiline YAML string or a list of strings. If a list of strings, then the strings will be joined together with newlines in the resulting file. For example, the below two example states would result in identical file contents: .. code-block:: yaml /path/to/file1: file.managed: - contents: - This is line 1 - This is line 2 /path/to/file2: file.managed: - contents: | This is line 1 This is line 2 contents_pillar .. versionadded:: 0.17.0 .. versionchanged: 2016.11.0 contents_pillar can also be a list, and the pillars will be concatinated together to form one file. Operates like ``contents``, but draws from a value stored in pillar, using the pillar path syntax used in :mod:`pillar.get <salt.modules.pillar.get>`. This is useful when the pillar value contains newlines, as referencing a pillar variable using a jinja/mako template can result in YAML formatting issues due to the newlines causing indentation mismatches. For example, the following could be used to deploy an SSH private key: .. code-block:: yaml /home/deployer/.ssh/id_rsa: file.managed: - user: deployer - group: deployer - mode: 600 - contents_pillar: userdata:deployer:id_rsa This would populate ``/home/deployer/.ssh/id_rsa`` with the contents of ``pillar['userdata']['deployer']['id_rsa']``. An example of this pillar setup would be like so: .. code-block:: yaml userdata: deployer: id_rsa: | -----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAoQiwO3JhBquPAalQF9qP1lLZNXVjYMIswrMe2HcWUVBgh+vY U7sCwx/dH6+VvNwmCoqmNnP+8gTPKGl1vgAObJAnMT623dMXjVKwnEagZPRJIxDy B/HaAre9euNiY3LvIzBTWRSeMfT+rWvIKVBpvwlgGrfgz70m0pqxu+UyFbAGLin+ GpxzZAMaFpZw4sSbIlRuissXZj/sHpQb8p9M5IeO4Z3rjkCP1cxI -----END RSA PRIVATE KEY----- .. 
note:: The private key above is shortened to keep the example brief, but shows how to do multiline string in YAML. The key is followed by a pipe character, and the mutliline string is indented two more spaces. To avoid the hassle of creating an indented multiline YAML string, the :mod:`file_tree external pillar <salt.pillar.file_tree>` can be used instead. However, this will not work for binary files in Salt releases before 2015.8.4. contents_grains .. versionadded:: 2014.7.0 Operates like ``contents``, but draws from a value stored in grains, using the grains path syntax used in :mod:`grains.get <salt.modules.grains.get>`. This functionality works similarly to ``contents_pillar``, but with grains. For example, the following could be used to deploy a "message of the day" file: .. code-block:: yaml write_motd: file.managed: - name: /etc/motd - contents_grains: motd This would populate ``/etc/motd`` file with the contents of the ``motd`` grain. The ``motd`` grain is not a default grain, and would need to be set prior to running the state: .. code-block:: bash salt '*' grains.set motd 'Welcome! This system is managed by Salt.' contents_newline : True .. versionadded:: 2014.7.0 .. versionchanged:: 2015.8.4 This option is now ignored if the contents being deployed contain binary data. If ``True``, files managed using ``contents``, ``contents_pillar``, or ``contents_grains`` will have a newline added to the end of the file if one is not present. Setting this option to ``False`` will omit this final newline. contents_delimiter .. versionadded:: 2015.8.4 Can be used to specify an alternate delimiter for ``contents_pillar`` or ``contents_grains``. This delimiter will be passed through to :py:func:`pillar.get <salt.modules.pillar.get>` or :py:func:`grains.get <salt.modules.grains.get>` when retrieving the contents. allow_empty : True .. 
versionadded:: 2015.8.4 If set to ``False``, then the state will fail if the contents specified by ``contents_pillar`` or ``contents_grains`` are empty. follow_symlinks : True .. versionadded:: 2014.7.0 If the desired path is a symlink follow it and make changes to the file to which the symlink points. check_cmd .. versionadded:: 2014.7.0 The specified command will be run with an appended argument of a *temporary* file containing the new managed contents. If the command exits with a zero status the new managed contents will be written to the managed destination. If the command exits with a nonzero exit code, the state will fail and no changes will be made to the file. For example, the following could be used to verify sudoers before making changes: .. code-block:: yaml /etc/sudoers: file.managed: - user: root - group: root - mode: 0440 - source: salt://sudoers/files/sudoers.jinja - template: jinja - check_cmd: /usr/sbin/visudo -c -f **NOTE**: This ``check_cmd`` functions differently than the requisite ``check_cmd``. tmp_ext Suffix for temp file created by ``check_cmd``. Useful for checkers dependant on config file extension (e.g. the init-checkconf upstart config checker). .. code-block:: yaml /etc/init/test.conf: file.managed: - user: root - group: root - mode: 0440 - tmp_ext: '.conf' - contents: - 'description "Salt Minion"'' - 'start on started mountall' - 'stop on shutdown' - 'respawn' - 'exec salt-minion' - check_cmd: init-checkconf -f skip_verify : False If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 ''' if 'env' in kwargs: salt.utils.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' 
) kwargs.pop('env') name = os.path.expanduser(name) ret = {'changes': {}, 'pchanges': {}, 'comment': '', 'name': name, 'result': True} if mode is not None and salt.utils.is_windows(): return _error(ret, 'The \'mode\' option is not supported on Windows') try: keep_mode = mode.lower() == 'keep' if keep_mode: # We're not hard-coding the mode, so set it to None mode = None except AttributeError: keep_mode = False # Make sure that any leading zeros stripped by YAML loader are added back mode = salt.utils.normalize_mode(mode) contents_count = len( [x for x in (contents, contents_pillar, contents_grains) if x is not None] ) if source and contents_count > 0: return _error( ret, '\'source\' cannot be used in combination with \'contents\', ' '\'contents_pillar\', or \'contents_grains\'' ) elif keep_mode and contents_count > 0: return _error( ret, 'Mode preservation cannot be used in combination with \'contents\', ' '\'contents_pillar\', or \'contents_grains\'' ) elif contents_count > 1: return _error( ret, 'Only one of \'contents\', \'contents_pillar\', and ' '\'contents_grains\' is permitted' ) # If no source is specified, set replace to False, as there is nothing # with which to replace the file. if not source and contents_count == 0 and replace: replace = False log.warning( 'State for file: {0} - Neither \'source\' nor \'contents\' nor ' '\'contents_pillar\' nor \'contents_grains\' was defined, yet ' '\'replace\' was set to \'True\'. 
As there is no source to ' 'replace the file with, \'replace\' has been set to \'False\' to ' 'avoid reading the file unnecessarily.'.format(name) ) # Use this below to avoid multiple '\0' checks and save some CPU cycles if contents_pillar is not None: if isinstance(contents_pillar, list): list_contents = [] for nextp in contents_pillar: nextc = __salt__['pillar.get'](nextp, __NOT_FOUND, delimiter=contents_delimiter) if nextc is __NOT_FOUND: return _error( ret, 'Pillar {0} does not exist'.format(nextp) ) list_contents.append(nextc) use_contents = os.linesep.join(list_contents) else: use_contents = __salt__['pillar.get'](contents_pillar, __NOT_FOUND, delimiter=contents_delimiter) if use_contents is __NOT_FOUND: return _error( ret, 'Pillar {0} does not exist'.format(contents_pillar) ) elif contents_grains is not None: if isinstance(contents_grains, list): list_contents = [] for nextg in contents_grains: nextc = __salt__['grains.get'](nextg, __NOT_FOUND, delimiter=contents_delimiter) if nextc is __NOT_FOUND: return _error( ret, 'Grain {0} does not exist'.format(nextc) ) list_contents.append(nextc) use_contents = os.linesep.join(list_contents) else: use_contents = __salt__['grains.get'](contents_grains, __NOT_FOUND, delimiter=contents_delimiter) if use_contents is __NOT_FOUND: return _error( ret, 'Grain {0} does not exist'.format(contents_grains) ) elif contents is not None: use_contents = contents else: use_contents = None if use_contents is not None: if not allow_empty and not use_contents: if contents_pillar: contents_id = 'contents_pillar {0}'.format(contents_pillar) elif contents_grains: contents_id = 'contents_grains {0}'.format(contents_grains) else: contents_id = '\'contents\'' return _error( ret, '{0} value would result in empty contents. Set allow_empty ' 'to True to allow the managed file to be empty.' 
.format(contents_id) ) contents_are_binary = \ isinstance(use_contents, six.string_types) and '\0' in use_contents if contents_are_binary: contents = use_contents else: validated_contents = _validate_str_list(use_contents) if not validated_contents: return _error( ret, 'Contents specified by contents/contents_pillar/' 'contents_grains is not a string or list of strings, and ' 'is not binary data. SLS is likely malformed.' ) contents = os.linesep.join(validated_contents) if contents_newline and not contents.endswith(os.linesep): contents += os.linesep if template: contents = __salt__['file.apply_template_on_contents']( contents, template=template, context=context, defaults=defaults, saltenv=__env__) if not isinstance(contents, six.string_types): if 'result' in contents: ret['result'] = contents['result'] else: ret['result'] = False if 'comment' in contents: ret['comment'] = contents['comment'] else: ret['comment'] = 'Error while applying template on contents' return ret if not name: return _error(ret, 'Must provide name to file.exists') user = _test_owner(kwargs, user=user) if salt.utils.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this ' 'is a Windows system.'.format(name) ) group = user if not create: if not os.path.isfile(name): # Don't create a file that is not already present ret['comment'] = ('File {0} is not present and is not set for ' 'creation').format(name) return ret u_check = _check_user(user, group) if u_check: # The specified user or group do not exist return _error(ret, u_check) if not os.path.isabs(name): return _error( ret, 'Specified file {0} is not an absolute path'.format(name)) if os.path.isdir(name): ret['comment'] = 'Specified target {0} is a directory'.format(name) ret['result'] = False return ret if context is None: context = {} elif not isinstance(context, dict): return _error( ret, 'Context must be formed as a dict') if defaults and not isinstance(defaults, dict): return _error( ret, 
'Defaults must be formed as a dict') if not replace and os.path.exists(name): # Check and set the permissions if necessary ret, _ = __salt__['file.check_perms'](name, ret, user, group, mode, follow_symlinks) if __opts__['test']: ret['comment'] = 'File {0} not updated'.format(name) elif not ret['changes'] and ret['result']: ret['comment'] = ('File {0} exists with proper permissions. ' 'No changes made.'.format(name)) return ret accum_data, _ = _load_accumulators() if name in accum_data: if not context: context = {} context['accumulator'] = accum_data[name] try: if __opts__['test']: if 'file.check_managed_changes' in __salt__: ret['pchanges'] = __salt__['file.check_managed_changes']( name, source, source_hash, source_hash_name, user, group, mode, template, context, defaults, __env__, contents, skip_verify, **kwargs ) if isinstance(ret['pchanges'], tuple): ret['result'], ret['comment'] = ret['pchanges'] elif ret['pchanges']: ret['result'] = None ret['comment'] = 'The file {0} is set to be changed'.format(name) if show_changes and 'diff' in ret['pchanges']: ret['changes']['diff'] = ret['pchanges']['diff'] if not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: ret['result'] = True ret['comment'] = 'The file {0} is in the correct state'.format(name) return ret # If the source is a list then find which file exists source, source_hash = __salt__['file.source_list']( source, source_hash, __env__ ) except CommandExecutionError as exc: ret['result'] = False ret['comment'] = 'Unable to manage file: {0}'.format(exc) return ret # Gather the source file from the server try: sfn, source_sum, comment_ = __salt__['file.get_managed']( name, template, source, source_hash, source_hash_name, user, group, mode, __env__, context, defaults, skip_verify, **kwargs ) except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) return _error(ret, 'Unable to manage file: {0}'.format(exc)) tmp_filename = None if check_cmd: tmp_filename = 
salt.utils.mkstemp(suffix=tmp_ext) # if exists copy existing file to tmp to compare if __salt__['file.file_exists'](name): try: __salt__['file.copy'](name, tmp_filename) except Exception as exc: return _error( ret, 'Unable to copy file {0} to {1}: {2}'.format( name, tmp_filename, exc ) ) try: ret = __salt__['file.manage_file']( tmp_filename, sfn, ret, source, source_sum, user, group, mode, __env__, backup, makedirs, template, show_changes, contents, dir_mode, follow_symlinks, skip_verify, keep_mode, **kwargs) except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) if os.path.isfile(tmp_filename): os.remove(tmp_filename) return _error(ret, 'Unable to check_cmd file: {0}'.format(exc)) # file being updated to verify using check_cmd if ret['changes']: # Reset ret ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} check_cmd_opts = {} if 'shell' in __grains__: check_cmd_opts['shell'] = __grains__['shell'] cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts) if isinstance(cret, dict): ret.update(cret) if os.path.isfile(tmp_filename): os.remove(tmp_filename) return ret # Since we generated a new tempfile and we are not returning here # lets change the original sfn to the new tempfile or else we will # get file not found sfn = tmp_filename else: ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if comment_ and contents is None: return _error(ret, comment_) else: try: return __salt__['file.manage_file']( name, sfn, ret, source, source_sum, user, group, mode, __env__, backup, makedirs, template, show_changes, contents, dir_mode, follow_symlinks, skip_verify, keep_mode, **kwargs) except Exception as exc: ret['changes'] = {} log.debug(traceback.format_exc()) return _error(ret, 'Unable to manage file: {0}'.format(exc)) finally: if tmp_filename and os.path.isfile(tmp_filename): os.remove(tmp_filename) _RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs'] def _get_recurse_set(recurse): ''' 
    Convert *recurse* definition to a set of strings.

    Raises TypeError or ValueError when *recurse* has wrong structure.
    '''
    if not recurse:
        return set()

    if not isinstance(recurse, list):
        raise TypeError('"recurse" must be formed as a list of strings')

    try:
        recurse_set = set(recurse)
    except TypeError:  # non-hashable elements
        recurse_set = None
    if recurse_set is None or not set(_RECURSE_TYPES) >= recurse_set:
        raise ValueError('Types for "recurse" limited to {0}.'.format(
            ', '.join('"{0}"'.format(rtype) for rtype in _RECURSE_TYPES)))
    if 'ignore_files' in recurse_set and 'ignore_dirs' in recurse_set:
        raise ValueError('Must not specify "recurse" options "ignore_files"'
                         ' and "ignore_dirs" at the same time.')
    return recurse_set


def _depth_limited_walk(top, max_depth=None):
    '''
    Walk the directory tree under *top* up till reaching max_depth.
    With max_depth=None (default), do not limit depth.
    '''
    for root, dirs, files in os.walk(top):
        if max_depth is not None:
            rel_depth = root.count(os.sep) - top.count(os.sep)
            if rel_depth >= max_depth:
                # Pruning dirs in-place stops os.walk from descending further
                del dirs[:]
        yield (str(root), list(dirs), list(files))


def directory(name,
              user=None,
              group=None,
              recurse=None,
              max_depth=None,
              dir_mode=None,
              file_mode=None,
              makedirs=False,
              clean=False,
              require=None,
              exclude_pat=None,
              follow_symlinks=False,
              force=False,
              backupname=None,
              allow_symlink=True,
              children_only=False,
              **kwargs):
    '''
    Ensure that a named directory is present and has the right perms

    name
        The location to create or manage a directory

    user
        The user to own the directory; this defaults to the user salt is
        running as on the minion

    group
        The group ownership set for the directory; this defaults to the group
        salt is running as on the minion. On Windows, this is ignored

    recurse
        Enforce user/group ownership and mode of directory recursively. Accepts
        a list of strings representing what you would like to recurse.  If
        ``mode`` is defined, will recurse on both ``file_mode`` and ``dir_mode`` if
        they are defined.
        If ``ignore_files`` or ``ignore_dirs`` is included, files or
        directories will be left unchanged respectively.

        Example:

        .. code-block:: yaml

            /var/log/httpd:
              file.directory:
                - user: root
                - group: root
                - dir_mode: 755
                - file_mode: 644
                - recurse:
                  - user
                  - group
                  - mode

        Leave files or directories unchanged:

        .. code-block:: yaml

            /var/log/httpd:
              file.directory:
                - user: root
                - group: root
                - dir_mode: 755
                - file_mode: 644
                - recurse:
                  - user
                  - group
                  - mode
                  - ignore_dirs

        .. versionadded:: 2015.5.0

    max_depth
        Limit the recursion depth. The default is no limit=None.
        'max_depth' and 'clean' are mutually exclusive.

        .. versionadded:: 2016.11.0

    dir_mode / mode
        The permissions mode to set any directories created. Not supported on
        Windows.

        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's not
        enforced.

    file_mode
        The permissions mode to set any files created if 'mode' is run in
        'recurse'. This defaults to dir_mode. Not supported on Windows.

        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it's not
        enforced.

    makedirs
        If the directory is located in a path without a parent directory, then
        the state will fail. If makedirs is set to True, then the parent
        directories will be created to facilitate the creation of the named
        file.

    clean
        Make sure that only files that are set up by salt and required by this
        function are kept. If this option is set then everything in this
        directory will be deleted unless it is required. 'clean' and
        'max_depth' are mutually exclusive.

    require
        Require other resources such as packages or files

    exclude_pat
        When 'clean' is set to True, exclude this pattern from removal list
        and preserve in the destination.

    follow_symlinks : False
        If the desired path is a symlink (or ``recurse`` is defined and a
        symlink is encountered while recursing), follow it and check the
        permissions of the directory/file to which the symlink points.

        ..
versionadded:: 2014.1.4 force If the name of the directory exists and is not a directory and force is set to False, the state will fail. If force is set to True, the file in the way of the directory will be deleted to make room for the directory, unless backupname is set, then it will be renamed. .. versionadded:: 2014.7.0 backupname If the name of the directory exists and is not a directory, it will be renamed to the backupname. If the backupname already exists and force is False, the state will fail. Otherwise, the backupname will be removed first. .. versionadded:: 2014.7.0 allow_symlink : True If allow_symlink is True and the specified path is a symlink, it will be allowed to remain if it points to a directory. If allow_symlink is False then the state will fail, unless force is also set to True, in which case it will be removed or renamed, depending on the value of the backupname argument. .. versionadded:: 2014.7.0 children_only : False If children_only is True the base of a path is excluded when performing a recursive operation. In case of /path/to/base, base will be ignored while all of /path/to/base/* are still operated on. 
''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': True, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.directory') # Remove trailing slash, if present and we're not working on "/" itself if name[-1] == '/' and name != '/': name = name[:-1] if max_depth is not None and clean: return _error(ret, 'Cannot specify both max_depth and clean') user = _test_owner(kwargs, user=user) if salt.utils.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this is ' 'a Windows system.'.format(name) ) group = user if 'mode' in kwargs and not dir_mode: dir_mode = kwargs.get('mode', []) if not file_mode: file_mode = dir_mode # Make sure that leading zeros stripped by YAML loader are added back dir_mode = salt.utils.normalize_mode(dir_mode) file_mode = salt.utils.normalize_mode(file_mode) u_check = _check_user(user, group) if u_check: # The specified user or group do not exist return _error(ret, u_check) if not os.path.isabs(name): return _error( ret, 'Specified file {0} is not an absolute path'.format(name)) # Check for existing file or symlink if os.path.isfile(name) or (not allow_symlink and os.path.islink(name)) \ or (force and os.path.islink(name)): # Was a backupname specified if backupname is not None: # Make a backup first if os.path.lexists(backupname): if not force: return _error(ret, (( 'File exists where the backup target {0} should go' ).format(backupname))) else: __salt__['file.remove'](backupname) os.rename(name, backupname) elif force: # Remove whatever is in the way if os.path.isfile(name): os.remove(name) ret['changes']['forced'] = 'File was forcibly replaced' elif __salt__['file.is_link'](name): __salt__['file.remove'](name) ret['changes']['forced'] = 'Symlink was forcibly replaced' else: __salt__['file.remove'](name) else: if os.path.isfile(name): return _error( ret, 'Specified location {0} exists and is a file'.format(name)) elif 
os.path.islink(name): return _error( ret, 'Specified location {0} exists and is a symlink'.format(name)) presult, pcomment, ret['pchanges'] = _check_directory( name, user, group, recurse or [], dir_mode, clean, require, exclude_pat, max_depth) if __opts__['test']: ret['result'] = presult ret['comment'] = pcomment return ret if not os.path.isdir(name): # The dir does not exist, make it if not os.path.isdir(os.path.dirname(name)): # The parent directory does not exist, create them if makedirs: # Make sure the drive is mapped before trying to create the # path in windows if salt.utils.is_windows(): drive, path = os.path.splitdrive(name) if not os.path.isdir(drive): return _error( ret, 'Drive {0} is not mapped'.format(drive)) # Everything's good, create the path __salt__['file.makedirs']( name, user=user, group=group, mode=dir_mode ) else: return _error( ret, 'No directory to create {0} in'.format(name)) __salt__['file.mkdir']( name, user=user, group=group, mode=dir_mode ) ret['changes'][name] = 'New Dir' if not os.path.isdir(name): return _error(ret, 'Failed to create directory {0}'.format(name)) # issue 32707: skip this __salt__['file.check_perms'] call if children_only == True # Check permissions if not children_only: ret, perms = __salt__['file.check_perms'](name, ret, user, group, dir_mode, follow_symlinks) errors = [] if recurse or clean: # walk path only once and store the result walk_l = list(_depth_limited_walk(name, max_depth)) # root: (dirs, files) structure, compatible for python2.6 walk_d = {} for i in walk_l: walk_d[i[0]] = (i[1], i[2]) recurse_set = None if recurse: try: recurse_set = _get_recurse_set(recurse) except (TypeError, ValueError) as exc: ret['result'] = False ret['comment'] = '{0}'.format(exc) # NOTE: Should this be enough to stop the whole check altogether? if recurse_set: if 'user' in recurse_set: if user: uid = __salt__['file.user_to_uid'](user) # file.user_to_uid returns '' if user does not exist. 
Above # check for user is not fatal, so we need to be sure user # exists. if isinstance(uid, six.string_types): ret['result'] = False ret['comment'] = 'Failed to enforce ownership for ' \ 'user {0} (user does not ' \ 'exist)'.format(user) else: ret['result'] = False ret['comment'] = 'user not specified, but configured as ' \ 'a target for recursive ownership ' \ 'management' else: user = None if 'group' in recurse_set: if group: gid = __salt__['file.group_to_gid'](group) # As above with user, we need to make sure group exists. if isinstance(gid, six.string_types): ret['result'] = False ret['comment'] = 'Failed to enforce group ownership ' \ 'for group {0}'.format(group) else: ret['result'] = False ret['comment'] = 'group not specified, but configured ' \ 'as a target for recursive ownership ' \ 'management' else: group = None if 'mode' not in recurse_set: file_mode = None dir_mode = None check_files = 'ignore_files' not in recurse_set check_dirs = 'ignore_dirs' not in recurse_set for root, dirs, files in walk_l: if check_files: for fn_ in files: full = os.path.join(root, fn_) try: ret, _ = __salt__['file.check_perms']( full, ret, user, group, file_mode, follow_symlinks) except CommandExecutionError as exc: if not exc.strerror.endswith('does not exist'): errors.append(exc.strerror) if check_dirs: for dir_ in dirs: full = os.path.join(root, dir_) try: ret, _ = __salt__['file.check_perms']( full, ret, user, group, dir_mode, follow_symlinks) except CommandExecutionError as exc: if not exc.strerror.endswith('does not exist'): errors.append(exc.strerror) if clean: keep = _gen_keep_files(name, require, walk_d) log.debug('List of kept files when use file.directory with clean: %s', keep) removed = _clean_dir(name, list(keep), exclude_pat) if removed: ret['changes']['removed'] = removed ret['comment'] = 'Files cleaned from directory {0}'.format(name) # issue 32707: reflect children_only selection in comments if not ret['comment']: if children_only: ret['comment'] = 
'Directory {0}/* updated'.format(name) else: ret['comment'] = 'Directory {0} updated'.format(name) if __opts__['test']: ret['comment'] = 'Directory {0} not updated'.format(name) elif not ret['changes'] and ret['result']: ret['comment'] = 'Directory {0} is in the correct state'.format(name) if errors: ret['result'] = False ret['comment'] += '\n\nThe following errors were encountered:\n' for error in errors: ret['comment'] += '\n- {0}'.format(error) return ret def recurse(name, source, clean=False, require=None, user=None, group=None, dir_mode=None, file_mode=None, sym_mode=None, template=None, context=None, defaults=None, include_empty=False, backup='', include_pat=None, exclude_pat=None, maxdepth=None, keep_symlinks=False, force_symlinks=False, **kwargs): ''' Recurse through a subdirectory on the master and copy said subdirectory over to the specified path. name The directory to set the recursion in source The source directory, this directory is located on the salt master file server and is specified with the salt:// protocol. If the directory is located on the master in the directory named spam, and is called eggs, the source string is salt://spam/eggs clean Make sure that only files that are set up by salt and required by this function are kept. If this option is set then everything in this directory will be deleted unless it is required. require Require other resources such as packages or files user The user to own the directory. This defaults to the user salt is running as on the minion group The group ownership set for the directory. This defaults to the group salt is running as on the minion. On Windows, this is ignored dir_mode The permissions mode to set on any directories created. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. .. note:: This option is **not** supported on Windows. file_mode The permissions mode to set on any files created. 
The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. .. note:: This option is **not** supported on Windows. .. versionchanged:: 2016.11.0 This option can be set to ``keep``, and Salt will keep the mode from the Salt fileserver. This is only supported when the ``source`` URL begins with ``salt://``, or for files local to the minion. Because the ``source`` option cannot be used with any of the ``contents`` options, setting the ``mode`` to ``keep`` is also incompatible with the ``contents`` options. sym_mode The permissions mode to set on any symlink created. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. .. note:: This option is **not** supported on Windows. template If this setting is applied, the named templating engine will be used to render the downloaded file. The following templates are supported: - :mod:`cheetah<salt.renderers.cheetah>` - :mod:`genshi<salt.renderers.genshi>` - :mod:`jinja<salt.renderers.jinja>` - :mod:`mako<salt.renderers.mako>` - :mod:`py<salt.renderers.py>` - :mod:`wempy<salt.renderers.wempy>` .. note:: The template option is required when recursively applying templates. context Overrides default context variables passed to the template. defaults Default context passed to the template. include_empty Set this to True if empty directories should also be created (default is False) backup Overrides the default backup mode for all replaced files. See :ref:`backup_mode documentation <file-state-backups>` for more details. include_pat When copying, include only this pattern from the source. Default is glob match; if prefixed with 'E@', then regexp match. Example: .. code-block:: yaml - include_pat: hello* :: glob matches 'hello01', 'hello02' ... but not 'otherhello' - include_pat: E@hello :: regexp matches 'otherhello', 'hello01' ... 
exclude_pat Exclude this pattern from the source when copying. If both `include_pat` and `exclude_pat` are supplied, then it will apply conditions cumulatively. i.e. first select based on include_pat, and then within that result apply exclude_pat. Also, when 'clean=True', exclude this pattern from the removal list and preserve in the destination. Example: .. code-block:: yaml - exclude_pat: APPDATA* :: glob matches APPDATA.01, APPDATA.02,.. for exclusion - exclude_pat: E@(APPDATA)|(TEMPDATA) :: regexp matches APPDATA or TEMPDATA for exclusion maxdepth When copying, only copy paths which are of depth `maxdepth` from the source path. Example: .. code-block:: yaml - maxdepth: 0 :: Only include files located in the source directory - maxdepth: 1 :: Only include files located in the source or immediate subdirectories keep_symlinks Keep symlinks when copying from the source. This option will cause the copy operation to terminate at the symlink. If desire behavior similar to rsync, then set this to True. force_symlinks Force symlink creation. This option will force the symlink creation. If a file or directory is obstructing symlink creation it will be recursively removed so that symlink creation can proceed. This option is usually not needed except in special circumstances. ''' if 'env' in kwargs: salt.utils.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' ) kwargs.pop('env') name = os.path.expanduser(sdecode(name)) user = _test_owner(kwargs, user=user) if salt.utils.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this ' 'is a Windows system.'.format(name) ) group = user ret = { 'name': name, 'changes': {}, 'pchanges': {}, 'result': True, 'comment': {} # { path: [comment, ...] 
} } if 'mode' in kwargs: ret['result'] = False ret['comment'] = ( '\'mode\' is not allowed in \'file.recurse\'. Please use ' '\'file_mode\' and \'dir_mode\'.' ) return ret if any([x is not None for x in (dir_mode, file_mode, sym_mode)]) \ and salt.utils.is_windows(): return _error(ret, 'mode management is not supported on Windows') # Make sure that leading zeros stripped by YAML loader are added back dir_mode = salt.utils.normalize_mode(dir_mode) try: keep_mode = file_mode.lower() == 'keep' if keep_mode: # We're not hard-coding the mode, so set it to None file_mode = None except AttributeError: keep_mode = False file_mode = salt.utils.normalize_mode(file_mode) u_check = _check_user(user, group) if u_check: # The specified user or group do not exist return _error(ret, u_check) if not os.path.isabs(name): return _error( ret, 'Specified file {0} is not an absolute path'.format(name)) # expand source into source_list source_list = _validate_str_list(source) for idx, val in enumerate(source_list): source_list[idx] = val.rstrip('/') for precheck in source_list: if not precheck.startswith('salt://'): return _error(ret, ('Invalid source \'{0}\' ' '(must be a salt:// URI)'.format(precheck))) # Select the first source in source_list that exists try: source, source_hash = __salt__['file.source_list'](source_list, '', __env__) except CommandExecutionError as exc: ret['result'] = False ret['comment'] = 'Recurse failed: {0}'.format(exc) return ret # Check source path relative to fileserver root, make sure it is a # directory srcpath, senv = salt.utils.url.parse(source) if senv is None: senv = __env__ master_dirs = __salt__['cp.list_master_dirs'](saltenv=senv) if srcpath not in master_dirs \ and not any((x for x in master_dirs if x.startswith(srcpath + '/'))): ret['result'] = False ret['comment'] = ( 'The directory \'{0}\' does not exist on the salt fileserver ' 'in saltenv \'{1}\''.format(srcpath, senv) ) return ret # Verify the target directory if not os.path.isdir(name): if 
os.path.exists(name): # it is not a dir, but it exists - fail out return _error( ret, 'The path {0} exists and is not a directory'.format(name)) if not __opts__['test']: __salt__['file.makedirs_perms'](name, user, group, dir_mode) def add_comment(path, comment): comments = ret['comment'].setdefault(path, []) if isinstance(comment, six.string_types): comments.append(comment) else: comments.extend(comment) def merge_ret(path, _ret): # Use the most "negative" result code (out of True, None, False) if _ret['result'] is False or ret['result'] is True: ret['result'] = _ret['result'] # Only include comments about files that changed if _ret['result'] is not True and _ret['comment']: add_comment(path, _ret['comment']) if _ret['changes']: ret['changes'][path] = _ret['changes'] def manage_file(path, source): if clean and os.path.exists(path) and os.path.isdir(path): _ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if __opts__['test']: _ret['comment'] = 'Replacing directory {0} with a ' \ 'file'.format(path) _ret['result'] = None merge_ret(path, _ret) return else: __salt__['file.remove'](path) _ret['changes'] = {'diff': 'Replaced directory with a ' 'new file'} merge_ret(path, _ret) # Conflicts can occur if some kwargs are passed in here pass_kwargs = {} faults = ['mode', 'makedirs'] for key in kwargs: if key not in faults: pass_kwargs[key] = kwargs[key] _ret = managed( path, source=source, user=user, group=group, mode='keep' if keep_mode else file_mode, template=template, makedirs=True, context=context, defaults=defaults, backup=backup, **pass_kwargs) merge_ret(path, _ret) def manage_directory(path): if os.path.basename(path) == '..': return if clean and os.path.exists(path) and not os.path.isdir(path): _ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if __opts__['test']: _ret['comment'] = 'Replacing {0} with a directory'.format(path) _ret['result'] = None merge_ret(path, _ret) return else: __salt__['file.remove'](path) _ret['changes'] = 
{'diff': 'Replaced file with a directory'} merge_ret(path, _ret) _ret = directory( path, user=user, group=group, recurse=[], dir_mode=dir_mode, file_mode=None, makedirs=True, clean=False, require=None) merge_ret(path, _ret) # Process symlinks and return the updated filenames list def process_symlinks(filenames, symlinks): for lname, ltarget in six.iteritems(symlinks): if not salt.utils.check_include_exclude( os.path.relpath(lname, srcpath), include_pat, exclude_pat): continue srelpath = os.path.relpath(lname, srcpath) # Check for max depth if maxdepth is not None: srelpieces = srelpath.split('/') if not srelpieces[-1]: srelpieces = srelpieces[:-1] if len(srelpieces) > maxdepth + 1: continue # Check for all paths that begin with the symlink # and axe it leaving only the dirs/files below it. # This needs to use list() otherwise they reference # the same list. _filenames = list(filenames) for filename in _filenames: if filename.startswith(lname): log.debug('** skipping file ** {0}, it intersects a ' 'symlink'.format(filename)) filenames.remove(filename) # Create the symlink along with the necessary dirs. # The dir perms/ownership will be adjusted later # if needed _ret = symlink(os.path.join(name, srelpath), ltarget, makedirs=True, force=force_symlinks, user=user, group=group, mode=sym_mode) if not _ret: continue merge_ret(os.path.join(name, srelpath), _ret) # Add the path to the keep set in case clean is set to True keep.add(os.path.join(name, srelpath)) vdir.update(keep) return filenames keep = set() vdir = set() if not srcpath.endswith('/'): # we're searching for things that start with this *directory*. # use '/' since #master only runs on POSIX srcpath = srcpath + '/' fns_ = __salt__['cp.list_master'](senv, srcpath) # If we are instructed to keep symlinks, then process them. if keep_symlinks: # Make this global so that emptydirs can use it if needed. 
symlinks = __salt__['cp.list_master_symlinks'](senv, srcpath) fns_ = process_symlinks(fns_, symlinks) for fn_ in fns_: if not fn_.strip(): continue # fn_ here is the absolute (from file_roots) source path of # the file to copy from; it is either a normal file or an # empty dir(if include_empty==true). relname = sdecode(os.path.relpath(fn_, srcpath)) if relname.startswith('..'): continue # Check for maxdepth of the relative path if maxdepth is not None: # Since paths are all master, just use POSIX separator relpieces = relname.split('/') # Handle empty directories (include_empty==true) by removing the # the last piece if it is an empty string if not relpieces[-1]: relpieces = relpieces[:-1] if len(relpieces) > maxdepth + 1: continue # Check if it is to be excluded. Match only part of the path # relative to the target directory if not salt.utils.check_include_exclude( relname, include_pat, exclude_pat): continue dest = os.path.join(name, relname) dirname = os.path.dirname(dest) keep.add(dest) if dirname not in vdir: # verify the directory perms if they are set manage_directory(dirname) vdir.add(dirname) src = salt.utils.url.create(fn_, saltenv=senv) manage_file(dest, src) if include_empty: mdirs = __salt__['cp.list_master_dirs'](senv, srcpath) for mdir in mdirs: if not salt.utils.check_include_exclude( os.path.relpath(mdir, srcpath), include_pat, exclude_pat): continue mdest = os.path.join(name, os.path.relpath(mdir, srcpath)) # Check for symlinks that happen to point to an empty dir. 
                if keep_symlinks:
                    # An empty dir that lives under a kept symlink is managed
                    # via the symlink, not as a directory of its own.
                    islink = False
                    for link in symlinks:
                        if mdir.startswith(link, 0):
                            log.debug('** skipping empty dir ** {0}, it intersects'
                                      ' a symlink'.format(mdir))
                            islink = True
                            break
                    if islink:
                        continue
                manage_directory(mdest)
                keep.add(mdest)

    keep = list(keep)
    if clean:
        # TODO: Use directory(clean=True) instead
        keep += _gen_keep_files(name, require)
        removed = _clean_dir(name, list(keep), exclude_pat)
        if removed:
            if __opts__['test']:
                if ret['result']:
                    ret['result'] = None
                add_comment('removed', removed)
            else:
                ret['changes']['removed'] = removed

    # Flatten comments until salt command line client learns
    # to display structured comments in a readable fashion
    ret['comment'] = '\n'.join(u'\n#### {0} ####\n{1}'.format(
        k, v if isinstance(v, six.string_types) else '\n'.join(v)
    ) for (k, v) in six.iteritems(ret['comment'])).strip()

    if not ret['comment']:
        ret['comment'] = 'Recursively updated {0}'.format(name)

    if not ret['changes'] and ret['result']:
        ret['comment'] = 'The directory {0} is in the correct state'.format(
            name
        )

    return ret


def retention_schedule(name, retain, strptime_format=None, timezone=None):
    '''
    Apply retention scheduling to backup storage directory.

    .. versionadded:: 2016.11.0

    :param name:
        The filesystem path to the directory containing backups to be managed.

    :param retain:
        Delete the backups, except for the ones we want to keep.
        The N below should be an integer but may also be the special value of
        ``all``, which keeps all files matching the criteria.
        All of the retain options default to None,
        which means to not keep files based on this criteria.

        :most_recent N:
            Keep the most recent N files.

        :first_of_hour N:
            For the last N hours from now, keep the first file after the hour.

        :first_of_day N:
            For the last N days from now, keep the first file after midnight.
            See also ``timezone``.

        :first_of_week N:
            For the last N weeks from now, keep the first file after Sunday
            midnight.

        :first_of_month N:
            For the last N months from now, keep the first file after the start
            of the month.

        :first_of_year N:
            For the last N years from now, keep the first file after the start
            of the year.

    :param strptime_format:
        A python strptime format string used to first match the filenames of
        backups and then parse the filename to determine the datetime of the
        file.
        https://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
        Defaults to None, which considers all files in the directory to be
        backups eligible for deletion and uses ``os.path.getmtime()`` to
        determine the datetime.

    :param timezone:
        The timezone to use when determining midnight.
        This is only used when datetime is pulled from ``os.path.getmtime()``.
        Defaults to ``None`` which uses the timezone from the locale.

    .. code-block: yaml

        /var/backups/example_directory:
          file.retention_schedule:
            - retain:
                most_recent: 5
                first_of_hour: 4
                first_of_day: 7
                first_of_week: 6    # NotImplemented yet.
                first_of_month: 6
                first_of_year: all
            - strptime_format: example_name_%Y%m%dT%H%M%S.tar.bz2
            - timezone: None

    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {'retained': [], 'deleted': [], 'ignored': []},
           'pchanges': {'retained': [], 'deleted': [], 'ignored': []},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.retention_schedule')
    if not os.path.isdir(name):
        return _error(ret, 'Name provided to file.retention must be a directory')

    # get list of files in directory
    all_files = __salt__['file.readdir'](name)

    # if strptime_format is set, filter through the list to find names which parse and get their datetimes.
    beginning_of_unix_time = datetime(1970, 1, 1)

    def get_file_time_from_strptime(f):
        # Returns (datetime, epoch-seconds); (None, None) for non-matching names.
        try:
            ts = datetime.strptime(f, strptime_format)
            ts_epoch = salt.utils.total_seconds(ts - beginning_of_unix_time)
            return (ts, ts_epoch)
        except ValueError:
            # Files which don't match the pattern are not relevant files.
            return (None, None)

    def get_file_time_from_mtime(f):
        # Fallback when no strptime_format is given: use the file's mtime.
        # NOTE(review): ``timezone`` is handed straight to
        # datetime.fromtimestamp(), so it must be a tzinfo instance (or
        # None), not a timezone name string — confirm against callers.
        lstat = __salt__['file.lstat'](os.path.join(name, f))
        if lstat:
            mtime = lstat['st_mtime']
            return (datetime.fromtimestamp(mtime, timezone), mtime)
        else:   # maybe it was deleted since we did the readdir?
            return (None, None)

    get_file_time = get_file_time_from_strptime if strptime_format else get_file_time_from_mtime

    # data structures are nested dicts:
    # files_by_ymd = year.month.day.hour.unixtime: filename
    # files_by_y_week_dow = year.week_of_year.day_of_week.unixtime: filename
    # http://the.randomengineer.com/2015/04/28/python-recursive-defaultdict/
    # TODO: move to an ordered dict model and reduce the number of sorts in the rest of the code?
    def dict_maker():
        return defaultdict(dict_maker)
    files_by_ymd = dict_maker()
    files_by_y_week_dow = dict_maker()
    relevant_files = set()
    ignored_files = set()
    for f in all_files:
        ts, ts_epoch = get_file_time(f)
        if ts:
            files_by_ymd[ts.year][ts.month][ts.day][ts.hour][ts_epoch] = f
            week_of_year = ts.isocalendar()[1]
            files_by_y_week_dow[ts.year][week_of_year][ts.weekday()][ts_epoch] = f
            relevant_files.add(f)
        else:
            ignored_files.add(f)

    # This is tightly coupled with the file_with_times data-structure above.
    RETAIN_TO_DEPTH = {
        'first_of_year': 1,
        'first_of_month': 2,
        'first_of_day': 3,
        'first_of_hour': 4,
        'most_recent': 5,
    }

    def get_first(fwt):
        # Descend the nested dict along the smallest keys and return the
        # single (earliest) filename at the leaf, as a one-element set.
        if isinstance(fwt, dict):
            first_sub_key = sorted(fwt.keys())[0]
            return get_first(fwt[first_sub_key])
        else:
            return set([fwt, ])

    def get_first_n_at_depth(fwt, depth, n):
        # Walk the newest ``n`` subtrees at the given depth and collect the
        # earliest file from each — i.e. "first file of each period".
        if depth <= 0:
            return get_first(fwt)
        else:
            result_set = set()
            for k in sorted(fwt.keys(), reverse=True):
                needed = n - len(result_set)
                if needed < 1:
                    break
                result_set |= get_first_n_at_depth(fwt[k], depth - 1, needed)
            return result_set

    # for each retain criteria, add filenames which match the criteria to the retain set.
    retained_files = set()
    for retention_rule, keep_count in retain.items():
        # This is kind of a hack, since 'all' should really mean all,
        # but I think it's a large enough number that even modern filesystems would
        # choke if they had this many files in a single directory.
        keep_count = sys.maxsize if 'all' == keep_count else int(keep_count)
        if 'first_of_week' == retention_rule:
            first_of_week_depth = 2   # year + week_of_year = 2
            # I'm adding 1 to keep_count below because it fixed an off-by one
            # issue in the tests. I don't understand why, and that bothers me.
            retained_files |= get_first_n_at_depth(files_by_y_week_dow,
                                                   first_of_week_depth,
                                                   keep_count + 1)
        else:
            retained_files |= get_first_n_at_depth(files_by_ymd,
                                                   RETAIN_TO_DEPTH[retention_rule],
                                                   keep_count)

    deletable_files = list(relevant_files - retained_files)
    deletable_files.sort(reverse=True)
    changes = {
        'retained': sorted(list(retained_files), reverse=True),
        'deleted': deletable_files,
        'ignored': sorted(list(ignored_files), reverse=True),
    }
    ret['pchanges'] = changes

    # TODO: track and report how much space was / would be reclaimed
    if __opts__['test']:
        ret['comment'] = '{0} backups would have been removed from {1}.\n'.format(len(deletable_files), name)
        if deletable_files:
            ret['result'] = None
    else:
        for f in deletable_files:
            __salt__['file.remove'](os.path.join(name, f))
        ret['comment'] = '{0} backups were removed from {1}.\n'.format(len(deletable_files), name)
        ret['changes'] = changes

    return ret


def line(name, content, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True, create=False, user=None,
         group=None, file_mode=None):
    '''
    Line-based editing of a file.

    .. versionadded:: 2015.8.0

    name
        Filesystem path to the file to be edited.

    content
        Content of the line.

    match
        Match the target line for an action by
        a fragment of a string or regular expression.
    If neither ``before`` nor ``after`` are provided, and ``match``
        is also ``None``, match becomes the ``content`` value.

    mode
        Defines how to edit a line. One of the following options is
        required:

        - ensure
            If line does not exist, it will be added.
        - replace
            If line already exists, it will be replaced.
        - delete
            Delete the line, once found.
        - insert
            Insert a line.

        .. note::

            If ``mode=insert`` is used, at least one of the following
            options must also be defined: ``location``, ``before``, or
            ``after``. If ``location`` is used, it takes precedence
            over the other two options.

    location
        Defines where to place content in the line. Note this option is only
        used when ``mode=insert`` is specified. If a location is passed in, it
        takes precedence over both the ``before`` and ``after`` kwargs. Valid
        locations are:

        - start
            Place the content at the beginning of the file.
        - end
            Place the content at the end of the file.

    before
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert`` mode
        is defined.

    after
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert`` mode
        is defined.

    show_changes
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Default is ``True``

        .. note::
            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to generate the diff.

    backup
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".

    quiet
        Do not raise any exceptions. E.g. ignore the fact that the file that is
        tried to be edited does not exist and nothing really happened.

    indent
        Keep indentation with the previous line. This option is not considered when
        the ``delete`` mode is specified.

    :param create:
        Create an empty file if it doesn't exist.

        .. versionadded:: 2016.11.0

    :param user:
        The user to own the file, this defaults to the user salt is running as
        on the minion.

        .. versionadded:: 2016.11.0

    :param group:
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion On Windows, this is ignored.

        .. versionadded:: 2016.11.0

    :param file_mode:
        The permissions to set on this file, aka 644, 0775, 4664. Not supported
        on Windows.

        .. versionadded:: 2016.11.0

    If an equal sign (``=``) appears in an argument to a Salt command, it is
    interpreted as a keyword argument in the format of ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block: yaml

       update_config:
         file.line:
           - name: /etc/myconfig.conf
           - mode: ensure
           - content: my key = my value
           - before: somekey.*?
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.line')

    # Pre-create the file with the requested ownership/mode; the return value
    # of managed() is not checked here — a still-missing file is reported by
    # _check_file() immediately below.
    managed(
        name,
        create=create,
        user=user,
        group=group,
        mode=file_mode)

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    changes = __salt__['file.line'](
        name, content, match=match, mode=mode, location=location,
        before=before, after=after, show_changes=show_changes,
        backup=backup, quiet=quiet, indent=indent)

    if changes:
        ret['pchanges']['diff'] = changes
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes)
        else:
            ret['result'] = True
            ret['comment'] = 'Changes were made'
            ret['changes'] = {'diff': changes}
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'

    return ret


def replace(name,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            show_changes=True,
            ignore_if_missing=False):
    r'''
    Maintain an edit in a file.

    ..
    versionadded:: 0.17.0

    name
        Filesystem path to the file to be edited. If a symlink is specified, it
        will be resolved to its target.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text

    count
        Maximum number of pattern occurrences to be replaced. Defaults to 0.
        If count is a positive integer n, no more than n occurrences will be
        replaced, otherwise all occurrences will be replaced.

    flags
        A list of flags defined in the :ref:`re module documentation
        <contents-of-module-re>`. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults to
        ``8`` (which equates to ``['MULTILINE']``).

        .. note::

            ``file.replace`` reads the entire file as a string to support
            multiline regex patterns. Therefore, when using anchors such as
            ``^`` or ``$`` in the pattern, those anchors may be relative to
            the line OR relative to the file. The default for ``file.replace``
            is to treat anchors as relative to the line, which is implemented
            by setting the default value of ``flags`` to ``['MULTILINE']``.
            When overriding the default value for ``flags``, if
            ``'MULTILINE'`` is not present then anchors will be relative to
            the file. If the desired behavior is for anchors to be relative to
            the line, then simply add ``'MULTILINE'`` to the list of flags.

    bufsize
        How much of the file to buffer into memory at once. The default value
        ``1`` processes one line at a time. The special value ``file`` may be
        specified which will read the entire file into memory before
        processing.

    append_if_not_found : False
        If set to ``True``, and pattern is not found, then the content will be
        appended to the file.

        .. versionadded:: 2014.7.0

    prepend_if_not_found : False
        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.

        .. versionadded:: 2014.7.0

    not_found_content
        Content to use for append/prepend if not found. If ``None`` (default),
        uses ``repl``. Useful when ``repl`` uses references to group in
        pattern.

        .. versionadded:: 2014.7.0

    backup
        The file extension to use for a backup of the file before editing. Set
        to ``False`` to skip making a backup.

    show_changes : True
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Returns a boolean or a string.

        .. note:
            Using this option will store two copies of the file in memory
            (the original version and the edited version) in order to generate the
            diff. This may not normally be a concern, but could impact
            performance if used with large files.

    ignore_if_missing : False
        .. versionadded:: 2016.3.4

        Controls what to do if the file is missing. If set to ``False``, the
        state will display an error raised by the execution module. If set to
        ``True``, the state will simply report no changes.

    For complex regex patterns, it can be useful to avoid the need for complex
    quoting and escape sequences by making use of YAML's multiline string
    syntax.

    .. code-block:: yaml

        complex_search_and_replace:
          file.replace:
            # <...snip...>
            - pattern: |
                CentOS \(2.6.32[^\n]+\n\s+root[^\n]+\n\)+

    .. note::

       When using YAML multiline string syntax in ``pattern:``, make sure to
       also use that syntax in the ``repl:`` part, or you might lose line
       feeds.
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.replace')

    check_res, check_msg = _check_file(name)
    if not check_res:
        if ignore_if_missing and 'file not found' in check_msg:
            # A missing file is acceptable when ignore_if_missing is set.
            ret['comment'] = 'No changes needed to be made'
            return ret
        else:
            return _error(ret, check_msg)

    changes = __salt__['file.replace'](name,
                                       pattern,
                                       repl,
                                       count=count,
                                       flags=flags,
                                       bufsize=bufsize,
                                       append_if_not_found=append_if_not_found,
                                       prepend_if_not_found=prepend_if_not_found,
                                       not_found_content=not_found_content,
                                       backup=backup,
                                       dry_run=__opts__['test'],
                                       show_changes=show_changes,
                                       ignore_if_missing=ignore_if_missing)

    if changes:
        ret['pchanges']['diff'] = changes
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
        else:
            ret['result'] = True
            ret['comment'] = 'Changes were made'
            ret['changes'] = {'diff': changes}
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'

    return ret


def blockreplace(
        name,
        marker_start='#-- start managed zone --',
        marker_end='#-- end managed zone --',
        source=None,
        source_hash=None,
        template='jinja',
        sources=None,
        source_hashes=None,
        defaults=None,
        context=None,
        content='',
        append_if_not_found=False,
        prepend_if_not_found=False,
        backup='.bak',
        show_changes=True):
    '''
    Maintain an edit in a file in a zone delimited by two line markers

    .. versionadded:: 2014.1.0

    A block of content delimited by comments can help you manage several lines
    entries without worrying about old entries removal. This can help you
    maintaining an un-managed file containing manual edits.
    Note: this function will store two copies of the file in-memory
    (the original version and the edited version) in order to detect changes
    and only edit the targeted file if necessary.
    name
        Filesystem path to the file to be edited

    marker_start
        The line content identifying a line as the start of the content block.
        Note that the whole line containing this marker will be considered, so
        whitespace or extra content before or after the marker is included in
        final output

    marker_end
        The line content identifying a line as the end of the content block.
        Note that the whole line containing this marker will be considered, so
        whitespace or extra content before or after the marker is included in
        final output. Note: you can use file.accumulated and target this state.
        All accumulated data dictionaries content will be added as new lines in
        the content

    content
        The content to be used between the two lines identified by
        ``marker_start`` and ``marker_end``

    source
        The source file to download to the minion, this source file can be
        hosted on either the salt master server, or on an HTTP or FTP server.
        Both HTTPS and HTTP are supported as well as downloading directly
        from Amazon S3 compatible URLs with both pre-configured and automatic
        IAM credentials. (see s3.get state documentation)
        File retrieval from Openstack Swift object storage is supported via
        swift://container/object_path URLs, see swift.get documentation.
        For files hosted on the salt file server, if the file is located on
        the master in the directory named spam, and is called eggs, the source
        string is salt://spam/eggs. If source is left blank or None
        (use ~ in YAML), the file will be created as an empty file and
        the content will not be managed. This is also the case when a file
        already exists and the source is undefined; the contents of the file
        will not be changed or managed.

        If the file is hosted on a HTTP or FTP server then the source_hash
        argument is also required.

        A list of sources can also be passed in to provide a default source and
        a set of fallbacks. The first source in the list that is found to exist
        will be used and subsequent entries in the list will be ignored.

        .. code-block:: yaml

            file_override_example:
              file.blockreplace:
                - name: /etc/example.conf
                - source:
                  - salt://file_that_does_not_exist
                  - salt://file_that_exists

    source_hash
        This can be one of the following:
            1. a source hash string
            2. the URI of a file that contains source hash strings

        The function accepts the first encountered long unbroken alphanumeric
        string of correct length as a valid hash, in order from most secure to
        least secure:

        .. code-block:: text

            Type    Length
            ======  ======
            sha512     128
            sha384      96
            sha256      64
            sha224      56
            sha1        40
            md5         32

        See the ``source_hash`` parameter description for :mod:`file.managed
        <salt.states.file.managed>` function for more details and examples.

    template
        The named templating engine will be used to render the downloaded file.
        Defaults to ``jinja``. The following templates are supported:

        - :mod:`cheetah<salt.renderers.cheetah>`
        - :mod:`genshi<salt.renderers.genshi>`
        - :mod:`jinja<salt.renderers.jinja>`
        - :mod:`mako<salt.renderers.mako>`
        - :mod:`py<salt.renderers.py>`
        - :mod:`wempy<salt.renderers.wempy>`

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    append_if_not_found
        If markers are not found and set to True then the markers and content
        will be appended to the file. Default is ``False``

    prepend_if_not_found
        If markers are not found and set to True then the markers and content
        will be prepended to the file. Default is ``False``

    backup
        The file extension to use for a backup of the file if any edit is made.
        Set this to ``False`` to skip making a backup.

    dry_run
        Don't make any edits to the file

    show_changes
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made

    Example of usage with an accumulator and with a variable:

    .. code-block:: yaml

        {% set myvar = 42 %}
        hosts-config-block-{{ myvar }}:
          file.blockreplace:
            - name: /etc/hosts
            - marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-"
            - marker_end: "# END managed zone {{ myvar }} --"
            - content: 'First line of content'
            - append_if_not_found: True
            - backup: '.bak'
            - show_changes: True

        hosts-config-block-{{ myvar }}-accumulated1:
          file.accumulated:
            - filename: /etc/hosts
            - name: my-accumulator-{{ myvar }}
            - text: "text 2"
            - require_in:
              - file: hosts-config-block-{{ myvar }}

        hosts-config-block-{{ myvar }}-accumulated2:
          file.accumulated:
            - filename: /etc/hosts
            - name: my-accumulator-{{ myvar }}
            - text: |
                 text 3
                 text 4
            - require_in:
              - file: hosts-config-block-{{ myvar }}

    will generate and maintain a block of content in ``/etc/hosts``:

    .. code-block:: text

        # START managed zone 42 -DO-NOT-EDIT-
        First line of content
        text 2
        text 3
        text 4
        # END managed zone 42 --
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.blockreplace')

    if sources is None:
        sources = []
    if source_hashes is None:
        source_hashes = []

    (ok_, err, sl_) = _unify_sources_and_hashes(source=source,
                                                source_hash=source_hash,
                                                sources=sources,
                                                source_hashes=source_hashes)
    if not ok_:
        return _error(ret, err)

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    accum_data, accum_deps = _load_accumulators()
    if name in accum_data:
        accumulator = accum_data[name]
        # if we have multiple accumulators for a file, only apply the one
        # required at a time
        deps = accum_deps.get(name, [])
        filtered = [a for a in deps if
                    __low__['__id__'] in deps[a] and a in accumulator]
        if not filtered:
            filtered = [a for a in accumulator]
        for acc in filtered:
            acc_content = accumulator[acc]
            # NOTE(review): the loop variable ``line`` shadows the
            # module-level line() state function inside this loop body.
            for line in acc_content:
                if content == '':
                    content = line
                else:
                    content += "\n" + line

    if sl_:
        tmpret = _get_template_texts(source_list=sl_,
                                     template=template,
                                     defaults=defaults,
                                     context=context)
        if not tmpret['result']:
            return tmpret
        text = tmpret['data']

        # NOTE(review): rendered template texts are concatenated with no
        # separator between items — confirm callers expect this.
        for index, item in enumerate(text):
            content += str(item)

    changes = __salt__['file.blockreplace'](
        name,
        marker_start,
        marker_end,
        content=content,
        append_if_not_found=append_if_not_found,
        prepend_if_not_found=prepend_if_not_found,
        backup=backup,
        dry_run=__opts__['test'],
        show_changes=show_changes
    )

    if changes:
        ret['pchanges'] = {'diff': changes}
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Changes would be made'
        else:
            ret['changes'] = {'diff': changes}
            ret['result'] = True
            ret['comment'] = 'Changes were made'
    else:
        ret['result'] = True
        ret['comment'] = 'No changes needed to be made'

    return ret


def comment(name, regex, char='#', backup='.bak'):
    '''
    Comment out specified lines in a file.

    name
        The full path to the file to be edited
    regex
        A regular expression used to find the lines that are to be commented;
        this pattern will be wrapped in parenthesis and will move any
        preceding/trailing ``^`` or ``$`` characters outside the parenthesis
        (e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
        Note that you _need_ the leading ^, otherwise each time you run
        highstate, another comment char will be inserted.
    char : ``#``
        The character to be inserted at the beginning of a line in order to
        comment it out
    backup : ``.bak``
        The file will be backed up before edit with this file extension

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment`` /
            ``uncomment`` is called. Meaning the backup will only be useful
            after the first invocation.

        Set to False/None to not keep a backup.

    Usage:

    .. code-block:: yaml

        /etc/fstab:
          file.comment:
            - regex: ^bind 127.0.0.1

    ..
    versionadded:: 0.9.5
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.comment')

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    unanchor_regex = regex.lstrip('^').rstrip('$')
    comment_regex = char + unanchor_regex

    # Check if the line is already commented
    if __salt__['file.search'](name, comment_regex, multiline=True):
        commented = True
    else:
        commented = False

    # Make sure the pattern appears in the file before continuing
    if commented or not __salt__['file.search'](name, regex, multiline=True):
        if __salt__['file.search'](name, unanchor_regex, multiline=True):
            ret['comment'] = 'Pattern already commented'
            ret['result'] = True
            return ret
        else:
            return _error(ret, '{0}: Pattern not found'.format(unanchor_regex))

    ret['pchanges'][name] = 'updated'
    if __opts__['test']:
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        return ret
    with salt.utils.fopen(name, 'rb') as fp_:
        slines = fp_.readlines()

    # Perform the edit
    __salt__['file.comment_line'](name, regex, char, True, backup)

    with salt.utils.fopen(name, 'rb') as fp_:
        nlines = fp_.readlines()

    # Check the result
    ret['result'] = __salt__['file.search'](name, unanchor_regex, multiline=True)

    if slines != nlines:
        if not salt.utils.istextfile(name):
            ret['changes']['diff'] = 'Replace binary file'
        else:
            # Changes happened, add them
            ret['changes']['diff'] = (
                ''.join(difflib.unified_diff(slines, nlines))
            )

    if ret['result']:
        ret['comment'] = 'Commented lines successfully'
    else:
        ret['comment'] = 'Expected commented lines not found'

    return ret


def uncomment(name, regex, char='#', backup='.bak'):
    '''
    Uncomment specified commented lines in a file

    name
        The full path to the file to be edited
    regex
        A regular expression used to find the lines that are to be uncommented.
        This regex should not include the comment character. A leading ``^``
        character will be stripped for convenience (for easily switching
        between comment() and uncomment()).  The regex will be searched for
        from the beginning of the line, ignoring leading spaces (we prepend
        '^[ \\t]*')
    char : ``#``
        The character to remove in order to uncomment a line
    backup : ``.bak``
        The file will be backed up before edit with this file extension;

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment`` /
            ``uncomment`` is called. Meaning the backup will only be useful
            after the first invocation.

        Set to False/None to not keep a backup.

    Usage:

    .. code-block:: yaml

        /etc/adduser.conf:
          file.uncomment:
            - regex: EXTRA_GROUPS

    .. versionadded:: 0.9.5
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.uncomment')

    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)

    # Make sure the pattern appears in the file
    if __salt__['file.search'](
            name,
            '^[ \t]*{0}'.format(regex.lstrip('^')),
            multiline=True):
        ret['comment'] = 'Pattern already uncommented'
        ret['result'] = True
        return ret
    elif __salt__['file.search'](
            name,
            '{0}[ \t]*{1}'.format(char, regex.lstrip('^')),
            multiline=True):
        # Line exists and is commented
        pass
    else:
        return _error(ret, '{0}: Pattern not found'.format(regex))

    ret['pchanges'][name] = 'updated'
    if __opts__['test']:
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        return ret

    with salt.utils.fopen(name, 'rb') as fp_:
        slines = fp_.readlines()

    # Perform the edit
    __salt__['file.comment_line'](name, regex, char, False, backup)

    with salt.utils.fopen(name, 'rb') as fp_:
        nlines = fp_.readlines()

    # Check the result
    ret['result'] = __salt__['file.search'](
        name,
        '^[ \t]*{0}'.format(regex.lstrip('^')),
        multiline=True
    )

    if slines != nlines:
        if not salt.utils.istextfile(name):
            ret['changes']['diff'] = 'Replace binary file'
        else:
            # Changes happened, add them
            ret['changes']['diff'] = (
                ''.join(difflib.unified_diff(slines, nlines))
            )

    if ret['result']:
        ret['comment'] = 'Uncommented lines successfully'
    else:
        ret['comment'] = 'Expected uncommented lines not found'

    return ret


def append(name,
           text=None,
           makedirs=False,
           source=None,
           source_hash=None,
           template='jinja',
           sources=None,
           source_hashes=None,
           defaults=None,
           context=None,
           ignore_whitespace=True):
    '''
    Ensure that some text appears at the end of a file.

    The text will not be appended if it already exists in the file.
    A single string of text or a list of strings may be appended.

    name
        The location of the file to append to.

    text
        The text to be appended, which can be a single string or a list
        of strings.

    makedirs
        If the file is located in a path without a parent directory,
        then the state will fail. If makedirs is set to True, then
        the parent directories will be created to facilitate the
        creation of the named file. Defaults to False.

    source
        A single source file to append. This source file can be hosted on
        either the salt master server, or on an HTTP or FTP server. Both
        HTTPS and HTTP are supported as well as downloading directly
        from Amazon S3 compatible URLs with both pre-configured and automatic
        IAM credentials (see s3.get state documentation). File retrieval
        from Openstack Swift object storage is supported via
        swift://container/object_path URLs (see swift.get documentation).

        For files hosted on the salt file server, if the file is located on
        the master in the directory named spam, and is called eggs, the
        source string is salt://spam/eggs.

        If the file is hosted on an HTTP or FTP server, the source_hash
        argument is also required.

    source_hash
        This can be one of the following:
            1. a source hash string
            2. the URI of a file that contains source hash strings

        The function accepts the first encountered long unbroken alphanumeric
        string of correct length as a valid hash, in order from most secure to
        least secure:

        ..
    code-block:: text

            Type    Length
            ======  ======
            sha512     128
            sha384      96
            sha256      64
            sha224      56
            sha1        40
            md5         32

        See the ``source_hash`` parameter description for :mod:`file.managed
        <salt.states.file.managed>` function for more details and examples.

    template
        The named templating engine will be used to render the appended-to
        file. Defaults to ``jinja``. The following templates are supported:

        - :mod:`cheetah<salt.renderers.cheetah>`
        - :mod:`genshi<salt.renderers.genshi>`
        - :mod:`jinja<salt.renderers.jinja>`
        - :mod:`mako<salt.renderers.mako>`
        - :mod:`py<salt.renderers.py>`
        - :mod:`wempy<salt.renderers.wempy>`

    sources
        A list of source files to append. If the files are hosted on an HTTP or
        FTP server, the source_hashes argument is also required.

    source_hashes
        A list of source_hashes corresponding to the sources list specified in
        the sources argument.

    defaults
        Default context passed to the template.

    context
        Overrides default context variables passed to the template.

    ignore_whitespace
        .. versionadded:: 2015.8.4

        Spaces and Tabs in text are ignored by default, when searching for the
        appending content, one space or multiple tabs are the same for salt.
        Set this option to ``False`` if you want to change this behavior.

    Multi-line example:

    .. code-block:: yaml

        /etc/motd:
          file.append:
            - text: |
                Thou hadst better eat salt with the Philosophers of Greece,
                than sugar with the Courtiers of Italy.
                - Benjamin Franklin

    Multiple lines of text:

    .. code-block:: yaml

        /etc/motd:
          file.append:
            - text:
              - Trust no one unless you have eaten much salt with him.
              - "Salt is born of the purest of parents: the sun and the sea."

    Gather text from multiple template files:

    .. code-block:: yaml

        /etc/motd:
          file:
            - append
            - template: jinja
            - sources:
              - salt://motd/devops-messages.tmpl
              - salt://motd/hr-messages.tmpl
              - salt://motd/general-messages.tmpl

    .. versionadded:: 0.9.5
    '''
    ret = {'name': name,
           'changes': {},
           'pchanges': {},
           'result': False,
           'comment': ''}

    if not name:
        return _error(ret, 'Must provide name to file.append')

    name = os.path.expanduser(name)

    if sources is None:
        sources = []

    if source_hashes is None:
        source_hashes = []

    # Add sources and source_hashes with template support
    # NOTE: FIX 'text' and any 'source' are mutually exclusive as 'text'
    #       is re-assigned in the original code.
    (ok_, err, sl_) = _unify_sources_and_hashes(source=source,
                                                source_hash=source_hash,
                                                sources=sources,
                                                source_hashes=source_hashes)
    if not ok_:
        return _error(ret, err)

    if makedirs is True:
        dirname = os.path.dirname(name)
        if not __salt__['file.directory_exists'](dirname):
            __salt__['file.makedirs'](name)
            check_res, check_msg, ret['pchanges'] = _check_directory(
                dirname, None, None, False, None, False, False, None
            )
            if not check_res:
                return _error(ret, check_msg)

    check_res, check_msg = _check_file(name)
    if not check_res:
        # Try to create the file
        touch(name, makedirs=makedirs)
        retry_res, retry_msg = _check_file(name)
        if not retry_res:
            return _error(ret, check_msg)

    # Follow the original logic and re-assign 'text' if using source(s)...
    if sl_:
        tmpret = _get_template_texts(source_list=sl_,
                                     template=template,
                                     defaults=defaults,
                                     context=context)
        if not tmpret['result']:
            return tmpret
        text = tmpret['data']

    text = _validate_str_list(text)

    with salt.utils.fopen(name, 'rb') as fp_:
        slines = fp_.read().splitlines()

    append_lines = []
    try:
        for chunk in text:
            # Skip chunks whose content is already present in the file
            # (whitespace-insensitively unless ignore_whitespace=False).
            if ignore_whitespace:
                if __salt__['file.search'](
                        name,
                        salt.utils.build_whitespace_split_regex(chunk),
                        multiline=True):
                    continue
            elif __salt__['file.search'](
                    name,
                    chunk,
                    multiline=True):
                continue

            for line_item in chunk.splitlines():
                append_lines.append('{0}'.format(line_item))

    except TypeError:
        return _error(ret, 'No text found to append. Nothing appended')

    if __opts__['test']:
        ret['comment'] = 'File {0} is set to be updated'.format(name)
        ret['result'] = None
        nlines = list(slines)
        nlines.extend(append_lines)
        if slines != nlines:
            if not salt.utils.istextfile(name):
                ret['changes']['diff'] = 'Replace binary file'
            else:
                # Changes happened, add them
                ret['changes']['diff'] = (
                    '\n'.join(difflib.unified_diff(slines, nlines))
                )
        else:
            ret['comment'] = 'File {0} is in correct state'.format(name)
            ret['result'] = True
        return ret

    if append_lines:
        __salt__['file.append'](name, args=append_lines)
        ret['comment'] = 'Appended {0} lines'.format(len(append_lines))
    else:
        ret['comment'] = 'File {0} is in correct state'.format(name)

    with salt.utils.fopen(name, 'rb') as fp_:
        nlines = fp_.read().splitlines()

    if slines != nlines:
        if not salt.utils.istextfile(name):
            ret['changes']['diff'] = 'Replace binary file'
        else:
            # Changes happened, add them
            ret['changes']['diff'] = (
                '\n'.join(difflib.unified_diff(slines, nlines)))

    ret['result'] = True

    return ret


def prepend(name,
            text=None,
            makedirs=False,
            source=None,
            source_hash=None,
            template='jinja',
            sources=None,
            source_hashes=None,
            defaults=None,
            context=None,
            header=None):
    '''
    Ensure that some text appears at the beginning of a file

    The text will not be prepended again if it already exists in the file. You
    may specify a single line of text or a list of lines to append.

    Multi-line example:

    .. code-block:: yaml

        /etc/motd:
          file.prepend:
            - text: |
                Thou hadst better eat salt with the Philosophers of Greece,
                than sugar with the Courtiers of Italy.
                - Benjamin Franklin

    Multiple lines of text:

    .. code-block:: yaml

        /etc/motd:
          file.prepend:
            - text:
              - Trust no one unless you have eaten much salt with him.
              - "Salt is born of the purest of parents: the sun and the sea."

    Optionally, require the text to appear exactly as specified
    (order and position). Combine with multi-line or multiple lines of input.

    ..
code-block:: yaml /etc/motd: file.prepend: - header: True - text: - This will be the very first line in the file. - The 2nd line, regardless of duplicates elsewhere in the file. - These will be written anew if they do not appear verbatim. Gather text from multiple template files: .. code-block:: yaml /etc/motd: file: - prepend - template: jinja - sources: - salt://motd/devops-messages.tmpl - salt://motd/hr-messages.tmpl - salt://motd/general-messages.tmpl .. versionadded:: 2014.7.0 ''' name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, 'pchanges': {}, 'result': False, 'comment': ''} if not name: return _error(ret, 'Must provide name to file.prepend') if sources is None: sources = [] if source_hashes is None: source_hashes = [] # Add sources and source_hashes with template support # NOTE: FIX 'text' and any 'source' are mutually exclusive as 'text' # is re-assigned in the original code. (ok_, err, sl_) = _unify_sources_and_hashes(source=source, source_hash=source_hash, sources=sources, source_hashes=source_hashes) if not ok_: return _error(ret, err) if makedirs is True: dirname = os.path.dirname(name) if not __salt__['file.directory_exists'](dirname): __salt__['file.makedirs'](name) check_res, check_msg, ret['pchanges'] = _check_directory( dirname, None, None, False, None, False, False, None ) if not check_res: return _error(ret, check_msg) check_res, check_msg = _check_file(name) if not check_res: # Try to create the file touch(name, makedirs=makedirs) retry_res, retry_msg = _check_file(name) if not retry_res: return _error(ret, check_msg) # Follow the original logic and re-assign 'text' if using source(s)... 
if sl_: tmpret = _get_template_texts(source_list=sl_, template=template, defaults=defaults, context=context) if not tmpret['result']: return tmpret text = tmpret['data'] text = _validate_str_list(text) with salt.utils.fopen(name, 'rb') as fp_: slines = fp_.readlines() count = 0 test_lines = [] preface = [] for chunk in text: # if header kwarg is unset of False, use regex search if not header: if __salt__['file.search']( name, salt.utils.build_whitespace_split_regex(chunk), multiline=True): continue lines = chunk.splitlines() for line in lines: if __opts__['test']: ret['comment'] = 'File {0} is set to be updated'.format(name) ret['result'] = None test_lines.append('{0}\n'.format(line)) else: preface.append(line) count += 1 if __opts__['test']: nlines = test_lines + slines if slines != nlines: if not salt.utils.istextfile(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them ret['changes']['diff'] = ( ''.join(difflib.unified_diff(slines, nlines)) ) ret['result'] = None else: ret['comment'] = 'File {0} is in correct state'.format(name) ret['result'] = True return ret # if header kwarg is True, use verbatim compare if header: with salt.utils.fopen(name, 'rb') as fp_: # read as many lines of target file as length of user input target_head = fp_.readlines()[0:len(preface)] target_lines = [] # strip newline chars from list entries for chunk in target_head: target_lines += chunk.splitlines() # compare current top lines in target file with user input # and write user input if they differ if target_lines != preface: __salt__['file.prepend'](name, *preface) else: # clear changed lines counter if target file not modified count = 0 else: __salt__['file.prepend'](name, *preface) with salt.utils.fopen(name, 'rb') as fp_: nlines = fp_.readlines() if slines != nlines: if not salt.utils.istextfile(name): ret['changes']['diff'] = 'Replace binary file' else: # Changes happened, add them ret['changes']['diff'] = ( ''.join(difflib.unified_diff(slines, 
nlines)) ) if count: ret['comment'] = 'Prepended {0} lines'.format(count) else: ret['comment'] = 'File {0} is in correct state'.format(name) ret['result'] = True return ret def patch(name, source=None, options='', dry_run_first=True, **kwargs): ''' Apply a patch to a file or directory. .. note:: A suitable ``patch`` executable must be available on the minion when using this state function. name The file or directory to which the patch will be applied. source The source patch to download to the minion, this source file must be hosted on the salt master server. If the file is located in the directory named spam, and is called eggs, the source string is salt://spam/eggs. A source is required. hash The hash of the patched file. If the hash of the target file matches this value then the patch is assumed to have been applied. For versions 2016.11.4 and newer, the hash can be specified without an accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``), but for earlier releases it is necessary to also specify the hash type in the format ``<hash_type>:<hash_value>`` (e.g. ``md5:e138491e9d5b97023cea823fe17bac22``). options Extra options to pass to patch. dry_run_first : ``True`` Run patch with ``--dry-run`` first to check if it will apply cleanly. saltenv Specify the environment from which to retrieve the patch file indicated by the ``source`` parameter. If not provided, this defaults to the environment from which the state is being executed. **Usage:** .. code-block:: yaml # Equivalent to ``patch --forward /opt/file.txt file.patch`` /opt/file.txt: file.patch: - source: salt://file.patch - hash: e138491e9d5b97023cea823fe17bac22 .. note:: For minions running version 2016.11.3 or older, the hash in the example above would need to be specified with the hash type (i.e. ``md5:e138491e9d5b97023cea823fe17bac22``). ''' hash_ = kwargs.pop('hash', None) if 'env' in kwargs: salt.utils.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. 
This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        kwargs.pop('env')

    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.patch')
    check_res, check_msg = _check_file(name)
    if not check_res:
        return _error(ret, check_msg)
    if not source:
        return _error(ret, 'Source is required')
    if hash_ is None:
        return _error(ret, 'Hash is required')

    try:
        # If the target already hashes to the expected value, the patch has
        # already been applied and there is nothing to do.
        if hash_ and __salt__['file.check_hash'](name, hash_):
            ret['result'] = True
            ret['comment'] = 'Patch is already applied'
            return ret
    except (SaltInvocationError, ValueError) as exc:
        ret['comment'] = exc.__str__()
        return ret

    # get cached file or copy it to cache
    cached_source_path = __salt__['cp.cache_file'](source, __env__)
    if not cached_source_path:
        ret['comment'] = ('Unable to cache {0} from saltenv \'{1}\''
                          .format(source, __env__))
        return ret

    log.debug(
        'State patch.applied cached source %s -> %s',
        source, cached_source_path
    )

    if dry_run_first or __opts__['test']:
        ret['changes'] = __salt__['file.patch'](
            name, cached_source_path, options=options, dry_run=True
        )
        if __opts__['test']:
            ret['comment'] = 'File {0} will be patched'.format(name)
            ret['result'] = None
            return ret
        # Dry run did not apply cleanly; return without touching the target
        if ret['changes']['retcode'] != 0:
            return ret

    ret['changes'] = __salt__['file.patch'](
        name, cached_source_path, options=options
    )
    ret['result'] = ret['changes']['retcode'] == 0
    # No need to check for SaltInvocationError or ValueError this time, since
    # these exceptions would have been caught above.
    if ret['result'] and hash_ and not __salt__['file.check_hash'](name, hash_):
        ret['result'] = False
        ret['comment'] = 'Hash mismatch after patch was applied'
    return ret


def touch(name, atime=None, mtime=None, makedirs=False):
    '''
    Replicate the 'nix "touch" command to create a new empty
    file or update the atime and mtime of an existing file.

    Note that if you just want to create a file and don't care about atime or
    mtime, you should use ``file.managed`` instead, as it is more
    feature-complete.  (Just leave out the ``source``/``template``/``contents``
    arguments, and it will just create the file and/or check its permissions,
    without messing with contents)

    name
        name of the file

    atime
        atime of the file

    mtime
        mtime of the file

    makedirs
        whether we should create the parent directory/directories in order to
        touch the file

    Usage:

    .. code-block:: yaml

        /var/log/httpd/logrotate.empty:
          file.touch

    .. versionadded:: 0.9.5
    '''
    name = os.path.expanduser(name)

    # 'result' and 'comment' are filled in below or by _error()
    ret = {
        'name': name,
        'changes': {},
    }
    if not name:
        return _error(ret, 'Must provide name to file.touch')
    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name)
        )

    if __opts__['test']:
        ret['result'], ret['comment'] = _check_touch(name, atime, mtime)
        return ret

    if makedirs:
        __salt__['file.makedirs'](name)
    if not os.path.isdir(os.path.dirname(name)):
        return _error(
            ret, 'Directory not present to touch file {0}'.format(name)
        )

    # Remember whether the file existed so we can report create vs. update
    extant = os.path.exists(name)

    ret['result'] = __salt__['file.touch'](name, atime, mtime)
    if not extant and ret['result']:
        ret['comment'] = 'Created empty file {0}'.format(name)
        ret['changes']['new'] = name
    elif extant and ret['result']:
        ret['comment'] = 'Updated times on {0} {1}'.format(
            'directory' if os.path.isdir(name) else 'file', name
        )
        ret['changes']['touched'] = name

    return ret


def copy(
        name,
        source,
        force=False,
        makedirs=False,
        preserve=False,
        user=None,
        group=None,
        mode=None,
        subdir=False,
        **kwargs):
    '''
    If the source file exists on the system, copy it to the named file. The
    named file will not be overwritten if it already exists unless the force
    option is set to True. 
    name
        The location of the file to copy to

    source
        The location of the file to copy to the location specified with name

    force
        If the target location is present then the file will not be moved,
        specify "force: True" to overwrite the target file

    makedirs
        If the target subdirectories don't exist create them

    preserve
        .. versionadded:: 2015.5.0

        Set ``preserve: True`` to preserve user/group ownership and mode
        after copying. Default is ``False``. If ``preserve`` is set to
        ``True``, then user/group/mode attributes will be ignored.

    user
        .. versionadded:: 2015.5.0

        The user to own the copied file, this defaults to the user salt is
        running as on the minion. If ``preserve`` is set to ``True``, then
        this will be ignored

    group
        .. versionadded:: 2015.5.0

        The group to own the copied file, this defaults to the group salt is
        running as on the minion. If ``preserve`` is set to ``True`` or on
        Windows this will be ignored

    mode
        .. versionadded:: 2015.5.0

        The permissions to set on the copied file, aka 644, '0775', '4664'.
        If ``preserve`` is set to ``True``, then this will be ignored.
        Not supported on Windows.

        The default mode for new files and directories corresponds umask of salt
        process. For existing files and directories it's not enforced.

    subdir
        .. versionadded:: 2015.5.0

        If the name is a directory then place the file inside the named
        directory

    .. note::
        The copy function accepts paths that are local to the Salt minion.
        This function does not support salt://, http://, or the other
        additional file paths that are supported by :mod:`states.file.managed
        <salt.states.file.managed>` and :mod:`states.file.recurse
        <salt.states.file.recurse>`.
    '''
    name = os.path.expanduser(name)
    source = os.path.expanduser(source)

    ret = {
        'name': name,
        'changes': {},
        'comment': 'Copied "{0}" to "{1}"'.format(source, name),
        'result': True}
    if not name:
        return _error(ret, 'Must provide name to file.copy')

    changed = True
    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name))

    if not os.path.exists(source):
        return _error(ret, 'Source file "{0}" is not present'.format(source))

    if preserve:
        # Take ownership/mode from the source file instead of the args
        user = __salt__['file.get_user'](source)
        group = __salt__['file.get_group'](source)
        mode = __salt__['file.get_mode'](source)
    else:
        user = _test_owner(kwargs, user=user)
        if user is None:
            user = __opts__['user']

        if salt.utils.is_windows():
            if group is not None:
                log.warning(
                    'The group argument for {0} has been ignored as this is '
                    'a Windows system.'.format(name)
                )
            group = user

        if group is None:
            group = __salt__['file.gid_to_group'](
                __salt__['user.info'](user).get('gid', 0)
            )

        u_check = _check_user(user, group)
        if u_check:
            # The specified user or group do not exist
            return _error(ret, u_check)

        if mode is None:
            mode = __salt__['file.get_mode'](source)

    if os.path.isdir(name) and subdir:
        # If the target is a dir, and overwrite_dir is False, copy into the dir
        name = os.path.join(name, os.path.basename(source))

    if os.path.lexists(source) and os.path.lexists(name):
        # if this is a file which did not change, do not update
        if force and os.path.isfile(name):
            hash1 = salt.utils.get_hash(name)
            hash2 = salt.utils.get_hash(source)
            if hash1 == hash2:
                # force is set, so copy even though contents are identical
                changed = True
                ret['comment'] = ' '.join([ret['comment'], '- files are identical but force flag is set'])
        if not force:
            changed = False
        elif not __opts__['test'] and changed:
            # Remove the destination to prevent problems later
            try:
                __salt__['file.remove'](name)
            except (IOError, OSError):
                return _error(
                    ret,
                    'Failed to delete "{0}" in preparation for '
                    'forced move'.format(name)
                )

    if __opts__['test']:
        if changed:
            ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format(
                source,
                name
            )
            ret['result'] = None
        else:
            ret['comment'] = ('The target file "{0}" exists and will not be '
                              'overwritten'.format(name))
            ret['result'] = True
        return ret

    if not changed:
        ret['comment'] = ('The target file "{0}" exists and will not be '
                          'overwritten'.format(name))
        ret['result'] = True
        return ret

    # Run makedirs
    dname = os.path.dirname(name)
    if not os.path.isdir(dname):
        if makedirs:
            __salt__['file.makedirs'](name)
        else:
            return _error(
                ret,
                'The target directory {0} is not present'.format(dname))
    # All tests pass, move the file into place
    try:
        if os.path.isdir(source):
            shutil.copytree(source, name, symlinks=True)
            for root, dirs, files in os.walk(name):
                for dir_ in dirs:
                    __salt__['file.lchown'](os.path.join(root, dir_), user, group)
                for file_ in files:
                    __salt__['file.lchown'](os.path.join(root, file_), user, group)
        else:
            shutil.copy(source, name)
        ret['changes'] = {name: source}
        # Preserve really means just keep the behavior of the cp command. If
        # the filesystem we're copying to is squashed or doesn't support chown
        # then we shouldn't be checking anything.
        if not preserve:
            __salt__['file.check_perms'](name, ret, user, group, mode)
    except (IOError, OSError):
        return _error(
            ret, 'Failed to copy "{0}" to "{1}"'.format(source, name))
    return ret


def rename(name, source, force=False, makedirs=False):
    '''
    If the source file exists on the system, rename it to the named file. The
    named file will not be overwritten if it already exists unless the force
    option is set to True.

    name
        The location of the file to rename to

    source
        The location of the file to move to the location specified with name

    force
        If the target location is present then the file will not be moved,
        specify "force: True" to overwrite the target file

    makedirs
        If the target subdirectories don't exist create them
    '''
    name = os.path.expanduser(name)
    source = os.path.expanduser(source)

    ret = {
        'name': name,
        'changes': {},
        'comment': '',
        'result': True}
    if not name:
        return _error(ret, 'Must provide name to file.rename')

    if not os.path.isabs(name):
        return _error(
            ret, 'Specified file {0} is not an absolute path'.format(name))

    if not os.path.lexists(source):
        ret['comment'] = ('Source file "{0}" has already been moved out of '
                          'place').format(source)
        return ret

    if os.path.lexists(source) and os.path.lexists(name):
        if not force:
            ret['comment'] = ('The target file "{0}" exists and will not be '
                              'overwritten'.format(name))
            ret['result'] = False
            return ret
        elif not __opts__['test']:
            # Remove the destination to prevent problems later
            try:
                __salt__['file.remove'](name)
            except (IOError, OSError):
                return _error(
                    ret,
                    'Failed to delete "{0}" in preparation for '
                    'forced move'.format(name)
                )

    if __opts__['test']:
        ret['comment'] = 'File "{0}" is set to be moved to "{1}"'.format(
            source,
            name
        )
        ret['result'] = None
        return ret

    # Run makedirs
    dname = os.path.dirname(name)
    if not os.path.isdir(dname):
        if makedirs:
            __salt__['file.makedirs'](name)
        else:
            return _error(
                ret,
                'The target directory {0} is not present'.format(dname))
    # All tests pass, move the file into place
    try:
        if os.path.islink(source):
            # Recreate the symlink at the new location rather than moving it
            linkto = os.readlink(source)
            os.symlink(linkto, name)
            os.unlink(source)
        else:
            shutil.move(source, name)
    except (IOError, OSError):
        return _error(
            ret, 'Failed to move "{0}" to "{1}"'.format(source, name))

    ret['comment'] = 'Moved "{0}" to "{1}"'.format(source, name)
    ret['changes'] = {name: source}
    return ret


def accumulated(name, filename, text, **kwargs):
    '''
    Prepare accumulator which can be used in 
template in file.managed state.
    Accumulator dictionary becomes available in template. It can also be used
    in file.blockreplace.

    name
        Accumulator name

    filename
        Filename which would receive this accumulator (see file.managed state
        documentation about ``name``)

    text
        String or list for adding in accumulator

    require_in / watch_in
        One of them required for sure we fill up accumulator before we manage
        the file. Probably the same as filename

    Example:

    Given the following:

    .. code-block:: yaml

        animals_doing_things:
          file.accumulated:
            - filename: /tmp/animal_file.txt
            - text: ' jumps over the lazy dog.'
            - require_in:
              - file: animal_file

        animal_file:
          file.managed:
            - name: /tmp/animal_file.txt
            - source: salt://animal_file.txt
            - template: jinja

    One might write a template for ``animal_file.txt`` like the following:

    .. code-block:: jinja

        The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %}

    Collectively, the above states and template file will produce:

    .. code-block:: text

        The quick brown fox jumps over the lazy dog.

    Multiple accumulators can be "chained" together.

    .. note::
        The 'accumulator' data structure is a Python dictionary.
        Do not expect any loop over the keys in a deterministic order!
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': ''
    }
    if not name:
        return _error(ret, 'Must provide name to file.accumulated')
    if text is None:
        ret['result'] = False
        ret['comment'] = 'No text supplied for accumulator'
        return ret
    require_in = __low__.get('require_in', [])
    watch_in = __low__.get('watch_in', [])
    deps = require_in + watch_in
    # The accumulator must be tied to a file state via require_in/watch_in,
    # otherwise nothing would ever consume it
    if not [x for x in deps if 'file' in x]:
        ret['result'] = False
        ret['comment'] = 'Orphaned accumulator {0} in {1}:{2}'.format(
            name,
            __low__['__sls__'],
            __low__['__id__']
        )
        return ret
    if isinstance(text, six.string_types):
        text = (text,)
    elif isinstance(text, dict):
        text = (text,)
    accum_data, accum_deps = _load_accumulators()
    if filename not in accum_data:
        accum_data[filename] = {}
    if filename not in accum_deps:
        accum_deps[filename] = {}

    if name not in accum_deps[filename]:
        accum_deps[filename][name] = []
    for accumulator in deps:
        accum_deps[filename][name].extend(six.itervalues(accumulator))

    if name not in accum_data[filename]:
        accum_data[filename][name] = []
    for chunk in text:
        # Only record chunks not already present in the accumulator
        if chunk not in accum_data[filename][name]:
            accum_data[filename][name].append(chunk)
            ret['comment'] = ('Accumulator {0} for file {1} '
                              'was charged by text'.format(name, filename))
    _persist_accummulators(accum_data, accum_deps)
    return ret


def serialize(name,
              dataset=None,
              dataset_pillar=None,
              user=None,
              group=None,
              mode=None,
              backup='',
              makedirs=False,
              show_diff=None,
              show_changes=True,
              create=True,
              merge_if_exists=False,
              **kwargs):
    '''
    Serializes dataset and store it into managed file. Useful for sharing
    simple configuration files.

    name
        The location of the file to create

    dataset
        The dataset that will be serialized

    dataset_pillar
        Operates like ``dataset``, but draws from a value stored in pillar,
        using the pillar path syntax used in :mod:`pillar.get
        <salt.modules.pillar.get>`. 
This is useful when the pillar value contains newlines, as referencing a pillar variable using a jinja/mako template can result in YAML formatting issues due to the newlines causing indentation mismatches. .. versionadded:: 2015.8.0 formatter Write the data as this format. Supported output formats: * JSON * YAML * Python (via pprint.pformat) user The user to own the directory, this defaults to the user salt is running as on the minion group The group ownership set for the directory, this defaults to the group salt is running as on the minion mode The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``. The default mode for new files and directories corresponds umask of salt process. For existing files and directories it's not enforced. .. note:: This option is **not** supported on Windows. backup Overrides the default backup mode for this specific file. makedirs Create parent directories for destination file. .. versionadded:: 2014.1.3 show_diff DEPRECATED: Please use show_changes. If set to ``False``, the diff will not be shown in the return data if changes are made. show_changes Output a unified diff of the old file and the new file. If ``False`` return a boolean if any changes were made. create Default is True, if create is set to False then the file will only be managed if the file already exists on the system. merge_if_exists Default is False, if merge_if_exists is True then the existing file will be parsed and the dataset passed in will be merged with the existing content .. versionadded:: 2014.7.0 For example, this state: .. code-block:: yaml /etc/dummy/package.json: file.serialize: - dataset: name: naive description: A package using naive versioning author: A confused individual <iam@confused.com> dependencies: express: >= 1.2.0 optimist: >= 0.1.0 engine: node 0.4.1 - formatter: json will manage the file ``/etc/dummy/package.json``: .. 
code-block:: json { "author": "A confused individual <iam@confused.com>", "dependencies": { "express": ">= 1.2.0", "optimist": ">= 0.1.0" }, "description": "A package using naive versioning", "engine": "node 0.4.1", "name": "naive" } ''' if 'env' in kwargs: salt.utils.warn_until( 'Oxygen', 'Parameter \'env\' has been detected in the argument list. This ' 'parameter is no longer used and has been replaced by \'saltenv\' ' 'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.' ) kwargs.pop('env') name = os.path.expanduser(name) default_serializer_opts = {'yaml.serialize': {'default_flow_style': False}, 'json.serialize': {'indent': 2, 'separators': (',', ': '), 'sort_keys': True} } ret = {'changes': {}, 'comment': '', 'name': name, 'result': True} if not name: return _error(ret, 'Must provide name to file.serialize') if not create: if not os.path.isfile(name): # Don't create a file that is not already present ret['comment'] = ('File {0} is not present and is not set for ' 'creation').format(name) return ret formatter = kwargs.pop('formatter', 'yaml').lower() if len([x for x in (dataset, dataset_pillar) if x]) > 1: return _error( ret, 'Only one of \'dataset\' and \'dataset_pillar\' is permitted') if dataset_pillar: dataset = __salt__['pillar.get'](dataset_pillar) if dataset is None: return _error( ret, 'Neither \'dataset\' nor \'dataset_pillar\' was defined') if salt.utils.is_windows(): if group is not None: log.warning( 'The group argument for {0} has been ignored as this ' 'is a Windows system.'.format(name) ) group = user serializer_name = '{0}.serialize'.format(formatter) deserializer_name = '{0}.deserialize'.format(formatter) if serializer_name not in __serializers__: return {'changes': {}, 'comment': '{0} format is not supported'.format( formatter.capitalize()), 'name': name, 'result': False } if merge_if_exists: if os.path.isfile(name): if '{0}.deserialize'.format(formatter) not in __serializers__: return {'changes': {}, 'comment': ('{0} format is 
not supported for merging' .format(formatter.capitalize())), 'name': name, 'result': False} with salt.utils.fopen(name, 'r') as fhr: existing_data = __serializers__[deserializer_name](fhr) if existing_data is not None: merged_data = salt.utils.dictupdate.merge_recurse(existing_data, dataset) if existing_data == merged_data: ret['result'] = True ret['comment'] = 'The file {0} is in the correct state'.format(name) return ret dataset = merged_data contents = __serializers__[serializer_name](dataset, **default_serializer_opts.get(serializer_name, {})) contents += '\n' # Make sure that any leading zeros stripped by YAML loader are added back mode = salt.utils.normalize_mode(mode) if show_diff is not None: show_changes = show_diff msg = ( 'The \'show_diff\' argument to the file.serialized state has been ' 'deprecated, please use \'show_changes\' instead.' ) salt.utils.warn_until('Oxygen', msg) if __opts__['test']: ret['changes'] = __salt__['file.check_managed_changes']( name=name, source=None, source_hash={}, source_hash_name=None, user=user, group=group, mode=mode, template=None, context=None, defaults=None, saltenv=__env__, contents=contents, skip_verify=False, **kwargs ) if ret['changes']: ret['result'] = None ret['comment'] = 'Dataset will be serialized and stored into {0}'.format( name) if not show_changes: ret['changes']['diff'] = '<show_changes=False>' else: ret['result'] = True ret['comment'] = 'The file {0} is in the correct state'.format(name) return ret return __salt__['file.manage_file'](name=name, sfn='', ret=ret, source=None, source_sum={}, user=user, group=group, mode=mode, saltenv=__env__, backup=backup, makedirs=makedirs, template=None, show_changes=show_changes, contents=contents) def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'): ''' Create a special file similar to the 'nix mknod command. The supported device types are ``p`` (fifo pipe), ``c`` (character device), and ``b`` (block device). 
Provide the major and minor numbers when specifying a
    character device or block device. A fifo pipe does not require this
    information. The command will create the necessary dirs if needed. If a
    file of the same name not of the same type/major/minor exists, it will not
    be overwritten or unlinked (deleted). This is logically in place as a
    safety measure because you can really shoot yourself in the foot here and
    it is the behavior of 'nix ``mknod``. It is also important to note that not
    just anyone can create special devices. Usually this is only done as root.
    If the state is executed as none other than root on a minion, you may
    receive a permission error.

    name
        name of the file

    ntype
        node type 'p' (fifo pipe), 'c' (character device), or 'b'
        (block device)

    major
        major number of the device
        does not apply to a fifo pipe

    minor
        minor number of the device
        does not apply to a fifo pipe

    user
        owning user of the device/pipe

    group
        owning group of the device/pipe

    mode
        permissions on the device/pipe

    Usage:

    .. code-block:: yaml

        /dev/chr:
          file.mknod:
            - ntype: c
            - major: 180
            - minor: 31
            - user: root
            - group: root
            - mode: 660

        /dev/blk:
          file.mknod:
            - ntype: b
            - major: 8
            - minor: 999
            - user: root
            - group: root
            - mode: 660

        /dev/fifo:
          file.mknod:
            - ntype: p
            - user: root
            - group: root
            - mode: 660

    .. versionadded:: 0.17.0
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    if not name:
        return _error(ret, 'Must provide name to file.mknod')

    if ntype == 'c':
        # Check for file existence
        if __salt__['file.file_exists'](name):
            ret['comment'] = (
                'File exists and is not a character device {0}. Cowardly '
                'refusing to continue'.format(name)
            )

        # Check if it is a character device
        elif not __salt__['file.is_chrdev'](name):
            if __opts__['test']:
                ret['comment'] = (
                    'Character device {0} is set to be created'
                ).format(name)
                ret['result'] = None
            else:
                ret = __salt__['file.mknod'](name,
                                             ntype,
                                             major,
                                             minor,
                                             user,
                                             group,
                                             mode)

        # Check the major/minor
        else:
            devmaj, devmin = __salt__['file.get_devmm'](name)
            if (major, minor) != (devmaj, devmin):
                ret['comment'] = (
                    'Character device {0} exists and has a different '
                    'major/minor {1}/{2}. Cowardly refusing to continue'
                    .format(name, devmaj, devmin)
                )
            # Check the perms
            else:
                ret = __salt__['file.check_perms'](name,
                                                   None,
                                                   user,
                                                   group,
                                                   mode)[0]
                if not ret['changes']:
                    ret['comment'] = (
                        'Character device {0} is in the correct state'.format(
                            name
                        )
                    )

    elif ntype == 'b':
        # Check for file existence
        if __salt__['file.file_exists'](name):
            ret['comment'] = (
                'File exists and is not a block device {0}. Cowardly '
                'refusing to continue'.format(name)
            )

        # Check if it is a block device
        elif not __salt__['file.is_blkdev'](name):
            if __opts__['test']:
                ret['comment'] = (
                    'Block device {0} is set to be created'
                ).format(name)
                ret['result'] = None
            else:
                ret = __salt__['file.mknod'](name,
                                             ntype,
                                             major,
                                             minor,
                                             user,
                                             group,
                                             mode)

        # Check the major/minor
        else:
            devmaj, devmin = __salt__['file.get_devmm'](name)
            if (major, minor) != (devmaj, devmin):
                ret['comment'] = (
                    'Block device {0} exists and has a different major/minor '
                    '{1}/{2}. Cowardly refusing to continue'.format(
                        name, devmaj, devmin
                    )
                )
            # Check the perms
            else:
                ret = __salt__['file.check_perms'](name,
                                                   None,
                                                   user,
                                                   group,
                                                   mode)[0]
                if not ret['changes']:
                    ret['comment'] = (
                        'Block device {0} is in the correct state'.format(name)
                    )

    elif ntype == 'p':
        # Check for file existence
        if __salt__['file.file_exists'](name):
            ret['comment'] = (
                'File exists and is not a fifo pipe {0}. Cowardly refusing '
                'to continue'.format(name)
            )

        # Check if it is a fifo
        elif not __salt__['file.is_fifo'](name):
            if __opts__['test']:
                ret['comment'] = 'Fifo pipe {0} is set to be created'.format(
                    name
                )
                ret['result'] = None
            else:
                ret = __salt__['file.mknod'](name,
                                             ntype,
                                             major,
                                             minor,
                                             user,
                                             group,
                                             mode)

        # Check the perms
        else:
            ret = __salt__['file.check_perms'](name,
                                               None,
                                               user,
                                               group,
                                               mode)[0]
            if not ret['changes']:
                ret['comment'] = (
                    'Fifo pipe {0} is in the correct state'.format(name)
                )

    else:
        ret['comment'] = (
            'Node type unavailable: \'{0}\'. Available node types are '
            'character (\'c\'), block (\'b\'), and pipe (\'p\')'.format(ntype)
        )

    return ret


def mod_run_check_cmd(cmd, filename, **check_cmd_opts):
    '''
    Execute the check_cmd logic.

    Return a result dict if ``check_cmd`` succeeds (check_cmd == 0)
    otherwise return True
    '''
    log.debug('running our check_cmd')
    _cmd = '{0} {1}'.format(cmd, filename)
    cret = __salt__['cmd.run_all'](_cmd, **check_cmd_opts)
    if cret['retcode'] != 0:
        ret = {'comment': 'check_cmd execution failed',
               'skip_watch': True,
               'result': False}

        if cret.get('stdout'):
            ret['comment'] += '\n' + cret['stdout']

        if cret.get('stderr'):
            ret['comment'] += '\n' + cret['stderr']

        return ret

    # No reason to stop, return True
    return True


def decode(name,
        encoded_data=None,
        contents_pillar=None,
        encoding_type='base64',
        checksum='md5'):
    '''
    Decode an encoded file and write it to disk

    .. versionadded:: 2016.3.0

    name
        Path of the file to be written.

    encoded_data
        The encoded file. Either this option or ``contents_pillar`` must be
        specified.

    contents_pillar
        A Pillar path to the encoded file. Uses the same path syntax as
        :py:func:`pillar.get <salt.modules.pillar.get>`. The
        :py:func:`hashutil.base64_encodefile
        <salt.modules.hashutil.base64_encodefile>` function can load encoded
        content into Pillar. Either this option or ``encoded_data`` must be
        specified.

    encoding_type : ``base64``
        The type of encoding. 
checksum : ``md5`` The hashing algorithm to use to generate checksums. Wraps the :py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution function. Usage: .. code-block:: yaml write_base64_encoded_string_to_a_file: file.decode: - name: /tmp/new_file - encoding_type: base64 - contents_pillar: mypillar:thefile # or write_base64_encoded_string_to_a_file: file.decode: - name: /tmp/new_file - encoding_type: base64 - encoded_data: | Z2V0IHNhbHRlZAo= Be careful with multi-line strings that the YAML indentation is correct. E.g., .. code-block:: yaml write_base64_encoded_string_to_a_file: file.decode: - name: /tmp/new_file - encoding_type: base64 - encoded_data: | {{ salt.pillar.get('path:to:data') | indent(8) }} ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if not (encoded_data or contents_pillar): raise CommandExecutionError("Specify either the 'encoded_data' or " "'contents_pillar' argument.") elif encoded_data and contents_pillar: raise CommandExecutionError("Specify only one 'encoded_data' or " "'contents_pillar' argument.") elif encoded_data: content = encoded_data elif contents_pillar: content = __salt__['pillar.get'](contents_pillar, False) if content is False: raise CommandExecutionError('Pillar data not found.') else: raise CommandExecutionError('No contents given.') dest_exists = __salt__['file.file_exists'](name) if dest_exists: instr = __salt__['hashutil.base64_decodestring'](content) insum = __salt__['hashutil.digest'](instr, checksum) del instr # no need to keep in-memory after we have the hash outsum = __salt__['hashutil.digest_file'](name, checksum) if insum != outsum: ret['changes'] = { 'old': outsum, 'new': insum, } if not ret['changes']: ret['comment'] = 'File is in the correct state.' ret['result'] = True return ret if __opts__['test'] is True: ret['comment'] = 'File is set to be updated.' 
ret['result'] = None return ret ret['result'] = __salt__['hashutil.base64_decodefile'](content, name) ret['comment'] = 'File was updated.' if not ret['changes']: ret['changes'] = { 'old': None, 'new': __salt__['hashutil.digest_file'](name, checksum), } return ret
./CrossVul/dataset_final_sorted/CWE-200/py/bad_3325_3