repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
saltstack/salt | salt/utils/gitfs.py | GitProvider.get_url | def get_url(self):
'''
Examine self.id and assign self.url (and self.branch, for git_pillar)
'''
if self.role in ('git_pillar', 'winrepo'):
# With winrepo and git_pillar, the remote is specified in the
# format '<branch> <url>', so that we can get a unique identifier
# to hash for each remote.
try:
self.branch, self.url = self.id.split(None, 1)
except ValueError:
self.branch = self.conf['branch']
self.url = self.id
else:
self.url = self.id | python | def get_url(self):
'''
Examine self.id and assign self.url (and self.branch, for git_pillar)
'''
if self.role in ('git_pillar', 'winrepo'):
# With winrepo and git_pillar, the remote is specified in the
# format '<branch> <url>', so that we can get a unique identifier
# to hash for each remote.
try:
self.branch, self.url = self.id.split(None, 1)
except ValueError:
self.branch = self.conf['branch']
self.url = self.id
else:
self.url = self.id | [
"def",
"get_url",
"(",
"self",
")",
":",
"if",
"self",
".",
"role",
"in",
"(",
"'git_pillar'",
",",
"'winrepo'",
")",
":",
"# With winrepo and git_pillar, the remote is specified in the",
"# format '<branch> <url>', so that we can get a unique identifier",
"# to hash for each r... | Examine self.id and assign self.url (and self.branch, for git_pillar) | [
"Examine",
"self",
".",
"id",
"and",
"assign",
"self",
".",
"url",
"(",
"and",
"self",
".",
"branch",
"for",
"git_pillar",
")"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1047-L1061 | train |
saltstack/salt | salt/utils/gitfs.py | GitProvider.linkdir_walk | def linkdir_walk(self):
'''
Return the expected result of an os.walk on the linkdir, based on the
mountpoint value.
'''
try:
# Use cached linkdir_walk if we've already run this
return self._linkdir_walk
except AttributeError:
self._linkdir_walk = []
try:
parts = self._mountpoint.split('/')
except AttributeError:
log.error(
'%s class is missing a \'_mountpoint\' attribute',
self.__class__.__name__
)
else:
for idx, item in enumerate(parts[:-1]):
try:
dirs = [parts[idx + 1]]
except IndexError:
dirs = []
self._linkdir_walk.append((
salt.utils.path.join(self.linkdir, *parts[:idx + 1]),
dirs,
[]
))
try:
# The linkdir itself goes at the beginning
self._linkdir_walk.insert(0, (self.linkdir, [parts[0]], []))
except IndexError:
pass
return self._linkdir_walk | python | def linkdir_walk(self):
'''
Return the expected result of an os.walk on the linkdir, based on the
mountpoint value.
'''
try:
# Use cached linkdir_walk if we've already run this
return self._linkdir_walk
except AttributeError:
self._linkdir_walk = []
try:
parts = self._mountpoint.split('/')
except AttributeError:
log.error(
'%s class is missing a \'_mountpoint\' attribute',
self.__class__.__name__
)
else:
for idx, item in enumerate(parts[:-1]):
try:
dirs = [parts[idx + 1]]
except IndexError:
dirs = []
self._linkdir_walk.append((
salt.utils.path.join(self.linkdir, *parts[:idx + 1]),
dirs,
[]
))
try:
# The linkdir itself goes at the beginning
self._linkdir_walk.insert(0, (self.linkdir, [parts[0]], []))
except IndexError:
pass
return self._linkdir_walk | [
"def",
"linkdir_walk",
"(",
"self",
")",
":",
"try",
":",
"# Use cached linkdir_walk if we've already run this",
"return",
"self",
".",
"_linkdir_walk",
"except",
"AttributeError",
":",
"self",
".",
"_linkdir_walk",
"=",
"[",
"]",
"try",
":",
"parts",
"=",
"self",... | Return the expected result of an os.walk on the linkdir, based on the
mountpoint value. | [
"Return",
"the",
"expected",
"result",
"of",
"an",
"os",
".",
"walk",
"on",
"the",
"linkdir",
"based",
"on",
"the",
"mountpoint",
"value",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1064-L1097 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.checkout | def checkout(self):
'''
Checkout the configured branch/tag. We catch an "Exception" class here
instead of a specific exception class because the exceptions raised by
GitPython when running these functions vary in different versions of
GitPython.
'''
tgt_ref = self.get_checkout_target()
try:
head_sha = self.repo.rev_parse('HEAD').hexsha
except Exception:
# Should only happen the first time we are checking out, since
# we fetch first before ever checking anything out.
head_sha = None
# 'origin/' + tgt_ref ==> matches a branch head
# 'tags/' + tgt_ref + '@{commit}' ==> matches tag's commit
for rev_parse_target, checkout_ref in (
('origin/' + tgt_ref, 'origin/' + tgt_ref),
('tags/' + tgt_ref, 'tags/' + tgt_ref)):
try:
target_sha = self.repo.rev_parse(rev_parse_target).hexsha
except Exception:
# ref does not exist
continue
else:
if head_sha == target_sha:
# No need to checkout, we're already up-to-date
return self.check_root()
try:
with self.gen_lock(lock_type='checkout'):
self.repo.git.checkout(checkout_ref)
log.debug(
'%s remote \'%s\' has been checked out to %s',
self.role,
self.id,
checkout_ref
)
except GitLockError as exc:
if exc.errno == errno.EEXIST:
# Re-raise with a different strerror containing a
# more meaningful error message for the calling
# function.
raise GitLockError(
exc.errno,
'Checkout lock exists for {0} remote \'{1}\''
.format(self.role, self.id)
)
else:
log.error(
'Error %d encountered obtaining checkout lock '
'for %s remote \'%s\'',
exc.errno,
self.role,
self.id
)
return None
except Exception:
continue
return self.check_root()
log.error(
'Failed to checkout %s from %s remote \'%s\': remote ref does '
'not exist', tgt_ref, self.role, self.id
)
return None | python | def checkout(self):
'''
Checkout the configured branch/tag. We catch an "Exception" class here
instead of a specific exception class because the exceptions raised by
GitPython when running these functions vary in different versions of
GitPython.
'''
tgt_ref = self.get_checkout_target()
try:
head_sha = self.repo.rev_parse('HEAD').hexsha
except Exception:
# Should only happen the first time we are checking out, since
# we fetch first before ever checking anything out.
head_sha = None
# 'origin/' + tgt_ref ==> matches a branch head
# 'tags/' + tgt_ref + '@{commit}' ==> matches tag's commit
for rev_parse_target, checkout_ref in (
('origin/' + tgt_ref, 'origin/' + tgt_ref),
('tags/' + tgt_ref, 'tags/' + tgt_ref)):
try:
target_sha = self.repo.rev_parse(rev_parse_target).hexsha
except Exception:
# ref does not exist
continue
else:
if head_sha == target_sha:
# No need to checkout, we're already up-to-date
return self.check_root()
try:
with self.gen_lock(lock_type='checkout'):
self.repo.git.checkout(checkout_ref)
log.debug(
'%s remote \'%s\' has been checked out to %s',
self.role,
self.id,
checkout_ref
)
except GitLockError as exc:
if exc.errno == errno.EEXIST:
# Re-raise with a different strerror containing a
# more meaningful error message for the calling
# function.
raise GitLockError(
exc.errno,
'Checkout lock exists for {0} remote \'{1}\''
.format(self.role, self.id)
)
else:
log.error(
'Error %d encountered obtaining checkout lock '
'for %s remote \'%s\'',
exc.errno,
self.role,
self.id
)
return None
except Exception:
continue
return self.check_root()
log.error(
'Failed to checkout %s from %s remote \'%s\': remote ref does '
'not exist', tgt_ref, self.role, self.id
)
return None | [
"def",
"checkout",
"(",
"self",
")",
":",
"tgt_ref",
"=",
"self",
".",
"get_checkout_target",
"(",
")",
"try",
":",
"head_sha",
"=",
"self",
".",
"repo",
".",
"rev_parse",
"(",
"'HEAD'",
")",
".",
"hexsha",
"except",
"Exception",
":",
"# Should only happen... | Checkout the configured branch/tag. We catch an "Exception" class here
instead of a specific exception class because the exceptions raised by
GitPython when running these functions vary in different versions of
GitPython. | [
"Checkout",
"the",
"configured",
"branch",
"/",
"tag",
".",
"We",
"catch",
"an",
"Exception",
"class",
"here",
"instead",
"of",
"a",
"specific",
"exception",
"class",
"because",
"the",
"exceptions",
"raised",
"by",
"GitPython",
"when",
"running",
"these",
"fun... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1131-L1196 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.init_remote | def init_remote(self):
'''
Initialize/attach to a remote using GitPython. Return a boolean
which will let the calling function know whether or not a new repo was
initialized by this function.
'''
new = False
if not os.listdir(self.cachedir):
# Repo cachedir is empty, initialize a new repo there
self.repo = git.Repo.init(self.cachedir)
new = True
else:
# Repo cachedir exists, try to attach
try:
self.repo = git.Repo(self.cachedir)
except git.exc.InvalidGitRepositoryError:
log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
return new
self.gitdir = salt.utils.path.join(self.repo.working_dir, '.git')
self.enforce_git_config()
return new | python | def init_remote(self):
'''
Initialize/attach to a remote using GitPython. Return a boolean
which will let the calling function know whether or not a new repo was
initialized by this function.
'''
new = False
if not os.listdir(self.cachedir):
# Repo cachedir is empty, initialize a new repo there
self.repo = git.Repo.init(self.cachedir)
new = True
else:
# Repo cachedir exists, try to attach
try:
self.repo = git.Repo(self.cachedir)
except git.exc.InvalidGitRepositoryError:
log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
return new
self.gitdir = salt.utils.path.join(self.repo.working_dir, '.git')
self.enforce_git_config()
return new | [
"def",
"init_remote",
"(",
"self",
")",
":",
"new",
"=",
"False",
"if",
"not",
"os",
".",
"listdir",
"(",
"self",
".",
"cachedir",
")",
":",
"# Repo cachedir is empty, initialize a new repo there",
"self",
".",
"repo",
"=",
"git",
".",
"Repo",
".",
"init",
... | Initialize/attach to a remote using GitPython. Return a boolean
which will let the calling function know whether or not a new repo was
initialized by this function. | [
"Initialize",
"/",
"attach",
"to",
"a",
"remote",
"using",
"GitPython",
".",
"Return",
"a",
"boolean",
"which",
"will",
"let",
"the",
"calling",
"function",
"know",
"whether",
"or",
"not",
"a",
"new",
"repo",
"was",
"initialized",
"by",
"this",
"function",
... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1198-L1220 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.dir_list | def dir_list(self, tgt_env):
'''
Get list of directories for the target environment using GitPython
'''
ret = set()
tree = self.get_tree(tgt_env)
if not tree:
return ret
if self.root(tgt_env):
try:
tree = tree / self.root(tgt_env)
except KeyError:
return ret
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for blob in tree.traverse():
if isinstance(blob, git.Tree):
ret.add(add_mountpoint(relpath(blob.path)))
if self.mountpoint(tgt_env):
ret.add(self.mountpoint(tgt_env))
return ret | python | def dir_list(self, tgt_env):
'''
Get list of directories for the target environment using GitPython
'''
ret = set()
tree = self.get_tree(tgt_env)
if not tree:
return ret
if self.root(tgt_env):
try:
tree = tree / self.root(tgt_env)
except KeyError:
return ret
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for blob in tree.traverse():
if isinstance(blob, git.Tree):
ret.add(add_mountpoint(relpath(blob.path)))
if self.mountpoint(tgt_env):
ret.add(self.mountpoint(tgt_env))
return ret | [
"def",
"dir_list",
"(",
"self",
",",
"tgt_env",
")",
":",
"ret",
"=",
"set",
"(",
")",
"tree",
"=",
"self",
".",
"get_tree",
"(",
"tgt_env",
")",
"if",
"not",
"tree",
":",
"return",
"ret",
"if",
"self",
".",
"root",
"(",
"tgt_env",
")",
":",
"try... | Get list of directories for the target environment using GitPython | [
"Get",
"list",
"of",
"directories",
"for",
"the",
"target",
"environment",
"using",
"GitPython"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1222-L1245 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.envs | def envs(self):
'''
Check the refs and return a list of the ones which can be used as salt
environments.
'''
ref_paths = [x.path for x in self.repo.refs]
return self._get_envs_from_ref_paths(ref_paths) | python | def envs(self):
'''
Check the refs and return a list of the ones which can be used as salt
environments.
'''
ref_paths = [x.path for x in self.repo.refs]
return self._get_envs_from_ref_paths(ref_paths) | [
"def",
"envs",
"(",
"self",
")",
":",
"ref_paths",
"=",
"[",
"x",
".",
"path",
"for",
"x",
"in",
"self",
".",
"repo",
".",
"refs",
"]",
"return",
"self",
".",
"_get_envs_from_ref_paths",
"(",
"ref_paths",
")"
] | Check the refs and return a list of the ones which can be used as salt
environments. | [
"Check",
"the",
"refs",
"and",
"return",
"a",
"list",
"of",
"the",
"ones",
"which",
"can",
"be",
"used",
"as",
"salt",
"environments",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1247-L1253 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython._fetch | def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
origin = self.repo.remotes[0]
try:
fetch_results = origin.fetch()
except AssertionError:
fetch_results = origin.fetch()
new_objs = False
for fetchinfo in fetch_results:
if fetchinfo.old_commit is not None:
log.debug(
'%s has updated \'%s\' for remote \'%s\' '
'from %s to %s',
self.role,
fetchinfo.name,
self.id,
fetchinfo.old_commit.hexsha[:7],
fetchinfo.commit.hexsha[:7]
)
new_objs = True
elif fetchinfo.flags in (fetchinfo.NEW_TAG,
fetchinfo.NEW_HEAD):
log.debug(
'%s has fetched new %s \'%s\' for remote \'%s\'',
self.role,
'tag' if fetchinfo.flags == fetchinfo.NEW_TAG else 'head',
fetchinfo.name,
self.id
)
new_objs = True
cleaned = self.clean_stale_refs()
return True if (new_objs or cleaned) else None | python | def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
origin = self.repo.remotes[0]
try:
fetch_results = origin.fetch()
except AssertionError:
fetch_results = origin.fetch()
new_objs = False
for fetchinfo in fetch_results:
if fetchinfo.old_commit is not None:
log.debug(
'%s has updated \'%s\' for remote \'%s\' '
'from %s to %s',
self.role,
fetchinfo.name,
self.id,
fetchinfo.old_commit.hexsha[:7],
fetchinfo.commit.hexsha[:7]
)
new_objs = True
elif fetchinfo.flags in (fetchinfo.NEW_TAG,
fetchinfo.NEW_HEAD):
log.debug(
'%s has fetched new %s \'%s\' for remote \'%s\'',
self.role,
'tag' if fetchinfo.flags == fetchinfo.NEW_TAG else 'head',
fetchinfo.name,
self.id
)
new_objs = True
cleaned = self.clean_stale_refs()
return True if (new_objs or cleaned) else None | [
"def",
"_fetch",
"(",
"self",
")",
":",
"origin",
"=",
"self",
".",
"repo",
".",
"remotes",
"[",
"0",
"]",
"try",
":",
"fetch_results",
"=",
"origin",
".",
"fetch",
"(",
")",
"except",
"AssertionError",
":",
"fetch_results",
"=",
"origin",
".",
"fetch"... | Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False. | [
"Fetch",
"the",
"repo",
".",
"If",
"the",
"local",
"copy",
"was",
"updated",
"return",
"True",
".",
"If",
"the",
"local",
"copy",
"was",
"already",
"up",
"-",
"to",
"-",
"date",
"return",
"False",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1255-L1291 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.file_list | def file_list(self, tgt_env):
'''
Get file list for the target environment using GitPython
'''
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return files, symlinks
if self.root(tgt_env):
try:
tree = tree / self.root(tgt_env)
except KeyError:
return files, symlinks
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for file_blob in tree.traverse():
if not isinstance(file_blob, git.Blob):
continue
file_path = add_mountpoint(relpath(file_blob.path))
files.add(file_path)
if stat.S_ISLNK(file_blob.mode):
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
symlinks[file_path] = link_tgt
return files, symlinks | python | def file_list(self, tgt_env):
'''
Get file list for the target environment using GitPython
'''
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return files, symlinks
if self.root(tgt_env):
try:
tree = tree / self.root(tgt_env)
except KeyError:
return files, symlinks
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for file_blob in tree.traverse():
if not isinstance(file_blob, git.Blob):
continue
file_path = add_mountpoint(relpath(file_blob.path))
files.add(file_path)
if stat.S_ISLNK(file_blob.mode):
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
symlinks[file_path] = link_tgt
return files, symlinks | [
"def",
"file_list",
"(",
"self",
",",
"tgt_env",
")",
":",
"files",
"=",
"set",
"(",
")",
"symlinks",
"=",
"{",
"}",
"tree",
"=",
"self",
".",
"get_tree",
"(",
"tgt_env",
")",
"if",
"not",
"tree",
":",
"# Not found, return empty objects",
"return",
"file... | Get file list for the target environment using GitPython | [
"Get",
"file",
"list",
"for",
"the",
"target",
"environment",
"using",
"GitPython"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1293-L1325 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.find_file | def find_file(self, path, tgt_env):
'''
Find the specified file in the specified environment
'''
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found in repo
return None, None, None
blob = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
blob = None
break
try:
file_blob = tree / path
if stat.S_ISLNK(file_blob.mode):
# Path is a symlink. The blob data corresponding to
# this path's object ID will be the target of the
# symlink. Follow the symlink and set path to the
# location indicated in the blob data.
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
path = salt.utils.path.join(
os.path.dirname(path), link_tgt, use_posixpath=True)
else:
blob = file_blob
if isinstance(blob, git.Tree):
# Path is a directory, not a file.
blob = None
break
except KeyError:
# File not found or repo_path points to a directory
blob = None
break
if isinstance(blob, git.Blob):
return blob, blob.hexsha, blob.mode
return None, None, None | python | def find_file(self, path, tgt_env):
'''
Find the specified file in the specified environment
'''
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found in repo
return None, None, None
blob = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
blob = None
break
try:
file_blob = tree / path
if stat.S_ISLNK(file_blob.mode):
# Path is a symlink. The blob data corresponding to
# this path's object ID will be the target of the
# symlink. Follow the symlink and set path to the
# location indicated in the blob data.
stream = six.StringIO()
file_blob.stream_data(stream)
stream.seek(0)
link_tgt = stream.read()
stream.close()
path = salt.utils.path.join(
os.path.dirname(path), link_tgt, use_posixpath=True)
else:
blob = file_blob
if isinstance(blob, git.Tree):
# Path is a directory, not a file.
blob = None
break
except KeyError:
# File not found or repo_path points to a directory
blob = None
break
if isinstance(blob, git.Blob):
return blob, blob.hexsha, blob.mode
return None, None, None | [
"def",
"find_file",
"(",
"self",
",",
"path",
",",
"tgt_env",
")",
":",
"tree",
"=",
"self",
".",
"get_tree",
"(",
"tgt_env",
")",
"if",
"not",
"tree",
":",
"# Branch/tag/SHA not found in repo",
"return",
"None",
",",
"None",
",",
"None",
"blob",
"=",
"N... | Find the specified file in the specified environment | [
"Find",
"the",
"specified",
"file",
"in",
"the",
"specified",
"environment"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1327-L1368 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.get_tree_from_branch | def get_tree_from_branch(self, ref):
'''
Return a git.Tree object matching a head ref fetched into
refs/remotes/origin/
'''
try:
return git.RemoteReference(
self.repo,
'refs/remotes/origin/{0}'.format(ref)).commit.tree
except ValueError:
return None | python | def get_tree_from_branch(self, ref):
'''
Return a git.Tree object matching a head ref fetched into
refs/remotes/origin/
'''
try:
return git.RemoteReference(
self.repo,
'refs/remotes/origin/{0}'.format(ref)).commit.tree
except ValueError:
return None | [
"def",
"get_tree_from_branch",
"(",
"self",
",",
"ref",
")",
":",
"try",
":",
"return",
"git",
".",
"RemoteReference",
"(",
"self",
".",
"repo",
",",
"'refs/remotes/origin/{0}'",
".",
"format",
"(",
"ref",
")",
")",
".",
"commit",
".",
"tree",
"except",
... | Return a git.Tree object matching a head ref fetched into
refs/remotes/origin/ | [
"Return",
"a",
"git",
".",
"Tree",
"object",
"matching",
"a",
"head",
"ref",
"fetched",
"into",
"refs",
"/",
"remotes",
"/",
"origin",
"/"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1370-L1380 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.get_tree_from_tag | def get_tree_from_tag(self, ref):
'''
Return a git.Tree object matching a tag ref fetched into refs/tags/
'''
try:
return git.TagReference(
self.repo,
'refs/tags/{0}'.format(ref)).commit.tree
except ValueError:
return None | python | def get_tree_from_tag(self, ref):
'''
Return a git.Tree object matching a tag ref fetched into refs/tags/
'''
try:
return git.TagReference(
self.repo,
'refs/tags/{0}'.format(ref)).commit.tree
except ValueError:
return None | [
"def",
"get_tree_from_tag",
"(",
"self",
",",
"ref",
")",
":",
"try",
":",
"return",
"git",
".",
"TagReference",
"(",
"self",
".",
"repo",
",",
"'refs/tags/{0}'",
".",
"format",
"(",
"ref",
")",
")",
".",
"commit",
".",
"tree",
"except",
"ValueError",
... | Return a git.Tree object matching a tag ref fetched into refs/tags/ | [
"Return",
"a",
"git",
".",
"Tree",
"object",
"matching",
"a",
"tag",
"ref",
"fetched",
"into",
"refs",
"/",
"tags",
"/"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1382-L1391 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.get_tree_from_sha | def get_tree_from_sha(self, ref):
'''
Return a git.Tree object matching a SHA
'''
try:
return self.repo.rev_parse(ref).tree
except (gitdb.exc.ODBError, AttributeError):
return None | python | def get_tree_from_sha(self, ref):
'''
Return a git.Tree object matching a SHA
'''
try:
return self.repo.rev_parse(ref).tree
except (gitdb.exc.ODBError, AttributeError):
return None | [
"def",
"get_tree_from_sha",
"(",
"self",
",",
"ref",
")",
":",
"try",
":",
"return",
"self",
".",
"repo",
".",
"rev_parse",
"(",
"ref",
")",
".",
"tree",
"except",
"(",
"gitdb",
".",
"exc",
".",
"ODBError",
",",
"AttributeError",
")",
":",
"return",
... | Return a git.Tree object matching a SHA | [
"Return",
"a",
"git",
".",
"Tree",
"object",
"matching",
"a",
"SHA"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1393-L1400 | train |
saltstack/salt | salt/utils/gitfs.py | GitPython.write_file | def write_file(self, blob, dest):
'''
Using the blob object, write the file to the destination path
'''
with salt.utils.files.fopen(dest, 'wb+') as fp_:
blob.stream_data(fp_) | python | def write_file(self, blob, dest):
'''
Using the blob object, write the file to the destination path
'''
with salt.utils.files.fopen(dest, 'wb+') as fp_:
blob.stream_data(fp_) | [
"def",
"write_file",
"(",
"self",
",",
"blob",
",",
"dest",
")",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"dest",
",",
"'wb+'",
")",
"as",
"fp_",
":",
"blob",
".",
"stream_data",
"(",
"fp_",
")"
] | Using the blob object, write the file to the destination path | [
"Using",
"the",
"blob",
"object",
"write",
"the",
"file",
"to",
"the",
"destination",
"path"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1402-L1407 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.checkout | def checkout(self):
'''
Checkout the configured branch/tag
'''
tgt_ref = self.get_checkout_target()
local_ref = 'refs/heads/' + tgt_ref
remote_ref = 'refs/remotes/origin/' + tgt_ref
tag_ref = 'refs/tags/' + tgt_ref
try:
local_head = self.repo.lookup_reference('HEAD')
except KeyError:
log.warning(
'HEAD not present in %s remote \'%s\'', self.role, self.id
)
return None
try:
head_sha = self.peel(local_head).hex
except AttributeError:
# Shouldn't happen, but just in case a future pygit2 API change
# breaks things, avoid a traceback and log an error.
log.error(
'Unable to get SHA of HEAD for %s remote \'%s\'',
self.role, self.id
)
return None
except KeyError:
head_sha = None
refs = self.repo.listall_references()
def _perform_checkout(checkout_ref, branch=True):
'''
DRY function for checking out either a branch or a tag
'''
try:
with self.gen_lock(lock_type='checkout'):
# Checkout the local branch corresponding to the
# remote ref.
self.repo.checkout(checkout_ref)
if branch:
self.repo.reset(oid, pygit2.GIT_RESET_HARD)
return True
except GitLockError as exc:
if exc.errno == errno.EEXIST:
# Re-raise with a different strerror containing a
# more meaningful error message for the calling
# function.
raise GitLockError(
exc.errno,
'Checkout lock exists for {0} remote \'{1}\''
.format(self.role, self.id)
)
else:
log.error(
'Error %d encountered obtaining checkout lock '
'for %s remote \'%s\'',
exc.errno,
self.role,
self.id
)
return False
try:
if remote_ref in refs:
# Get commit id for the remote ref
oid = self.peel(self.repo.lookup_reference(remote_ref)).id
if local_ref not in refs:
# No local branch for this remote, so create one and point
# it at the commit id of the remote ref
self.repo.create_reference(local_ref, oid)
try:
target_sha = \
self.peel(self.repo.lookup_reference(remote_ref)).hex
except KeyError:
log.error(
'pygit2 was unable to get SHA for %s in %s remote '
'\'%s\'', local_ref, self.role, self.id,
exc_info=True
)
return None
# Only perform a checkout if HEAD and target are not pointing
# at the same SHA1.
if head_sha != target_sha:
# Check existence of the ref in refs/heads/ which
# corresponds to the local HEAD. Checking out local_ref
# below when no local ref for HEAD is missing will raise an
# exception in pygit2 >= 0.21. If this ref is not present,
# create it. The "head_ref != local_ref" check ensures we
# don't try to add this ref if it is not necessary, as it
# would have been added above already. head_ref would be
# the same as local_ref if the branch name was changed but
# the cachedir was not (for example if a "name" parameter
# was used in a git_pillar remote, or if we are using
# winrepo which takes the basename of the repo as the
# cachedir).
head_ref = local_head.target
# If head_ref is not a string, it will point to a
# pygit2.Oid object and we are in detached HEAD mode.
# Therefore, there is no need to add a local reference. If
# head_ref == local_ref, then the local reference for HEAD
# in refs/heads/ already exists and again, no need to add.
if isinstance(head_ref, six.string_types) \
and head_ref not in refs and head_ref != local_ref:
branch_name = head_ref.partition('refs/heads/')[-1]
if not branch_name:
# Shouldn't happen, but log an error if it does
log.error(
'pygit2 was unable to resolve branch name from '
'HEAD ref \'%s\' in %s remote \'%s\'',
head_ref, self.role, self.id
)
return None
remote_head = 'refs/remotes/origin/' + branch_name
if remote_head not in refs:
# No remote ref for HEAD exists. This can happen in
# the first-time git_pillar checkout when when the
# remote repo does not have a master branch. Since
# we need a HEAD reference to keep pygit2 from
# throwing an error, and none exists in
# refs/remotes/origin, we'll just point HEAD at the
# remote_ref.
remote_head = remote_ref
self.repo.create_reference(
head_ref,
self.repo.lookup_reference(remote_head).target
)
if not _perform_checkout(local_ref, branch=True):
return None
# Return the relative root, if present
return self.check_root()
elif tag_ref in refs:
tag_obj = self.repo.revparse_single(tag_ref)
if not isinstance(tag_obj, pygit2.Commit):
log.error(
'%s does not correspond to pygit2.Commit object',
tag_ref
)
else:
try:
# If no AttributeError raised, this is an annotated tag
tag_sha = tag_obj.target.hex
except AttributeError:
try:
tag_sha = tag_obj.hex
except AttributeError:
# Shouldn't happen, but could if a future pygit2
# API change breaks things.
log.error(
'Unable to resolve %s from %s remote \'%s\' '
'to either an annotated or non-annotated tag',
tag_ref, self.role, self.id,
exc_info=True
)
return None
log.debug('SHA of tag %s: %s', tgt_ref, tag_sha)
if head_sha != tag_sha:
if not _perform_checkout(tag_ref, branch=False):
return None
# Return the relative root, if present
return self.check_root()
except GitLockError:
raise
except Exception as exc:
log.error(
'Failed to checkout %s from %s remote \'%s\': %s',
tgt_ref, self.role, self.id, exc,
exc_info=True
)
return None
log.error(
'Failed to checkout %s from %s remote \'%s\': remote ref '
'does not exist', tgt_ref, self.role, self.id
)
return None | python | def checkout(self):
'''
Checkout the configured branch/tag
'''
tgt_ref = self.get_checkout_target()
local_ref = 'refs/heads/' + tgt_ref
remote_ref = 'refs/remotes/origin/' + tgt_ref
tag_ref = 'refs/tags/' + tgt_ref
try:
local_head = self.repo.lookup_reference('HEAD')
except KeyError:
log.warning(
'HEAD not present in %s remote \'%s\'', self.role, self.id
)
return None
try:
head_sha = self.peel(local_head).hex
except AttributeError:
# Shouldn't happen, but just in case a future pygit2 API change
# breaks things, avoid a traceback and log an error.
log.error(
'Unable to get SHA of HEAD for %s remote \'%s\'',
self.role, self.id
)
return None
except KeyError:
head_sha = None
refs = self.repo.listall_references()
def _perform_checkout(checkout_ref, branch=True):
'''
DRY function for checking out either a branch or a tag
'''
try:
with self.gen_lock(lock_type='checkout'):
# Checkout the local branch corresponding to the
# remote ref.
self.repo.checkout(checkout_ref)
if branch:
self.repo.reset(oid, pygit2.GIT_RESET_HARD)
return True
except GitLockError as exc:
if exc.errno == errno.EEXIST:
# Re-raise with a different strerror containing a
# more meaningful error message for the calling
# function.
raise GitLockError(
exc.errno,
'Checkout lock exists for {0} remote \'{1}\''
.format(self.role, self.id)
)
else:
log.error(
'Error %d encountered obtaining checkout lock '
'for %s remote \'%s\'',
exc.errno,
self.role,
self.id
)
return False
try:
if remote_ref in refs:
# Get commit id for the remote ref
oid = self.peel(self.repo.lookup_reference(remote_ref)).id
if local_ref not in refs:
# No local branch for this remote, so create one and point
# it at the commit id of the remote ref
self.repo.create_reference(local_ref, oid)
try:
target_sha = \
self.peel(self.repo.lookup_reference(remote_ref)).hex
except KeyError:
log.error(
'pygit2 was unable to get SHA for %s in %s remote '
'\'%s\'', local_ref, self.role, self.id,
exc_info=True
)
return None
# Only perform a checkout if HEAD and target are not pointing
# at the same SHA1.
if head_sha != target_sha:
# Check existence of the ref in refs/heads/ which
# corresponds to the local HEAD. Checking out local_ref
# below when no local ref for HEAD is missing will raise an
# exception in pygit2 >= 0.21. If this ref is not present,
# create it. The "head_ref != local_ref" check ensures we
# don't try to add this ref if it is not necessary, as it
# would have been added above already. head_ref would be
# the same as local_ref if the branch name was changed but
# the cachedir was not (for example if a "name" parameter
# was used in a git_pillar remote, or if we are using
# winrepo which takes the basename of the repo as the
# cachedir).
head_ref = local_head.target
# If head_ref is not a string, it will point to a
# pygit2.Oid object and we are in detached HEAD mode.
# Therefore, there is no need to add a local reference. If
# head_ref == local_ref, then the local reference for HEAD
# in refs/heads/ already exists and again, no need to add.
if isinstance(head_ref, six.string_types) \
and head_ref not in refs and head_ref != local_ref:
branch_name = head_ref.partition('refs/heads/')[-1]
if not branch_name:
# Shouldn't happen, but log an error if it does
log.error(
'pygit2 was unable to resolve branch name from '
'HEAD ref \'%s\' in %s remote \'%s\'',
head_ref, self.role, self.id
)
return None
remote_head = 'refs/remotes/origin/' + branch_name
if remote_head not in refs:
# No remote ref for HEAD exists. This can happen in
# the first-time git_pillar checkout when when the
# remote repo does not have a master branch. Since
# we need a HEAD reference to keep pygit2 from
# throwing an error, and none exists in
# refs/remotes/origin, we'll just point HEAD at the
# remote_ref.
remote_head = remote_ref
self.repo.create_reference(
head_ref,
self.repo.lookup_reference(remote_head).target
)
if not _perform_checkout(local_ref, branch=True):
return None
# Return the relative root, if present
return self.check_root()
elif tag_ref in refs:
tag_obj = self.repo.revparse_single(tag_ref)
if not isinstance(tag_obj, pygit2.Commit):
log.error(
'%s does not correspond to pygit2.Commit object',
tag_ref
)
else:
try:
# If no AttributeError raised, this is an annotated tag
tag_sha = tag_obj.target.hex
except AttributeError:
try:
tag_sha = tag_obj.hex
except AttributeError:
# Shouldn't happen, but could if a future pygit2
# API change breaks things.
log.error(
'Unable to resolve %s from %s remote \'%s\' '
'to either an annotated or non-annotated tag',
tag_ref, self.role, self.id,
exc_info=True
)
return None
log.debug('SHA of tag %s: %s', tgt_ref, tag_sha)
if head_sha != tag_sha:
if not _perform_checkout(tag_ref, branch=False):
return None
# Return the relative root, if present
return self.check_root()
except GitLockError:
raise
except Exception as exc:
log.error(
'Failed to checkout %s from %s remote \'%s\': %s',
tgt_ref, self.role, self.id, exc,
exc_info=True
)
return None
log.error(
'Failed to checkout %s from %s remote \'%s\': remote ref '
'does not exist', tgt_ref, self.role, self.id
)
return None | [
"def",
"checkout",
"(",
"self",
")",
":",
"tgt_ref",
"=",
"self",
".",
"get_checkout_target",
"(",
")",
"local_ref",
"=",
"'refs/heads/'",
"+",
"tgt_ref",
"remote_ref",
"=",
"'refs/remotes/origin/'",
"+",
"tgt_ref",
"tag_ref",
"=",
"'refs/tags/'",
"+",
"tgt_ref"... | Checkout the configured branch/tag | [
"Checkout",
"the",
"configured",
"branch",
"/",
"tag"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1435-L1617 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.clean_stale_refs | def clean_stale_refs(self, local_refs=None): # pylint: disable=arguments-differ
'''
Clean stale local refs so they don't appear as fileserver environments
'''
try:
if pygit2.GIT_FETCH_PRUNE:
# Don't need to clean anything, pygit2 can do it by itself
return []
except AttributeError:
# However, only in 0.26.2 and newer
pass
if self.credentials is not None:
log.debug(
'The installed version of pygit2 (%s) does not support '
'detecting stale refs for authenticated remotes, saltenvs '
'will not reflect branches/tags removed from remote \'%s\'',
PYGIT2_VERSION, self.id
)
return []
return super(Pygit2, self).clean_stale_refs() | python | def clean_stale_refs(self, local_refs=None): # pylint: disable=arguments-differ
'''
Clean stale local refs so they don't appear as fileserver environments
'''
try:
if pygit2.GIT_FETCH_PRUNE:
# Don't need to clean anything, pygit2 can do it by itself
return []
except AttributeError:
# However, only in 0.26.2 and newer
pass
if self.credentials is not None:
log.debug(
'The installed version of pygit2 (%s) does not support '
'detecting stale refs for authenticated remotes, saltenvs '
'will not reflect branches/tags removed from remote \'%s\'',
PYGIT2_VERSION, self.id
)
return []
return super(Pygit2, self).clean_stale_refs() | [
"def",
"clean_stale_refs",
"(",
"self",
",",
"local_refs",
"=",
"None",
")",
":",
"# pylint: disable=arguments-differ",
"try",
":",
"if",
"pygit2",
".",
"GIT_FETCH_PRUNE",
":",
"# Don't need to clean anything, pygit2 can do it by itself",
"return",
"[",
"]",
"except",
"... | Clean stale local refs so they don't appear as fileserver environments | [
"Clean",
"stale",
"local",
"refs",
"so",
"they",
"don",
"t",
"appear",
"as",
"fileserver",
"environments"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1619-L1638 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.init_remote | def init_remote(self):
'''
Initialize/attach to a remote using pygit2. Return a boolean which
will let the calling function know whether or not a new repo was
initialized by this function.
'''
# https://github.com/libgit2/pygit2/issues/339
# https://github.com/libgit2/libgit2/issues/2122
home = os.path.expanduser('~')
pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
new = False
if not os.listdir(self.cachedir):
# Repo cachedir is empty, initialize a new repo there
self.repo = pygit2.init_repository(self.cachedir)
new = True
else:
# Repo cachedir exists, try to attach
try:
self.repo = pygit2.Repository(self.cachedir)
except KeyError:
log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
return new
self.gitdir = salt.utils.path.join(self.repo.workdir, '.git')
self.enforce_git_config()
return new | python | def init_remote(self):
'''
Initialize/attach to a remote using pygit2. Return a boolean which
will let the calling function know whether or not a new repo was
initialized by this function.
'''
# https://github.com/libgit2/pygit2/issues/339
# https://github.com/libgit2/libgit2/issues/2122
home = os.path.expanduser('~')
pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = home
new = False
if not os.listdir(self.cachedir):
# Repo cachedir is empty, initialize a new repo there
self.repo = pygit2.init_repository(self.cachedir)
new = True
else:
# Repo cachedir exists, try to attach
try:
self.repo = pygit2.Repository(self.cachedir)
except KeyError:
log.error(_INVALID_REPO, self.cachedir, self.url, self.role)
return new
self.gitdir = salt.utils.path.join(self.repo.workdir, '.git')
self.enforce_git_config()
return new | [
"def",
"init_remote",
"(",
"self",
")",
":",
"# https://github.com/libgit2/pygit2/issues/339",
"# https://github.com/libgit2/libgit2/issues/2122",
"home",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
"pygit2",
".",
"settings",
".",
"search_path",
"[",
"... | Initialize/attach to a remote using pygit2. Return a boolean which
will let the calling function know whether or not a new repo was
initialized by this function. | [
"Initialize",
"/",
"attach",
"to",
"a",
"remote",
"using",
"pygit2",
".",
"Return",
"a",
"boolean",
"which",
"will",
"let",
"the",
"calling",
"function",
"know",
"whether",
"or",
"not",
"a",
"new",
"repo",
"was",
"initialized",
"by",
"this",
"function",
".... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1640-L1666 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.dir_list | def dir_list(self, tgt_env):
'''
Get a list of directories for the target environment using pygit2
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all
the empty directories within it in the "blobs" list
'''
for entry in iter(tree):
if entry.oid not in self.repo:
# Entry is a submodule, skip it
continue
blob = self.repo[entry.oid]
if not isinstance(blob, pygit2.Tree):
continue
blobs.append(
salt.utils.path.join(prefix, entry.name, use_posixpath=True)
)
if blob:
_traverse(
blob, blobs, salt.utils.path.join(
prefix, entry.name, use_posixpath=True)
)
ret = set()
tree = self.get_tree(tgt_env)
if not tree:
return ret
if self.root(tgt_env):
try:
oid = tree[self.root(tgt_env)].oid
tree = self.repo[oid]
except KeyError:
return ret
if not isinstance(tree, pygit2.Tree):
return ret
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
blobs = []
if tree:
_traverse(tree, blobs, self.root(tgt_env))
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for blob in blobs:
ret.add(add_mountpoint(relpath(blob)))
if self.mountpoint(tgt_env):
ret.add(self.mountpoint(tgt_env))
return ret | python | def dir_list(self, tgt_env):
'''
Get a list of directories for the target environment using pygit2
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all
the empty directories within it in the "blobs" list
'''
for entry in iter(tree):
if entry.oid not in self.repo:
# Entry is a submodule, skip it
continue
blob = self.repo[entry.oid]
if not isinstance(blob, pygit2.Tree):
continue
blobs.append(
salt.utils.path.join(prefix, entry.name, use_posixpath=True)
)
if blob:
_traverse(
blob, blobs, salt.utils.path.join(
prefix, entry.name, use_posixpath=True)
)
ret = set()
tree = self.get_tree(tgt_env)
if not tree:
return ret
if self.root(tgt_env):
try:
oid = tree[self.root(tgt_env)].oid
tree = self.repo[oid]
except KeyError:
return ret
if not isinstance(tree, pygit2.Tree):
return ret
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
blobs = []
if tree:
_traverse(tree, blobs, self.root(tgt_env))
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for blob in blobs:
ret.add(add_mountpoint(relpath(blob)))
if self.mountpoint(tgt_env):
ret.add(self.mountpoint(tgt_env))
return ret | [
"def",
"dir_list",
"(",
"self",
",",
"tgt_env",
")",
":",
"def",
"_traverse",
"(",
"tree",
",",
"blobs",
",",
"prefix",
")",
":",
"'''\n Traverse through a pygit2 Tree object recursively, accumulating all\n the empty directories within it in the \"blobs\" li... | Get a list of directories for the target environment using pygit2 | [
"Get",
"a",
"list",
"of",
"directories",
"for",
"the",
"target",
"environment",
"using",
"pygit2"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1668-L1717 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2._fetch | def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
origin = self.repo.remotes[0]
refs_pre = self.repo.listall_references()
fetch_kwargs = {}
# pygit2 radically changed fetchiing in 0.23.2
if self.remotecallbacks is not None:
fetch_kwargs['callbacks'] = self.remotecallbacks
else:
if self.credentials is not None:
origin.credentials = self.credentials
try:
fetch_kwargs['prune'] = pygit2.GIT_FETCH_PRUNE
except AttributeError:
# pruning only available in pygit2 >= 0.26.2
pass
try:
fetch_results = origin.fetch(**fetch_kwargs)
except GitError as exc:
exc_str = get_error_message(exc).lower()
if 'unsupported url protocol' in exc_str \
and isinstance(self.credentials, pygit2.Keypair):
log.error(
'Unable to fetch SSH-based %s remote \'%s\'. '
'You may need to add ssh:// to the repo string or '
'libgit2 must be compiled with libssh2 to support '
'SSH authentication.', self.role, self.id,
exc_info=True
)
elif 'authentication required but no callback set' in exc_str:
log.error(
'%s remote \'%s\' requires authentication, but no '
'authentication configured', self.role, self.id,
exc_info=True
)
else:
log.error(
'Error occurred fetching %s remote \'%s\': %s',
self.role, self.id, exc,
exc_info=True
)
return False
try:
# pygit2.Remote.fetch() returns a dict in pygit2 < 0.21.0
received_objects = fetch_results['received_objects']
except (AttributeError, TypeError):
# pygit2.Remote.fetch() returns a class instance in
# pygit2 >= 0.21.0
received_objects = fetch_results.received_objects
if received_objects != 0:
log.debug(
'%s received %s objects for remote \'%s\'',
self.role, received_objects, self.id
)
else:
log.debug('%s remote \'%s\' is up-to-date', self.role, self.id)
refs_post = self.repo.listall_references()
cleaned = self.clean_stale_refs(local_refs=refs_post)
return True \
if (received_objects or refs_pre != refs_post or cleaned) \
else None | python | def _fetch(self):
'''
Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False.
'''
origin = self.repo.remotes[0]
refs_pre = self.repo.listall_references()
fetch_kwargs = {}
# pygit2 radically changed fetchiing in 0.23.2
if self.remotecallbacks is not None:
fetch_kwargs['callbacks'] = self.remotecallbacks
else:
if self.credentials is not None:
origin.credentials = self.credentials
try:
fetch_kwargs['prune'] = pygit2.GIT_FETCH_PRUNE
except AttributeError:
# pruning only available in pygit2 >= 0.26.2
pass
try:
fetch_results = origin.fetch(**fetch_kwargs)
except GitError as exc:
exc_str = get_error_message(exc).lower()
if 'unsupported url protocol' in exc_str \
and isinstance(self.credentials, pygit2.Keypair):
log.error(
'Unable to fetch SSH-based %s remote \'%s\'. '
'You may need to add ssh:// to the repo string or '
'libgit2 must be compiled with libssh2 to support '
'SSH authentication.', self.role, self.id,
exc_info=True
)
elif 'authentication required but no callback set' in exc_str:
log.error(
'%s remote \'%s\' requires authentication, but no '
'authentication configured', self.role, self.id,
exc_info=True
)
else:
log.error(
'Error occurred fetching %s remote \'%s\': %s',
self.role, self.id, exc,
exc_info=True
)
return False
try:
# pygit2.Remote.fetch() returns a dict in pygit2 < 0.21.0
received_objects = fetch_results['received_objects']
except (AttributeError, TypeError):
# pygit2.Remote.fetch() returns a class instance in
# pygit2 >= 0.21.0
received_objects = fetch_results.received_objects
if received_objects != 0:
log.debug(
'%s received %s objects for remote \'%s\'',
self.role, received_objects, self.id
)
else:
log.debug('%s remote \'%s\' is up-to-date', self.role, self.id)
refs_post = self.repo.listall_references()
cleaned = self.clean_stale_refs(local_refs=refs_post)
return True \
if (received_objects or refs_pre != refs_post or cleaned) \
else None | [
"def",
"_fetch",
"(",
"self",
")",
":",
"origin",
"=",
"self",
".",
"repo",
".",
"remotes",
"[",
"0",
"]",
"refs_pre",
"=",
"self",
".",
"repo",
".",
"listall_references",
"(",
")",
"fetch_kwargs",
"=",
"{",
"}",
"# pygit2 radically changed fetchiing in 0.23... | Fetch the repo. If the local copy was updated, return True. If the
local copy was already up-to-date, return False. | [
"Fetch",
"the",
"repo",
".",
"If",
"the",
"local",
"copy",
"was",
"updated",
"return",
"True",
".",
"If",
"the",
"local",
"copy",
"was",
"already",
"up",
"-",
"to",
"-",
"date",
"return",
"False",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1727-L1790 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.file_list | def file_list(self, tgt_env):
'''
Get file list for the target environment using pygit2
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all
the file paths and symlink info in the "blobs" dict
'''
for entry in iter(tree):
if entry.oid not in self.repo:
# Entry is a submodule, skip it
continue
obj = self.repo[entry.oid]
if isinstance(obj, pygit2.Blob):
repo_path = salt.utils.path.join(
prefix, entry.name, use_posixpath=True)
blobs.setdefault('files', []).append(repo_path)
if stat.S_ISLNK(tree[entry.name].filemode):
link_tgt = self.repo[tree[entry.name].oid].data
blobs.setdefault('symlinks', {})[repo_path] = link_tgt
elif isinstance(obj, pygit2.Tree):
_traverse(
obj, blobs, salt.utils.path.join(
prefix, entry.name, use_posixpath=True)
)
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return files, symlinks
if self.root(tgt_env):
try:
# This might need to be changed to account for a root that
# spans more than one directory
oid = tree[self.root(tgt_env)].oid
tree = self.repo[oid]
except KeyError:
return files, symlinks
if not isinstance(tree, pygit2.Tree):
return files, symlinks
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
blobs = {}
if tree:
_traverse(tree, blobs, self.root(tgt_env))
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for repo_path in blobs.get('files', []):
files.add(add_mountpoint(relpath(repo_path)))
for repo_path, link_tgt in six.iteritems(blobs.get('symlinks', {})):
symlinks[add_mountpoint(relpath(repo_path))] = link_tgt
return files, symlinks | python | def file_list(self, tgt_env):
'''
Get file list for the target environment using pygit2
'''
def _traverse(tree, blobs, prefix):
'''
Traverse through a pygit2 Tree object recursively, accumulating all
the file paths and symlink info in the "blobs" dict
'''
for entry in iter(tree):
if entry.oid not in self.repo:
# Entry is a submodule, skip it
continue
obj = self.repo[entry.oid]
if isinstance(obj, pygit2.Blob):
repo_path = salt.utils.path.join(
prefix, entry.name, use_posixpath=True)
blobs.setdefault('files', []).append(repo_path)
if stat.S_ISLNK(tree[entry.name].filemode):
link_tgt = self.repo[tree[entry.name].oid].data
blobs.setdefault('symlinks', {})[repo_path] = link_tgt
elif isinstance(obj, pygit2.Tree):
_traverse(
obj, blobs, salt.utils.path.join(
prefix, entry.name, use_posixpath=True)
)
files = set()
symlinks = {}
tree = self.get_tree(tgt_env)
if not tree:
# Not found, return empty objects
return files, symlinks
if self.root(tgt_env):
try:
# This might need to be changed to account for a root that
# spans more than one directory
oid = tree[self.root(tgt_env)].oid
tree = self.repo[oid]
except KeyError:
return files, symlinks
if not isinstance(tree, pygit2.Tree):
return files, symlinks
relpath = lambda path: os.path.relpath(path, self.root(tgt_env))
else:
relpath = lambda path: path
blobs = {}
if tree:
_traverse(tree, blobs, self.root(tgt_env))
add_mountpoint = lambda path: salt.utils.path.join(
self.mountpoint(tgt_env), path, use_posixpath=True)
for repo_path in blobs.get('files', []):
files.add(add_mountpoint(relpath(repo_path)))
for repo_path, link_tgt in six.iteritems(blobs.get('symlinks', {})):
symlinks[add_mountpoint(relpath(repo_path))] = link_tgt
return files, symlinks | [
"def",
"file_list",
"(",
"self",
",",
"tgt_env",
")",
":",
"def",
"_traverse",
"(",
"tree",
",",
"blobs",
",",
"prefix",
")",
":",
"'''\n Traverse through a pygit2 Tree object recursively, accumulating all\n the file paths and symlink info in the \"blobs\" d... | Get file list for the target environment using pygit2 | [
"Get",
"file",
"list",
"for",
"the",
"target",
"environment",
"using",
"pygit2"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1792-L1847 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.find_file | def find_file(self, path, tgt_env):
'''
Find the specified file in the specified environment
'''
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found in repo
return None, None, None
blob = None
mode = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
blob = None
break
try:
entry = tree[path]
mode = entry.filemode
if stat.S_ISLNK(mode):
# Path is a symlink. The blob data corresponding to this
# path's object ID will be the target of the symlink. Follow
# the symlink and set path to the location indicated
# in the blob data.
link_tgt = self.repo[entry.oid].data
path = salt.utils.path.join(
os.path.dirname(path), link_tgt, use_posixpath=True)
else:
blob = self.repo[entry.oid]
if isinstance(blob, pygit2.Tree):
# Path is a directory, not a file.
blob = None
break
except KeyError:
blob = None
break
if isinstance(blob, pygit2.Blob):
return blob, blob.hex, mode
return None, None, None | python | def find_file(self, path, tgt_env):
'''
Find the specified file in the specified environment
'''
tree = self.get_tree(tgt_env)
if not tree:
# Branch/tag/SHA not found in repo
return None, None, None
blob = None
mode = None
depth = 0
while True:
depth += 1
if depth > SYMLINK_RECURSE_DEPTH:
blob = None
break
try:
entry = tree[path]
mode = entry.filemode
if stat.S_ISLNK(mode):
# Path is a symlink. The blob data corresponding to this
# path's object ID will be the target of the symlink. Follow
# the symlink and set path to the location indicated
# in the blob data.
link_tgt = self.repo[entry.oid].data
path = salt.utils.path.join(
os.path.dirname(path), link_tgt, use_posixpath=True)
else:
blob = self.repo[entry.oid]
if isinstance(blob, pygit2.Tree):
# Path is a directory, not a file.
blob = None
break
except KeyError:
blob = None
break
if isinstance(blob, pygit2.Blob):
return blob, blob.hex, mode
return None, None, None | [
"def",
"find_file",
"(",
"self",
",",
"path",
",",
"tgt_env",
")",
":",
"tree",
"=",
"self",
".",
"get_tree",
"(",
"tgt_env",
")",
"if",
"not",
"tree",
":",
"# Branch/tag/SHA not found in repo",
"return",
"None",
",",
"None",
",",
"None",
"blob",
"=",
"N... | Find the specified file in the specified environment | [
"Find",
"the",
"specified",
"file",
"in",
"the",
"specified",
"environment"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1849-L1887 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.get_tree_from_branch | def get_tree_from_branch(self, ref):
'''
Return a pygit2.Tree object matching a head ref fetched into
refs/remotes/origin/
'''
try:
return self.peel(self.repo.lookup_reference(
'refs/remotes/origin/{0}'.format(ref))).tree
except KeyError:
return None | python | def get_tree_from_branch(self, ref):
'''
Return a pygit2.Tree object matching a head ref fetched into
refs/remotes/origin/
'''
try:
return self.peel(self.repo.lookup_reference(
'refs/remotes/origin/{0}'.format(ref))).tree
except KeyError:
return None | [
"def",
"get_tree_from_branch",
"(",
"self",
",",
"ref",
")",
":",
"try",
":",
"return",
"self",
".",
"peel",
"(",
"self",
".",
"repo",
".",
"lookup_reference",
"(",
"'refs/remotes/origin/{0}'",
".",
"format",
"(",
"ref",
")",
")",
")",
".",
"tree",
"exce... | Return a pygit2.Tree object matching a head ref fetched into
refs/remotes/origin/ | [
"Return",
"a",
"pygit2",
".",
"Tree",
"object",
"matching",
"a",
"head",
"ref",
"fetched",
"into",
"refs",
"/",
"remotes",
"/",
"origin",
"/"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1889-L1898 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.get_tree_from_sha | def get_tree_from_sha(self, ref):
'''
Return a pygit2.Tree object matching a SHA
'''
try:
return self.repo.revparse_single(ref).tree
except (KeyError, TypeError, ValueError, AttributeError):
return None | python | def get_tree_from_sha(self, ref):
'''
Return a pygit2.Tree object matching a SHA
'''
try:
return self.repo.revparse_single(ref).tree
except (KeyError, TypeError, ValueError, AttributeError):
return None | [
"def",
"get_tree_from_sha",
"(",
"self",
",",
"ref",
")",
":",
"try",
":",
"return",
"self",
".",
"repo",
".",
"revparse_single",
"(",
"ref",
")",
".",
"tree",
"except",
"(",
"KeyError",
",",
"TypeError",
",",
"ValueError",
",",
"AttributeError",
")",
":... | Return a pygit2.Tree object matching a SHA | [
"Return",
"a",
"pygit2",
".",
"Tree",
"object",
"matching",
"a",
"SHA"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1910-L1917 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.setup_callbacks | def setup_callbacks(self):
'''
Assign attributes for pygit2 callbacks
'''
if PYGIT2_VERSION >= _LooseVersion('0.23.2'):
self.remotecallbacks = pygit2.RemoteCallbacks(
credentials=self.credentials)
if not self.ssl_verify:
# Override the certificate_check function with a lambda that
# just returns True, thus skipping the cert check.
self.remotecallbacks.certificate_check = \
lambda *args, **kwargs: True
else:
self.remotecallbacks = None
if not self.ssl_verify:
warnings.warn(
'pygit2 does not support disabling the SSL certificate '
'check in versions prior to 0.23.2 (installed: {0}). '
'Fetches for self-signed certificates will fail.'.format(
PYGIT2_VERSION
)
) | python | def setup_callbacks(self):
'''
Assign attributes for pygit2 callbacks
'''
if PYGIT2_VERSION >= _LooseVersion('0.23.2'):
self.remotecallbacks = pygit2.RemoteCallbacks(
credentials=self.credentials)
if not self.ssl_verify:
# Override the certificate_check function with a lambda that
# just returns True, thus skipping the cert check.
self.remotecallbacks.certificate_check = \
lambda *args, **kwargs: True
else:
self.remotecallbacks = None
if not self.ssl_verify:
warnings.warn(
'pygit2 does not support disabling the SSL certificate '
'check in versions prior to 0.23.2 (installed: {0}). '
'Fetches for self-signed certificates will fail.'.format(
PYGIT2_VERSION
)
) | [
"def",
"setup_callbacks",
"(",
"self",
")",
":",
"if",
"PYGIT2_VERSION",
">=",
"_LooseVersion",
"(",
"'0.23.2'",
")",
":",
"self",
".",
"remotecallbacks",
"=",
"pygit2",
".",
"RemoteCallbacks",
"(",
"credentials",
"=",
"self",
".",
"credentials",
")",
"if",
... | Assign attributes for pygit2 callbacks | [
"Assign",
"attributes",
"for",
"pygit2",
"callbacks"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1919-L1940 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.verify_auth | def verify_auth(self):
'''
Check the username and password/keypair info for validity. If valid,
set a 'credentials' attribute consisting of the appropriate Pygit2
credentials object. Return False if a required auth param is not
present. Return True if the required auth parameters are present (or
auth is not configured), otherwise failhard if there is a problem with
authenticaion.
'''
self.credentials = None
if os.path.isabs(self.url):
# If the URL is an absolute file path, there is no authentication.
return True
elif not any(getattr(self, x, None) for x in AUTH_PARAMS):
# Auth information not configured for this remote
return True
def _incomplete_auth(missing):
'''
Helper function to log errors about missing auth parameters
'''
log.critical(
'Incomplete authentication information for %s remote '
'\'%s\'. Missing parameters: %s',
self.role, self.id, ', '.join(missing)
)
failhard(self.role)
def _key_does_not_exist(key_type, path):
'''
Helper function to log errors about missing key file
'''
log.critical(
'SSH %s (%s) for %s remote \'%s\' could not be found, path '
'may be incorrect. Note that it may be necessary to clear '
'git_pillar locks to proceed once this is resolved and the '
'master has been started back up. A warning will be logged '
'if this is the case, with instructions.',
key_type, path, self.role, self.id
)
failhard(self.role)
transport, _, address = self.url.partition('://')
if not address:
# Assume scp-like SSH syntax (user@domain.tld:relative/path.git)
transport = 'ssh'
address = self.url
transport = transport.lower()
if transport in ('git', 'file'):
# These transports do not use auth
return True
elif 'ssh' in transport:
required_params = ('pubkey', 'privkey')
user = address.split('@')[0]
if user == address:
# No '@' sign == no user. This is a problem.
log.critical(
'Keypair specified for %s remote \'%s\', but remote URL '
'is missing a username', self.role, self.id
)
failhard(self.role)
self.user = user
if all(bool(getattr(self, x, None)) for x in required_params):
keypair_params = [getattr(self, x, None) for x in
('user', 'pubkey', 'privkey', 'passphrase')]
# Check pubkey and privkey to make sure file exists
for idx, key_type in ((1, 'pubkey'), (2, 'privkey')):
key_path = keypair_params[idx]
if key_path is not None:
try:
if not os.path.isfile(key_path):
_key_does_not_exist(key_type, key_path)
except TypeError:
_key_does_not_exist(key_type, key_path)
self.credentials = pygit2.Keypair(*keypair_params)
return True
else:
missing_auth = [x for x in required_params
if not bool(getattr(self, x, None))]
_incomplete_auth(missing_auth)
elif 'http' in transport:
required_params = ('user', 'password')
password_ok = all(
bool(getattr(self, x, None)) for x in required_params
)
no_password_auth = not any(
bool(getattr(self, x, None)) for x in required_params
)
if no_password_auth:
# No auth params were passed, assuming this is unauthenticated
# http(s).
return True
if password_ok:
if transport == 'http' and not self.insecure_auth:
log.critical(
'Invalid configuration for %s remote \'%s\'. '
'Authentication is disabled by default on http '
'remotes. Either set %s_insecure_auth to True in the '
'master configuration file, set a per-remote config '
'option named \'insecure_auth\' to True, or use https '
'or ssh-based authentication.',
self.role, self.id, self.role
)
failhard(self.role)
self.credentials = pygit2.UserPass(self.user, self.password)
return True
else:
missing_auth = [x for x in required_params
if not bool(getattr(self, x, None))]
_incomplete_auth(missing_auth)
else:
log.critical(
'Invalid configuration for %s remote \'%s\'. Unsupported '
'transport \'%s\'.', self.role, self.id, transport
)
failhard(self.role) | python | def verify_auth(self):
'''
Check the username and password/keypair info for validity. If valid,
set a 'credentials' attribute consisting of the appropriate Pygit2
credentials object. Return False if a required auth param is not
present. Return True if the required auth parameters are present (or
auth is not configured), otherwise failhard if there is a problem with
authenticaion.
'''
self.credentials = None
if os.path.isabs(self.url):
# If the URL is an absolute file path, there is no authentication.
return True
elif not any(getattr(self, x, None) for x in AUTH_PARAMS):
# Auth information not configured for this remote
return True
def _incomplete_auth(missing):
'''
Helper function to log errors about missing auth parameters
'''
log.critical(
'Incomplete authentication information for %s remote '
'\'%s\'. Missing parameters: %s',
self.role, self.id, ', '.join(missing)
)
failhard(self.role)
def _key_does_not_exist(key_type, path):
'''
Helper function to log errors about missing key file
'''
log.critical(
'SSH %s (%s) for %s remote \'%s\' could not be found, path '
'may be incorrect. Note that it may be necessary to clear '
'git_pillar locks to proceed once this is resolved and the '
'master has been started back up. A warning will be logged '
'if this is the case, with instructions.',
key_type, path, self.role, self.id
)
failhard(self.role)
transport, _, address = self.url.partition('://')
if not address:
# Assume scp-like SSH syntax (user@domain.tld:relative/path.git)
transport = 'ssh'
address = self.url
transport = transport.lower()
if transport in ('git', 'file'):
# These transports do not use auth
return True
elif 'ssh' in transport:
required_params = ('pubkey', 'privkey')
user = address.split('@')[0]
if user == address:
# No '@' sign == no user. This is a problem.
log.critical(
'Keypair specified for %s remote \'%s\', but remote URL '
'is missing a username', self.role, self.id
)
failhard(self.role)
self.user = user
if all(bool(getattr(self, x, None)) for x in required_params):
keypair_params = [getattr(self, x, None) for x in
('user', 'pubkey', 'privkey', 'passphrase')]
# Check pubkey and privkey to make sure file exists
for idx, key_type in ((1, 'pubkey'), (2, 'privkey')):
key_path = keypair_params[idx]
if key_path is not None:
try:
if not os.path.isfile(key_path):
_key_does_not_exist(key_type, key_path)
except TypeError:
_key_does_not_exist(key_type, key_path)
self.credentials = pygit2.Keypair(*keypair_params)
return True
else:
missing_auth = [x for x in required_params
if not bool(getattr(self, x, None))]
_incomplete_auth(missing_auth)
elif 'http' in transport:
required_params = ('user', 'password')
password_ok = all(
bool(getattr(self, x, None)) for x in required_params
)
no_password_auth = not any(
bool(getattr(self, x, None)) for x in required_params
)
if no_password_auth:
# No auth params were passed, assuming this is unauthenticated
# http(s).
return True
if password_ok:
if transport == 'http' and not self.insecure_auth:
log.critical(
'Invalid configuration for %s remote \'%s\'. '
'Authentication is disabled by default on http '
'remotes. Either set %s_insecure_auth to True in the '
'master configuration file, set a per-remote config '
'option named \'insecure_auth\' to True, or use https '
'or ssh-based authentication.',
self.role, self.id, self.role
)
failhard(self.role)
self.credentials = pygit2.UserPass(self.user, self.password)
return True
else:
missing_auth = [x for x in required_params
if not bool(getattr(self, x, None))]
_incomplete_auth(missing_auth)
else:
log.critical(
'Invalid configuration for %s remote \'%s\'. Unsupported '
'transport \'%s\'.', self.role, self.id, transport
)
failhard(self.role) | [
"def",
"verify_auth",
"(",
"self",
")",
":",
"self",
".",
"credentials",
"=",
"None",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"self",
".",
"url",
")",
":",
"# If the URL is an absolute file path, there is no authentication.",
"return",
"True",
"elif",
"not",... | Check the username and password/keypair info for validity. If valid,
set a 'credentials' attribute consisting of the appropriate Pygit2
credentials object. Return False if a required auth param is not
present. Return True if the required auth parameters are present (or
auth is not configured), otherwise failhard if there is a problem with
authenticaion. | [
"Check",
"the",
"username",
"and",
"password",
"/",
"keypair",
"info",
"for",
"validity",
".",
"If",
"valid",
"set",
"a",
"credentials",
"attribute",
"consisting",
"of",
"the",
"appropriate",
"Pygit2",
"credentials",
"object",
".",
"Return",
"False",
"if",
"a"... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L1942-L2063 | train |
saltstack/salt | salt/utils/gitfs.py | Pygit2.write_file | def write_file(self, blob, dest):
'''
Using the blob object, write the file to the destination path
'''
with salt.utils.files.fopen(dest, 'wb+') as fp_:
fp_.write(blob.data) | python | def write_file(self, blob, dest):
'''
Using the blob object, write the file to the destination path
'''
with salt.utils.files.fopen(dest, 'wb+') as fp_:
fp_.write(blob.data) | [
"def",
"write_file",
"(",
"self",
",",
"blob",
",",
"dest",
")",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"dest",
",",
"'wb+'",
")",
"as",
"fp_",
":",
"fp_",
".",
"write",
"(",
"blob",
".",
"data",
")"
] | Using the blob object, write the file to the destination path | [
"Using",
"the",
"blob",
"object",
"write",
"the",
"file",
"to",
"the",
"destination",
"path"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2065-L2070 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.init_remotes | def init_remotes(self, remotes, per_remote_overrides=(),
per_remote_only=PER_REMOTE_ONLY,
global_only=GLOBAL_ONLY):
'''
Initialize remotes
'''
# The global versions of the auth params (gitfs_user,
# gitfs_password, etc.) default to empty strings. If any of them
# are defined and the provider is not one that supports auth, then
# error out and do not proceed.
override_params = copy.deepcopy(per_remote_overrides)
global_auth_params = [
'{0}_{1}'.format(self.role, x) for x in AUTH_PARAMS
if self.opts['{0}_{1}'.format(self.role, x)]
]
if self.provider in AUTH_PROVIDERS:
override_params += AUTH_PARAMS
elif global_auth_params:
msg = (
'{0} authentication was configured, but the \'{1}\' '
'{0}_provider does not support authentication. The '
'providers for which authentication is supported in {0} '
'are: {2}.'.format(
self.role, self.provider, ', '.join(AUTH_PROVIDERS)
)
)
if self.role == 'gitfs':
msg += (
' See the GitFS Walkthrough in the Salt documentation '
'for further information.'
)
log.critical(msg)
failhard(self.role)
per_remote_defaults = {}
global_values = set(override_params)
global_values.update(set(global_only))
for param in global_values:
key = '{0}_{1}'.format(self.role, param)
if key not in self.opts:
log.critical(
'Key \'%s\' not present in global configuration. This is '
'a bug, please report it.', key
)
failhard(self.role)
per_remote_defaults[param] = enforce_types(key, self.opts[key])
self.remotes = []
for remote in remotes:
repo_obj = self.git_providers[self.provider](
self.opts,
remote,
per_remote_defaults,
per_remote_only,
override_params,
self.cache_root,
self.role
)
if hasattr(repo_obj, 'repo'):
# Sanity check and assign the credential parameter
repo_obj.verify_auth()
repo_obj.setup_callbacks()
if self.opts['__role'] == 'minion' and repo_obj.new:
# Perform initial fetch on masterless minion
repo_obj.fetch()
# Reverse map to be used when running envs() to detect the
# available envs.
repo_obj.saltenv_revmap = {}
for saltenv, saltenv_conf in six.iteritems(repo_obj.saltenv):
if 'ref' in saltenv_conf:
ref = saltenv_conf['ref']
repo_obj.saltenv_revmap.setdefault(
ref, []).append(saltenv)
if saltenv == 'base':
# Remove redundant 'ref' config for base saltenv
repo_obj.saltenv[saltenv].pop('ref')
if ref != repo_obj.base:
log.warning(
'The \'base\' environment has been '
'defined in the \'saltenv\' param for %s '
'remote %s and will override the '
'branch/tag specified by %s_base (or a '
'per-remote \'base\' parameter).',
self.role, repo_obj.id, self.role
)
# Rewrite 'base' config param
repo_obj.base = ref
# Build list of all envs defined by ref mappings in the
# per-remote 'saltenv' param. We won't add any matching envs
# from the global saltenv map to the revmap.
all_envs = []
for env_names in six.itervalues(repo_obj.saltenv_revmap):
all_envs.extend(env_names)
# Add the global saltenv map to the reverse map, skipping envs
# explicitly mapped in the per-remote 'saltenv' param.
for key, conf in six.iteritems(repo_obj.global_saltenv):
if key not in all_envs and 'ref' in conf:
repo_obj.saltenv_revmap.setdefault(
conf['ref'], []).append(key)
self.remotes.append(repo_obj)
# Don't allow collisions in cachedir naming
cachedir_map = {}
for repo in self.remotes:
cachedir_map.setdefault(repo.cachedir, []).append(repo.id)
collisions = [x for x in cachedir_map if len(cachedir_map[x]) > 1]
if collisions:
for dirname in collisions:
log.critical(
'The following %s remotes have conflicting cachedirs: '
'%s. Resolve this using a per-remote parameter called '
'\'name\'.', self.role, ', '.join(cachedir_map[dirname])
)
failhard(self.role)
if any(x.new for x in self.remotes):
self.write_remote_map() | python | def init_remotes(self, remotes, per_remote_overrides=(),
per_remote_only=PER_REMOTE_ONLY,
global_only=GLOBAL_ONLY):
'''
Initialize remotes
'''
# The global versions of the auth params (gitfs_user,
# gitfs_password, etc.) default to empty strings. If any of them
# are defined and the provider is not one that supports auth, then
# error out and do not proceed.
override_params = copy.deepcopy(per_remote_overrides)
global_auth_params = [
'{0}_{1}'.format(self.role, x) for x in AUTH_PARAMS
if self.opts['{0}_{1}'.format(self.role, x)]
]
if self.provider in AUTH_PROVIDERS:
override_params += AUTH_PARAMS
elif global_auth_params:
msg = (
'{0} authentication was configured, but the \'{1}\' '
'{0}_provider does not support authentication. The '
'providers for which authentication is supported in {0} '
'are: {2}.'.format(
self.role, self.provider, ', '.join(AUTH_PROVIDERS)
)
)
if self.role == 'gitfs':
msg += (
' See the GitFS Walkthrough in the Salt documentation '
'for further information.'
)
log.critical(msg)
failhard(self.role)
per_remote_defaults = {}
global_values = set(override_params)
global_values.update(set(global_only))
for param in global_values:
key = '{0}_{1}'.format(self.role, param)
if key not in self.opts:
log.critical(
'Key \'%s\' not present in global configuration. This is '
'a bug, please report it.', key
)
failhard(self.role)
per_remote_defaults[param] = enforce_types(key, self.opts[key])
self.remotes = []
for remote in remotes:
repo_obj = self.git_providers[self.provider](
self.opts,
remote,
per_remote_defaults,
per_remote_only,
override_params,
self.cache_root,
self.role
)
if hasattr(repo_obj, 'repo'):
# Sanity check and assign the credential parameter
repo_obj.verify_auth()
repo_obj.setup_callbacks()
if self.opts['__role'] == 'minion' and repo_obj.new:
# Perform initial fetch on masterless minion
repo_obj.fetch()
# Reverse map to be used when running envs() to detect the
# available envs.
repo_obj.saltenv_revmap = {}
for saltenv, saltenv_conf in six.iteritems(repo_obj.saltenv):
if 'ref' in saltenv_conf:
ref = saltenv_conf['ref']
repo_obj.saltenv_revmap.setdefault(
ref, []).append(saltenv)
if saltenv == 'base':
# Remove redundant 'ref' config for base saltenv
repo_obj.saltenv[saltenv].pop('ref')
if ref != repo_obj.base:
log.warning(
'The \'base\' environment has been '
'defined in the \'saltenv\' param for %s '
'remote %s and will override the '
'branch/tag specified by %s_base (or a '
'per-remote \'base\' parameter).',
self.role, repo_obj.id, self.role
)
# Rewrite 'base' config param
repo_obj.base = ref
# Build list of all envs defined by ref mappings in the
# per-remote 'saltenv' param. We won't add any matching envs
# from the global saltenv map to the revmap.
all_envs = []
for env_names in six.itervalues(repo_obj.saltenv_revmap):
all_envs.extend(env_names)
# Add the global saltenv map to the reverse map, skipping envs
# explicitly mapped in the per-remote 'saltenv' param.
for key, conf in six.iteritems(repo_obj.global_saltenv):
if key not in all_envs and 'ref' in conf:
repo_obj.saltenv_revmap.setdefault(
conf['ref'], []).append(key)
self.remotes.append(repo_obj)
# Don't allow collisions in cachedir naming
cachedir_map = {}
for repo in self.remotes:
cachedir_map.setdefault(repo.cachedir, []).append(repo.id)
collisions = [x for x in cachedir_map if len(cachedir_map[x]) > 1]
if collisions:
for dirname in collisions:
log.critical(
'The following %s remotes have conflicting cachedirs: '
'%s. Resolve this using a per-remote parameter called '
'\'name\'.', self.role, ', '.join(cachedir_map[dirname])
)
failhard(self.role)
if any(x.new for x in self.remotes):
self.write_remote_map() | [
"def",
"init_remotes",
"(",
"self",
",",
"remotes",
",",
"per_remote_overrides",
"=",
"(",
")",
",",
"per_remote_only",
"=",
"PER_REMOTE_ONLY",
",",
"global_only",
"=",
"GLOBAL_ONLY",
")",
":",
"# The global versions of the auth params (gitfs_user,",
"# gitfs_password, et... | Initialize remotes | [
"Initialize",
"remotes"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2146-L2269 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.clear_old_remotes | def clear_old_remotes(self):
'''
Remove cache directories for remotes no longer configured
'''
try:
cachedir_ls = os.listdir(self.cache_root)
except OSError:
cachedir_ls = []
# Remove actively-used remotes from list
for repo in self.remotes:
try:
cachedir_ls.remove(repo.cachedir_basename)
except ValueError:
pass
to_remove = []
for item in cachedir_ls:
if item in ('hash', 'refs'):
continue
path = salt.utils.path.join(self.cache_root, item)
if os.path.isdir(path):
to_remove.append(path)
failed = []
if to_remove:
for rdir in to_remove:
try:
shutil.rmtree(rdir)
except OSError as exc:
log.error(
'Unable to remove old %s remote cachedir %s: %s',
self.role, rdir, exc
)
failed.append(rdir)
else:
log.debug('%s removed old cachedir %s', self.role, rdir)
for fdir in failed:
to_remove.remove(fdir)
ret = bool(to_remove)
if ret:
self.write_remote_map()
return ret | python | def clear_old_remotes(self):
'''
Remove cache directories for remotes no longer configured
'''
try:
cachedir_ls = os.listdir(self.cache_root)
except OSError:
cachedir_ls = []
# Remove actively-used remotes from list
for repo in self.remotes:
try:
cachedir_ls.remove(repo.cachedir_basename)
except ValueError:
pass
to_remove = []
for item in cachedir_ls:
if item in ('hash', 'refs'):
continue
path = salt.utils.path.join(self.cache_root, item)
if os.path.isdir(path):
to_remove.append(path)
failed = []
if to_remove:
for rdir in to_remove:
try:
shutil.rmtree(rdir)
except OSError as exc:
log.error(
'Unable to remove old %s remote cachedir %s: %s',
self.role, rdir, exc
)
failed.append(rdir)
else:
log.debug('%s removed old cachedir %s', self.role, rdir)
for fdir in failed:
to_remove.remove(fdir)
ret = bool(to_remove)
if ret:
self.write_remote_map()
return ret | [
"def",
"clear_old_remotes",
"(",
"self",
")",
":",
"try",
":",
"cachedir_ls",
"=",
"os",
".",
"listdir",
"(",
"self",
".",
"cache_root",
")",
"except",
"OSError",
":",
"cachedir_ls",
"=",
"[",
"]",
"# Remove actively-used remotes from list",
"for",
"repo",
"in... | Remove cache directories for remotes no longer configured | [
"Remove",
"cache",
"directories",
"for",
"remotes",
"no",
"longer",
"configured"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2271-L2310 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.clear_cache | def clear_cache(self):
'''
Completely clear cache
'''
errors = []
for rdir in (self.cache_root, self.file_list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except OSError as exc:
errors.append(
'Unable to delete {0}: {1}'.format(rdir, exc)
)
return errors | python | def clear_cache(self):
'''
Completely clear cache
'''
errors = []
for rdir in (self.cache_root, self.file_list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except OSError as exc:
errors.append(
'Unable to delete {0}: {1}'.format(rdir, exc)
)
return errors | [
"def",
"clear_cache",
"(",
"self",
")",
":",
"errors",
"=",
"[",
"]",
"for",
"rdir",
"in",
"(",
"self",
".",
"cache_root",
",",
"self",
".",
"file_list_cachedir",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"rdir",
")",
":",
"try",
":",
... | Completely clear cache | [
"Completely",
"clear",
"cache"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2312-L2325 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.clear_lock | def clear_lock(self, remote=None, lock_type='update'):
'''
Clear update.lk for all remotes
'''
cleared = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except TypeError:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.clear_lock(lock_type=lock_type)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | python | def clear_lock(self, remote=None, lock_type='update'):
'''
Clear update.lk for all remotes
'''
cleared = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except TypeError:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.clear_lock(lock_type=lock_type)
cleared.extend(success)
errors.extend(failed)
return cleared, errors | [
"def",
"clear_lock",
"(",
"self",
",",
"remote",
"=",
"None",
",",
"lock_type",
"=",
"'update'",
")",
":",
"cleared",
"=",
"[",
"]",
"errors",
"=",
"[",
"]",
"for",
"repo",
"in",
"self",
".",
"remotes",
":",
"if",
"remote",
":",
"# Specific remote URL/... | Clear update.lk for all remotes | [
"Clear",
"update",
".",
"lk",
"for",
"all",
"remotes"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2327-L2347 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.fetch_remotes | def fetch_remotes(self, remotes=None):
'''
Fetch all remotes and return a boolean to let the calling function know
whether or not any remotes were updated in the process of fetching
'''
if remotes is None:
remotes = []
elif not isinstance(remotes, list):
log.error(
'Invalid \'remotes\' argument (%s) for fetch_remotes. '
'Must be a list of strings', remotes
)
remotes = []
changed = False
for repo in self.remotes:
name = getattr(repo, 'name', None)
if not remotes or (repo.id, name) in remotes:
try:
if repo.fetch():
# We can't just use the return value from repo.fetch()
# because the data could still have changed if old
# remotes were cleared above. Additionally, we're
# running this in a loop and later remotes without
# changes would override this value and make it
# incorrect.
changed = True
except Exception as exc:
log.error(
'Exception caught while fetching %s remote \'%s\': %s',
self.role, repo.id, exc,
exc_info=True
)
return changed | python | def fetch_remotes(self, remotes=None):
'''
Fetch all remotes and return a boolean to let the calling function know
whether or not any remotes were updated in the process of fetching
'''
if remotes is None:
remotes = []
elif not isinstance(remotes, list):
log.error(
'Invalid \'remotes\' argument (%s) for fetch_remotes. '
'Must be a list of strings', remotes
)
remotes = []
changed = False
for repo in self.remotes:
name = getattr(repo, 'name', None)
if not remotes or (repo.id, name) in remotes:
try:
if repo.fetch():
# We can't just use the return value from repo.fetch()
# because the data could still have changed if old
# remotes were cleared above. Additionally, we're
# running this in a loop and later remotes without
# changes would override this value and make it
# incorrect.
changed = True
except Exception as exc:
log.error(
'Exception caught while fetching %s remote \'%s\': %s',
self.role, repo.id, exc,
exc_info=True
)
return changed | [
"def",
"fetch_remotes",
"(",
"self",
",",
"remotes",
"=",
"None",
")",
":",
"if",
"remotes",
"is",
"None",
":",
"remotes",
"=",
"[",
"]",
"elif",
"not",
"isinstance",
"(",
"remotes",
",",
"list",
")",
":",
"log",
".",
"error",
"(",
"'Invalid \\'remotes... | Fetch all remotes and return a boolean to let the calling function know
whether or not any remotes were updated in the process of fetching | [
"Fetch",
"all",
"remotes",
"and",
"return",
"a",
"boolean",
"to",
"let",
"the",
"calling",
"function",
"know",
"whether",
"or",
"not",
"any",
"remotes",
"were",
"updated",
"in",
"the",
"process",
"of",
"fetching"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2349-L2382 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.lock | def lock(self, remote=None):
'''
Place an update.lk
'''
locked = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except TypeError:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.lock()
locked.extend(success)
errors.extend(failed)
return locked, errors | python | def lock(self, remote=None):
'''
Place an update.lk
'''
locked = []
errors = []
for repo in self.remotes:
if remote:
# Specific remote URL/pattern was passed, ensure that the URL
# matches or else skip this one
try:
if not fnmatch.fnmatch(repo.url, remote):
continue
except TypeError:
# remote was non-string, try again
if not fnmatch.fnmatch(repo.url, six.text_type(remote)):
continue
success, failed = repo.lock()
locked.extend(success)
errors.extend(failed)
return locked, errors | [
"def",
"lock",
"(",
"self",
",",
"remote",
"=",
"None",
")",
":",
"locked",
"=",
"[",
"]",
"errors",
"=",
"[",
"]",
"for",
"repo",
"in",
"self",
".",
"remotes",
":",
"if",
"remote",
":",
"# Specific remote URL/pattern was passed, ensure that the URL",
"# mat... | Place an update.lk | [
"Place",
"an",
"update",
".",
"lk"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2384-L2404 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.update | def update(self, remotes=None):
'''
.. versionchanged:: 2018.3.0
The remotes argument was added. This being a list of remote URLs,
it will only update matching remotes. This actually matches on
repo.id
Execute a git fetch on all of the repos and perform maintenance on the
fileserver cache.
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'gitfs'}
data['changed'] = self.clear_old_remotes()
if self.fetch_remotes(remotes=remotes):
data['changed'] = True
# A masterless minion will need a new env cache file even if no changes
# were fetched.
refresh_env_cache = self.opts['__role'] == 'minion'
if data['changed'] is True or not os.path.isfile(self.env_cache):
env_cachedir = os.path.dirname(self.env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
refresh_env_cache = True
if refresh_env_cache:
new_envs = self.envs(ignore_cache=True)
serial = salt.payload.Serial(self.opts)
with salt.utils.files.fopen(self.env_cache, 'wb+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to %s', self.env_cache)
# if there is a change, fire an event
if self.opts.get('fileserver_events', False):
event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
event.fire_event(
data,
tagify(['gitfs', 'update'], prefix='fileserver')
)
try:
salt.fileserver.reap_fileserver_cache_dir(
self.hash_cachedir,
self.find_file
)
except (OSError, IOError):
# Hash file won't exist if no files have yet been served up
pass | python | def update(self, remotes=None):
'''
.. versionchanged:: 2018.3.0
The remotes argument was added. This being a list of remote URLs,
it will only update matching remotes. This actually matches on
repo.id
Execute a git fetch on all of the repos and perform maintenance on the
fileserver cache.
'''
# data for the fileserver event
data = {'changed': False,
'backend': 'gitfs'}
data['changed'] = self.clear_old_remotes()
if self.fetch_remotes(remotes=remotes):
data['changed'] = True
# A masterless minion will need a new env cache file even if no changes
# were fetched.
refresh_env_cache = self.opts['__role'] == 'minion'
if data['changed'] is True or not os.path.isfile(self.env_cache):
env_cachedir = os.path.dirname(self.env_cache)
if not os.path.exists(env_cachedir):
os.makedirs(env_cachedir)
refresh_env_cache = True
if refresh_env_cache:
new_envs = self.envs(ignore_cache=True)
serial = salt.payload.Serial(self.opts)
with salt.utils.files.fopen(self.env_cache, 'wb+') as fp_:
fp_.write(serial.dumps(new_envs))
log.trace('Wrote env cache data to %s', self.env_cache)
# if there is a change, fire an event
if self.opts.get('fileserver_events', False):
event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
event.fire_event(
data,
tagify(['gitfs', 'update'], prefix='fileserver')
)
try:
salt.fileserver.reap_fileserver_cache_dir(
self.hash_cachedir,
self.find_file
)
except (OSError, IOError):
# Hash file won't exist if no files have yet been served up
pass | [
"def",
"update",
"(",
"self",
",",
"remotes",
"=",
"None",
")",
":",
"# data for the fileserver event",
"data",
"=",
"{",
"'changed'",
":",
"False",
",",
"'backend'",
":",
"'gitfs'",
"}",
"data",
"[",
"'changed'",
"]",
"=",
"self",
".",
"clear_old_remotes",
... | .. versionchanged:: 2018.3.0
The remotes argument was added. This being a list of remote URLs,
it will only update matching remotes. This actually matches on
repo.id
Execute a git fetch on all of the repos and perform maintenance on the
fileserver cache. | [
"..",
"versionchanged",
"::",
"2018",
".",
"3",
".",
"0",
"The",
"remotes",
"argument",
"was",
"added",
".",
"This",
"being",
"a",
"list",
"of",
"remote",
"URLs",
"it",
"will",
"only",
"update",
"matching",
"remotes",
".",
"This",
"actually",
"matches",
... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2406-L2460 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.update_intervals | def update_intervals(self):
'''
Returns a dictionary mapping remote IDs to their intervals, designed to
be used for variable update intervals in salt.master.FileserverUpdate.
A remote's ID is defined here as a tuple of the GitPython/Pygit2
object's "id" and "name" attributes, with None being assumed as the
"name" value if the attribute is not present.
'''
return {(repo.id, getattr(repo, 'name', None)): repo.update_interval
for repo in self.remotes} | python | def update_intervals(self):
'''
Returns a dictionary mapping remote IDs to their intervals, designed to
be used for variable update intervals in salt.master.FileserverUpdate.
A remote's ID is defined here as a tuple of the GitPython/Pygit2
object's "id" and "name" attributes, with None being assumed as the
"name" value if the attribute is not present.
'''
return {(repo.id, getattr(repo, 'name', None)): repo.update_interval
for repo in self.remotes} | [
"def",
"update_intervals",
"(",
"self",
")",
":",
"return",
"{",
"(",
"repo",
".",
"id",
",",
"getattr",
"(",
"repo",
",",
"'name'",
",",
"None",
")",
")",
":",
"repo",
".",
"update_interval",
"for",
"repo",
"in",
"self",
".",
"remotes",
"}"
] | Returns a dictionary mapping remote IDs to their intervals, designed to
be used for variable update intervals in salt.master.FileserverUpdate.
A remote's ID is defined here as a tuple of the GitPython/Pygit2
object's "id" and "name" attributes, with None being assumed as the
"name" value if the attribute is not present. | [
"Returns",
"a",
"dictionary",
"mapping",
"remote",
"IDs",
"to",
"their",
"intervals",
"designed",
"to",
"be",
"used",
"for",
"variable",
"update",
"intervals",
"in",
"salt",
".",
"master",
".",
"FileserverUpdate",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2462-L2472 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.verify_provider | def verify_provider(self):
'''
Determine which provider to use
'''
if 'verified_{0}_provider'.format(self.role) in self.opts:
self.provider = self.opts['verified_{0}_provider'.format(self.role)]
else:
desired_provider = self.opts.get('{0}_provider'.format(self.role))
if not desired_provider:
if self.verify_pygit2(quiet=True):
self.provider = 'pygit2'
elif self.verify_gitpython(quiet=True):
self.provider = 'gitpython'
else:
# Ensure non-lowercase providers work
try:
desired_provider = desired_provider.lower()
except AttributeError:
# Should only happen if someone does something silly like
# set the provider to a numeric value.
desired_provider = six.text_type(desired_provider).lower()
if desired_provider not in self.git_providers:
log.critical(
'Invalid %s_provider \'%s\'. Valid choices are: %s',
self.role,
desired_provider,
', '.join(self.git_providers)
)
failhard(self.role)
elif desired_provider == 'pygit2' and self.verify_pygit2():
self.provider = 'pygit2'
elif desired_provider == 'gitpython' and self.verify_gitpython():
self.provider = 'gitpython'
if not hasattr(self, 'provider'):
log.critical(
'No suitable %s provider module is installed.', self.role
)
failhard(self.role) | python | def verify_provider(self):
'''
Determine which provider to use
'''
if 'verified_{0}_provider'.format(self.role) in self.opts:
self.provider = self.opts['verified_{0}_provider'.format(self.role)]
else:
desired_provider = self.opts.get('{0}_provider'.format(self.role))
if not desired_provider:
if self.verify_pygit2(quiet=True):
self.provider = 'pygit2'
elif self.verify_gitpython(quiet=True):
self.provider = 'gitpython'
else:
# Ensure non-lowercase providers work
try:
desired_provider = desired_provider.lower()
except AttributeError:
# Should only happen if someone does something silly like
# set the provider to a numeric value.
desired_provider = six.text_type(desired_provider).lower()
if desired_provider not in self.git_providers:
log.critical(
'Invalid %s_provider \'%s\'. Valid choices are: %s',
self.role,
desired_provider,
', '.join(self.git_providers)
)
failhard(self.role)
elif desired_provider == 'pygit2' and self.verify_pygit2():
self.provider = 'pygit2'
elif desired_provider == 'gitpython' and self.verify_gitpython():
self.provider = 'gitpython'
if not hasattr(self, 'provider'):
log.critical(
'No suitable %s provider module is installed.', self.role
)
failhard(self.role) | [
"def",
"verify_provider",
"(",
"self",
")",
":",
"if",
"'verified_{0}_provider'",
".",
"format",
"(",
"self",
".",
"role",
")",
"in",
"self",
".",
"opts",
":",
"self",
".",
"provider",
"=",
"self",
".",
"opts",
"[",
"'verified_{0}_provider'",
".",
"format"... | Determine which provider to use | [
"Determine",
"which",
"provider",
"to",
"use"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2474-L2511 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.verify_gitpython | def verify_gitpython(self, quiet=False):
'''
Check if GitPython is available and at a compatible version (>= 0.3.0)
'''
def _recommend():
if PYGIT2_VERSION and 'pygit2' in self.git_providers:
log.error(_RECOMMEND_PYGIT2, self.role, self.role)
if not GITPYTHON_VERSION:
if not quiet:
log.error(
'%s is configured but could not be loaded, is GitPython '
'installed?', self.role
)
_recommend()
return False
elif 'gitpython' not in self.git_providers:
return False
errors = []
if GITPYTHON_VERSION < GITPYTHON_MINVER:
errors.append(
'{0} is configured, but the GitPython version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
GITPYTHON_MINVER,
GITPYTHON_VERSION
)
)
if not salt.utils.path.which('git'):
errors.append(
'The git command line utility is required when using the '
'\'gitpython\' {0}_provider.'.format(self.role)
)
if errors:
for error in errors:
log.error(error)
if not quiet:
_recommend()
return False
self.opts['verified_{0}_provider'.format(self.role)] = 'gitpython'
log.debug('gitpython %s_provider enabled', self.role)
return True | python | def verify_gitpython(self, quiet=False):
'''
Check if GitPython is available and at a compatible version (>= 0.3.0)
'''
def _recommend():
if PYGIT2_VERSION and 'pygit2' in self.git_providers:
log.error(_RECOMMEND_PYGIT2, self.role, self.role)
if not GITPYTHON_VERSION:
if not quiet:
log.error(
'%s is configured but could not be loaded, is GitPython '
'installed?', self.role
)
_recommend()
return False
elif 'gitpython' not in self.git_providers:
return False
errors = []
if GITPYTHON_VERSION < GITPYTHON_MINVER:
errors.append(
'{0} is configured, but the GitPython version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
GITPYTHON_MINVER,
GITPYTHON_VERSION
)
)
if not salt.utils.path.which('git'):
errors.append(
'The git command line utility is required when using the '
'\'gitpython\' {0}_provider.'.format(self.role)
)
if errors:
for error in errors:
log.error(error)
if not quiet:
_recommend()
return False
self.opts['verified_{0}_provider'.format(self.role)] = 'gitpython'
log.debug('gitpython %s_provider enabled', self.role)
return True | [
"def",
"verify_gitpython",
"(",
"self",
",",
"quiet",
"=",
"False",
")",
":",
"def",
"_recommend",
"(",
")",
":",
"if",
"PYGIT2_VERSION",
"and",
"'pygit2'",
"in",
"self",
".",
"git_providers",
":",
"log",
".",
"error",
"(",
"_RECOMMEND_PYGIT2",
",",
"self"... | Check if GitPython is available and at a compatible version (>= 0.3.0) | [
"Check",
"if",
"GitPython",
"is",
"available",
"and",
"at",
"a",
"compatible",
"version",
"(",
">",
"=",
"0",
".",
"3",
".",
"0",
")"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2513-L2557 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.verify_pygit2 | def verify_pygit2(self, quiet=False):
'''
Check if pygit2/libgit2 are available and at a compatible version.
Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0.
'''
def _recommend():
if GITPYTHON_VERSION and 'gitpython' in self.git_providers:
log.error(_RECOMMEND_GITPYTHON, self.role, self.role)
if not PYGIT2_VERSION:
if not quiet:
log.error(
'%s is configured but could not be loaded, are pygit2 '
'and libgit2 installed?', self.role
)
_recommend()
return False
elif 'pygit2' not in self.git_providers:
return False
errors = []
if PYGIT2_VERSION < PYGIT2_MINVER:
errors.append(
'{0} is configured, but the pygit2 version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
PYGIT2_MINVER,
PYGIT2_VERSION
)
)
if LIBGIT2_VERSION < LIBGIT2_MINVER:
errors.append(
'{0} is configured, but the libgit2 version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
LIBGIT2_MINVER,
LIBGIT2_VERSION
)
)
if not getattr(pygit2, 'GIT_FETCH_PRUNE', False) \
and not salt.utils.path.which('git'):
errors.append(
'The git command line utility is required when using the '
'\'pygit2\' {0}_provider.'.format(self.role)
)
if errors:
for error in errors:
log.error(error)
if not quiet:
_recommend()
return False
self.opts['verified_{0}_provider'.format(self.role)] = 'pygit2'
log.debug('pygit2 %s_provider enabled', self.role)
return True | python | def verify_pygit2(self, quiet=False):
'''
Check if pygit2/libgit2 are available and at a compatible version.
Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0.
'''
def _recommend():
if GITPYTHON_VERSION and 'gitpython' in self.git_providers:
log.error(_RECOMMEND_GITPYTHON, self.role, self.role)
if not PYGIT2_VERSION:
if not quiet:
log.error(
'%s is configured but could not be loaded, are pygit2 '
'and libgit2 installed?', self.role
)
_recommend()
return False
elif 'pygit2' not in self.git_providers:
return False
errors = []
if PYGIT2_VERSION < PYGIT2_MINVER:
errors.append(
'{0} is configured, but the pygit2 version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
PYGIT2_MINVER,
PYGIT2_VERSION
)
)
if LIBGIT2_VERSION < LIBGIT2_MINVER:
errors.append(
'{0} is configured, but the libgit2 version is earlier than '
'{1}. Version {2} detected.'.format(
self.role,
LIBGIT2_MINVER,
LIBGIT2_VERSION
)
)
if not getattr(pygit2, 'GIT_FETCH_PRUNE', False) \
and not salt.utils.path.which('git'):
errors.append(
'The git command line utility is required when using the '
'\'pygit2\' {0}_provider.'.format(self.role)
)
if errors:
for error in errors:
log.error(error)
if not quiet:
_recommend()
return False
self.opts['verified_{0}_provider'.format(self.role)] = 'pygit2'
log.debug('pygit2 %s_provider enabled', self.role)
return True | [
"def",
"verify_pygit2",
"(",
"self",
",",
"quiet",
"=",
"False",
")",
":",
"def",
"_recommend",
"(",
")",
":",
"if",
"GITPYTHON_VERSION",
"and",
"'gitpython'",
"in",
"self",
".",
"git_providers",
":",
"log",
".",
"error",
"(",
"_RECOMMEND_GITPYTHON",
",",
... | Check if pygit2/libgit2 are available and at a compatible version.
Pygit2 must be at least 0.20.3 and libgit2 must be at least 0.20.0. | [
"Check",
"if",
"pygit2",
"/",
"libgit2",
"are",
"available",
"and",
"at",
"a",
"compatible",
"version",
".",
"Pygit2",
"must",
"be",
"at",
"least",
"0",
".",
"20",
".",
"3",
"and",
"libgit2",
"must",
"be",
"at",
"least",
"0",
".",
"20",
".",
"0",
"... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2559-L2614 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.write_remote_map | def write_remote_map(self):
'''
Write the remote_map.txt
'''
remote_map = salt.utils.path.join(self.cache_root, 'remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = \
datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write(
'# {0}_remote map as of {1}\n'.format(
self.role,
timestamp
)
)
for repo in self.remotes:
fp_.write(
salt.utils.stringutils.to_str(
'{0} = {1}\n'.format(
repo.cachedir_basename,
repo.id
)
)
)
except OSError:
pass
else:
log.info('Wrote new %s remote map to %s', self.role, remote_map) | python | def write_remote_map(self):
'''
Write the remote_map.txt
'''
remote_map = salt.utils.path.join(self.cache_root, 'remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = \
datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write(
'# {0}_remote map as of {1}\n'.format(
self.role,
timestamp
)
)
for repo in self.remotes:
fp_.write(
salt.utils.stringutils.to_str(
'{0} = {1}\n'.format(
repo.cachedir_basename,
repo.id
)
)
)
except OSError:
pass
else:
log.info('Wrote new %s remote map to %s', self.role, remote_map) | [
"def",
"write_remote_map",
"(",
"self",
")",
":",
"remote_map",
"=",
"salt",
".",
"utils",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache_root",
",",
"'remote_map.txt'",
")",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"("... | Write the remote_map.txt | [
"Write",
"the",
"remote_map",
".",
"txt"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2616-L2643 | train |
saltstack/salt | salt/utils/gitfs.py | GitBase.do_checkout | def do_checkout(self, repo):
'''
Common code for git_pillar/winrepo to handle locking and checking out
of a repo.
'''
time_start = time.time()
while time.time() - time_start <= 5:
try:
return repo.checkout()
except GitLockError as exc:
if exc.errno == errno.EEXIST:
time.sleep(0.1)
continue
else:
log.error(
'Error %d encountered while obtaining checkout '
'lock for %s remote \'%s\': %s',
exc.errno,
repo.role,
repo.id,
exc,
exc_info=True
)
break
else:
log.error(
'Timed out waiting for checkout lock to be released for '
'%s remote \'%s\'. If this error persists, run \'salt-run '
'cache.clear_git_lock %s type=checkout\' to clear it.',
self.role, repo.id, self.role
)
return None | python | def do_checkout(self, repo):
'''
Common code for git_pillar/winrepo to handle locking and checking out
of a repo.
'''
time_start = time.time()
while time.time() - time_start <= 5:
try:
return repo.checkout()
except GitLockError as exc:
if exc.errno == errno.EEXIST:
time.sleep(0.1)
continue
else:
log.error(
'Error %d encountered while obtaining checkout '
'lock for %s remote \'%s\': %s',
exc.errno,
repo.role,
repo.id,
exc,
exc_info=True
)
break
else:
log.error(
'Timed out waiting for checkout lock to be released for '
'%s remote \'%s\'. If this error persists, run \'salt-run '
'cache.clear_git_lock %s type=checkout\' to clear it.',
self.role, repo.id, self.role
)
return None | [
"def",
"do_checkout",
"(",
"self",
",",
"repo",
")",
":",
"time_start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"time",
".",
"time",
"(",
")",
"-",
"time_start",
"<=",
"5",
":",
"try",
":",
"return",
"repo",
".",
"checkout",
"(",
")",
"except"... | Common code for git_pillar/winrepo to handle locking and checking out
of a repo. | [
"Common",
"code",
"for",
"git_pillar",
"/",
"winrepo",
"to",
"handle",
"locking",
"and",
"checking",
"out",
"of",
"a",
"repo",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2645-L2676 | train |
saltstack/salt | salt/utils/gitfs.py | GitFS.envs | def envs(self, ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
cache_match = salt.fileserver.check_env_cache(
self.opts,
self.env_cache
)
if cache_match is not None:
return cache_match
ret = set()
for repo in self.remotes:
repo_envs = repo.envs()
for env_list in six.itervalues(repo.saltenv_revmap):
repo_envs.update(env_list)
ret.update([x for x in repo_envs if repo.env_is_exposed(x)])
return sorted(ret) | python | def envs(self, ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
cache_match = salt.fileserver.check_env_cache(
self.opts,
self.env_cache
)
if cache_match is not None:
return cache_match
ret = set()
for repo in self.remotes:
repo_envs = repo.envs()
for env_list in six.itervalues(repo.saltenv_revmap):
repo_envs.update(env_list)
ret.update([x for x in repo_envs if repo.env_is_exposed(x)])
return sorted(ret) | [
"def",
"envs",
"(",
"self",
",",
"ignore_cache",
"=",
"False",
")",
":",
"if",
"not",
"ignore_cache",
":",
"cache_match",
"=",
"salt",
".",
"fileserver",
".",
"check_env_cache",
"(",
"self",
".",
"opts",
",",
"self",
".",
"env_cache",
")",
"if",
"cache_m... | Return a list of refs that can be used as environments | [
"Return",
"a",
"list",
"of",
"refs",
"that",
"can",
"be",
"used",
"as",
"environments"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2735-L2752 | train |
saltstack/salt | salt/utils/gitfs.py | GitFS.find_file | def find_file(self, path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref, read the file out of git
and send the path to the newly cached file
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or \
(not salt.utils.stringutils.is_hex(tgt_env) and tgt_env not in self.envs()):
return fnd
dest = salt.utils.path.join(self.cache_root, 'refs', tgt_env, path)
hashes_glob = salt.utils.path.join(self.hash_cachedir,
tgt_env,
'{0}.hash.*'.format(path))
blobshadest = salt.utils.path.join(self.hash_cachedir,
tgt_env,
'{0}.hash.blob_sha1'.format(path))
lk_fn = salt.utils.path.join(self.hash_cachedir,
tgt_env,
'{0}.lk'.format(path))
destdir = os.path.dirname(dest)
hashdir = os.path.dirname(blobshadest)
if not os.path.isdir(destdir):
try:
os.makedirs(destdir)
except OSError:
# Path exists and is a file, remove it and retry
os.remove(destdir)
os.makedirs(destdir)
if not os.path.isdir(hashdir):
try:
os.makedirs(hashdir)
except OSError:
# Path exists and is a file, remove it and retry
os.remove(hashdir)
os.makedirs(hashdir)
for repo in self.remotes:
if repo.mountpoint(tgt_env) \
and not path.startswith(repo.mountpoint(tgt_env) + os.sep):
continue
repo_path = path[len(repo.mountpoint(tgt_env)):].lstrip(os.sep)
if repo.root(tgt_env):
repo_path = salt.utils.path.join(repo.root(tgt_env), repo_path)
blob, blob_hexsha, blob_mode = repo.find_file(repo_path, tgt_env)
if blob is None:
continue
def _add_file_stat(fnd, mode):
'''
Add a the mode to the return dict. In other fileserver backends
we stat the file to get its mode, and add the stat result
(passed through list() for better serialization) to the 'stat'
key in the return dict. However, since we aren't using the
stat result for anything but the mode at this time, we can
avoid unnecessary work by just manually creating the list and
not running an os.stat() on all files in the repo.
'''
if mode is not None:
fnd['stat'] = [mode]
return fnd
salt.fileserver.wait_lock(lk_fn, dest)
try:
with salt.utils.files.fopen(blobshadest, 'r') as fp_:
sha = salt.utils.stringutils.to_unicode(fp_.read())
if sha == blob_hexsha:
fnd['rel'] = path
fnd['path'] = dest
return _add_file_stat(fnd, blob_mode)
except IOError as exc:
if exc.errno != errno.ENOENT:
raise exc
with salt.utils.files.fopen(lk_fn, 'w'):
pass
for filename in glob.glob(hashes_glob):
try:
os.remove(filename)
except Exception:
pass
# Write contents of file to their destination in the FS cache
repo.write_file(blob, dest)
with salt.utils.files.fopen(blobshadest, 'w+') as fp_:
fp_.write(blob_hexsha)
try:
os.remove(lk_fn)
except OSError:
pass
fnd['rel'] = path
fnd['path'] = dest
return _add_file_stat(fnd, blob_mode)
# No matching file was found in tgt_env. Return a dict with empty paths
# so the calling function knows the file could not be found.
return fnd | python | def find_file(self, path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Find the first file to match the path and ref, read the file out of git
and send the path to the newly cached file
'''
fnd = {'path': '',
'rel': ''}
if os.path.isabs(path) or \
(not salt.utils.stringutils.is_hex(tgt_env) and tgt_env not in self.envs()):
return fnd
dest = salt.utils.path.join(self.cache_root, 'refs', tgt_env, path)
hashes_glob = salt.utils.path.join(self.hash_cachedir,
tgt_env,
'{0}.hash.*'.format(path))
blobshadest = salt.utils.path.join(self.hash_cachedir,
tgt_env,
'{0}.hash.blob_sha1'.format(path))
lk_fn = salt.utils.path.join(self.hash_cachedir,
tgt_env,
'{0}.lk'.format(path))
destdir = os.path.dirname(dest)
hashdir = os.path.dirname(blobshadest)
if not os.path.isdir(destdir):
try:
os.makedirs(destdir)
except OSError:
# Path exists and is a file, remove it and retry
os.remove(destdir)
os.makedirs(destdir)
if not os.path.isdir(hashdir):
try:
os.makedirs(hashdir)
except OSError:
# Path exists and is a file, remove it and retry
os.remove(hashdir)
os.makedirs(hashdir)
for repo in self.remotes:
if repo.mountpoint(tgt_env) \
and not path.startswith(repo.mountpoint(tgt_env) + os.sep):
continue
repo_path = path[len(repo.mountpoint(tgt_env)):].lstrip(os.sep)
if repo.root(tgt_env):
repo_path = salt.utils.path.join(repo.root(tgt_env), repo_path)
blob, blob_hexsha, blob_mode = repo.find_file(repo_path, tgt_env)
if blob is None:
continue
def _add_file_stat(fnd, mode):
'''
Add a the mode to the return dict. In other fileserver backends
we stat the file to get its mode, and add the stat result
(passed through list() for better serialization) to the 'stat'
key in the return dict. However, since we aren't using the
stat result for anything but the mode at this time, we can
avoid unnecessary work by just manually creating the list and
not running an os.stat() on all files in the repo.
'''
if mode is not None:
fnd['stat'] = [mode]
return fnd
salt.fileserver.wait_lock(lk_fn, dest)
try:
with salt.utils.files.fopen(blobshadest, 'r') as fp_:
sha = salt.utils.stringutils.to_unicode(fp_.read())
if sha == blob_hexsha:
fnd['rel'] = path
fnd['path'] = dest
return _add_file_stat(fnd, blob_mode)
except IOError as exc:
if exc.errno != errno.ENOENT:
raise exc
with salt.utils.files.fopen(lk_fn, 'w'):
pass
for filename in glob.glob(hashes_glob):
try:
os.remove(filename)
except Exception:
pass
# Write contents of file to their destination in the FS cache
repo.write_file(blob, dest)
with salt.utils.files.fopen(blobshadest, 'w+') as fp_:
fp_.write(blob_hexsha)
try:
os.remove(lk_fn)
except OSError:
pass
fnd['rel'] = path
fnd['path'] = dest
return _add_file_stat(fnd, blob_mode)
# No matching file was found in tgt_env. Return a dict with empty paths
# so the calling function knows the file could not be found.
return fnd | [
"def",
"find_file",
"(",
"self",
",",
"path",
",",
"tgt_env",
"=",
"'base'",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0613",
"fnd",
"=",
"{",
"'path'",
":",
"''",
",",
"'rel'",
":",
"''",
"}",
"if",
"os",
".",
"path",
".",
"isabs",
"(... | Find the first file to match the path and ref, read the file out of git
and send the path to the newly cached file | [
"Find",
"the",
"first",
"file",
"to",
"match",
"the",
"path",
"and",
"ref",
"read",
"the",
"file",
"out",
"of",
"git",
"and",
"send",
"the",
"path",
"to",
"the",
"newly",
"cached",
"file"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2754-L2852 | train |
saltstack/salt | salt/utils/gitfs.py | GitFS.file_hash | def file_hash(self, load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not all(x in load for x in ('path', 'saltenv')):
return '', None
ret = {'hash_type': self.opts['hash_type']}
relpath = fnd['rel']
path = fnd['path']
hashdest = salt.utils.path.join(self.hash_cachedir,
load['saltenv'],
'{0}.hash.{1}'.format(relpath,
self.opts['hash_type']))
try:
with salt.utils.files.fopen(hashdest, 'rb') as fp_:
ret['hsum'] = fp_.read()
return ret
except IOError as exc:
if exc.errno != errno.ENOENT:
raise exc
try:
os.makedirs(os.path.dirname(hashdest))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
ret['hsum'] = salt.utils.hashutils.get_hash(path, self.opts['hash_type'])
with salt.utils.files.fopen(hashdest, 'w+') as fp_:
fp_.write(ret['hsum'])
return ret | python | def file_hash(self, load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not all(x in load for x in ('path', 'saltenv')):
return '', None
ret = {'hash_type': self.opts['hash_type']}
relpath = fnd['rel']
path = fnd['path']
hashdest = salt.utils.path.join(self.hash_cachedir,
load['saltenv'],
'{0}.hash.{1}'.format(relpath,
self.opts['hash_type']))
try:
with salt.utils.files.fopen(hashdest, 'rb') as fp_:
ret['hsum'] = fp_.read()
return ret
except IOError as exc:
if exc.errno != errno.ENOENT:
raise exc
try:
os.makedirs(os.path.dirname(hashdest))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
ret['hsum'] = salt.utils.hashutils.get_hash(path, self.opts['hash_type'])
with salt.utils.files.fopen(hashdest, 'w+') as fp_:
fp_.write(ret['hsum'])
return ret | [
"def",
"file_hash",
"(",
"self",
",",
"load",
",",
"fnd",
")",
":",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"if",
"not",
"all",
"(",
"x",
"in",
"load",
"for",
"x",
"in",
"... | Return a file hash, the hash type is set in the master config file | [
"Return",
"a",
"file",
"hash",
"the",
"hash",
"type",
"is",
"set",
"in",
"the",
"master",
"config",
"file"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2887-L2921 | train |
saltstack/salt | salt/utils/gitfs.py | GitFS._file_lists | def _file_lists(self, load, form):
'''
Return a dict containing the file lists for files and dirs
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not os.path.isdir(self.file_list_cachedir):
try:
os.makedirs(self.file_list_cachedir)
except os.error:
log.error('Unable to make cachedir %s', self.file_list_cachedir)
return []
list_cache = salt.utils.path.join(
self.file_list_cachedir,
'{0}.p'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
w_lock = salt.utils.path.join(
self.file_list_cachedir,
'.{0}.w'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
self.opts, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {'files': set(), 'symlinks': {}, 'dirs': set()}
if salt.utils.stringutils.is_hex(load['saltenv']) \
or load['saltenv'] in self.envs():
for repo in self.remotes:
repo_files, repo_symlinks = repo.file_list(load['saltenv'])
ret['files'].update(repo_files)
ret['symlinks'].update(repo_symlinks)
ret['dirs'].update(repo.dir_list(load['saltenv']))
ret['files'] = sorted(ret['files'])
ret['dirs'] = sorted(ret['dirs'])
if save_cache:
salt.fileserver.write_file_list_cache(
self.opts, ret, list_cache, w_lock
)
# NOTE: symlinks are organized in a dict instead of a list, however
# the 'symlinks' key will be defined above so it will never get to
# the default value in the call to ret.get() below.
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return {} if form == 'symlinks' else [] | python | def _file_lists(self, load, form):
'''
Return a dict containing the file lists for files and dirs
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not os.path.isdir(self.file_list_cachedir):
try:
os.makedirs(self.file_list_cachedir)
except os.error:
log.error('Unable to make cachedir %s', self.file_list_cachedir)
return []
list_cache = salt.utils.path.join(
self.file_list_cachedir,
'{0}.p'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
w_lock = salt.utils.path.join(
self.file_list_cachedir,
'.{0}.w'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
self.opts, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {'files': set(), 'symlinks': {}, 'dirs': set()}
if salt.utils.stringutils.is_hex(load['saltenv']) \
or load['saltenv'] in self.envs():
for repo in self.remotes:
repo_files, repo_symlinks = repo.file_list(load['saltenv'])
ret['files'].update(repo_files)
ret['symlinks'].update(repo_symlinks)
ret['dirs'].update(repo.dir_list(load['saltenv']))
ret['files'] = sorted(ret['files'])
ret['dirs'] = sorted(ret['dirs'])
if save_cache:
salt.fileserver.write_file_list_cache(
self.opts, ret, list_cache, w_lock
)
# NOTE: symlinks are organized in a dict instead of a list, however
# the 'symlinks' key will be defined above so it will never get to
# the default value in the call to ret.get() below.
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return {} if form == 'symlinks' else [] | [
"def",
"_file_lists",
"(",
"self",
",",
"load",
",",
"form",
")",
":",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
"."... | Return a dict containing the file lists for files and dirs | [
"Return",
"a",
"dict",
"containing",
"the",
"file",
"lists",
"for",
"files",
"and",
"dirs"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2923-L2972 | train |
saltstack/salt | salt/utils/gitfs.py | GitFS.symlink_list | def symlink_list(self, load):
'''
Return a dict of all symlinks based on a given path in the repo
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not salt.utils.stringutils.is_hex(load['saltenv']) \
and load['saltenv'] not in self.envs():
return {}
if 'prefix' in load:
prefix = load['prefix'].strip('/')
else:
prefix = ''
symlinks = self._file_lists(load, 'symlinks')
return dict([(key, val)
for key, val in six.iteritems(symlinks)
if key.startswith(prefix)]) | python | def symlink_list(self, load):
'''
Return a dict of all symlinks based on a given path in the repo
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not salt.utils.stringutils.is_hex(load['saltenv']) \
and load['saltenv'] not in self.envs():
return {}
if 'prefix' in load:
prefix = load['prefix'].strip('/')
else:
prefix = ''
symlinks = self._file_lists(load, 'symlinks')
return dict([(key, val)
for key, val in six.iteritems(symlinks)
if key.startswith(prefix)]) | [
"def",
"symlink_list",
"(",
"self",
",",
"load",
")",
":",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"if",
"not",
"salt",
".",
"utils",
".",
"stringutils",
".",
"is_hex",
"(",
"... | Return a dict of all symlinks based on a given path in the repo | [
"Return",
"a",
"dict",
"of",
"all",
"symlinks",
"based",
"on",
"a",
"given",
"path",
"in",
"the",
"repo"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2988-L3006 | train |
saltstack/salt | salt/utils/gitfs.py | GitPillar.checkout | def checkout(self):
'''
Checkout the targeted branches/tags from the git_pillar remotes
'''
self.pillar_dirs = OrderedDict()
self.pillar_linked_dirs = []
for repo in self.remotes:
cachedir = self.do_checkout(repo)
if cachedir is not None:
# Figure out which environment this remote should be assigned
if repo.branch == '__env__' and hasattr(repo, 'all_saltenvs'):
env = self.opts.get('pillarenv') \
or self.opts.get('saltenv') \
or self.opts.get('git_pillar_base')
elif repo.env:
env = repo.env
else:
if repo.branch == repo.base:
env = 'base'
else:
tgt = repo.get_checkout_target()
env = 'base' if tgt == repo.base else tgt
if repo._mountpoint:
if self.link_mountpoint(repo):
self.pillar_dirs[repo.linkdir] = env
self.pillar_linked_dirs.append(repo.linkdir)
else:
self.pillar_dirs[cachedir] = env | python | def checkout(self):
'''
Checkout the targeted branches/tags from the git_pillar remotes
'''
self.pillar_dirs = OrderedDict()
self.pillar_linked_dirs = []
for repo in self.remotes:
cachedir = self.do_checkout(repo)
if cachedir is not None:
# Figure out which environment this remote should be assigned
if repo.branch == '__env__' and hasattr(repo, 'all_saltenvs'):
env = self.opts.get('pillarenv') \
or self.opts.get('saltenv') \
or self.opts.get('git_pillar_base')
elif repo.env:
env = repo.env
else:
if repo.branch == repo.base:
env = 'base'
else:
tgt = repo.get_checkout_target()
env = 'base' if tgt == repo.base else tgt
if repo._mountpoint:
if self.link_mountpoint(repo):
self.pillar_dirs[repo.linkdir] = env
self.pillar_linked_dirs.append(repo.linkdir)
else:
self.pillar_dirs[cachedir] = env | [
"def",
"checkout",
"(",
"self",
")",
":",
"self",
".",
"pillar_dirs",
"=",
"OrderedDict",
"(",
")",
"self",
".",
"pillar_linked_dirs",
"=",
"[",
"]",
"for",
"repo",
"in",
"self",
".",
"remotes",
":",
"cachedir",
"=",
"self",
".",
"do_checkout",
"(",
"r... | Checkout the targeted branches/tags from the git_pillar remotes | [
"Checkout",
"the",
"targeted",
"branches",
"/",
"tags",
"from",
"the",
"git_pillar",
"remotes"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L3015-L3042 | train |
saltstack/salt | salt/utils/gitfs.py | GitPillar.link_mountpoint | def link_mountpoint(self, repo):
'''
Ensure that the mountpoint is present in the correct location and
points at the correct path
'''
lcachelink = salt.utils.path.join(repo.linkdir, repo._mountpoint)
lcachedest = salt.utils.path.join(repo.cachedir, repo.root()).rstrip(os.sep)
wipe_linkdir = False
create_link = False
try:
with repo.gen_lock(lock_type='mountpoint', timeout=10):
walk_results = list(os.walk(repo.linkdir, followlinks=False))
if walk_results != repo.linkdir_walk:
log.debug(
'Results of walking %s differ from expected results',
repo.linkdir
)
log.debug('Walk results: %s', walk_results)
log.debug('Expected results: %s', repo.linkdir_walk)
wipe_linkdir = True
else:
if not all(not salt.utils.path.islink(x[0])
and os.path.isdir(x[0])
for x in walk_results[:-1]):
log.debug(
'Linkdir parents of %s are not all directories',
lcachelink
)
wipe_linkdir = True
elif not salt.utils.path.islink(lcachelink):
wipe_linkdir = True
else:
try:
ldest = salt.utils.path.readlink(lcachelink)
except Exception:
log.debug(
'Failed to read destination of %s', lcachelink
)
wipe_linkdir = True
else:
if ldest != lcachedest:
log.debug(
'Destination of %s (%s) does not match '
'the expected value (%s)',
lcachelink, ldest, lcachedest
)
# Since we know that the parent dirs of the
# link are set up properly, all we need to do
# is remove the symlink and let it be created
# below.
try:
if salt.utils.platform.is_windows() \
and not ldest.startswith('\\\\') \
and os.path.isdir(ldest):
# On Windows, symlinks to directories
# must be removed as if they were
# themselves directories.
shutil.rmtree(lcachelink)
else:
os.remove(lcachelink)
except Exception as exc:
log.exception(
'Failed to remove existing git_pillar '
'mountpoint link %s: %s',
lcachelink, exc.__str__()
)
wipe_linkdir = False
create_link = True
if wipe_linkdir:
# Wiping implies that we need to create the link
create_link = True
try:
shutil.rmtree(repo.linkdir)
except OSError:
pass
try:
ldirname = os.path.dirname(lcachelink)
os.makedirs(ldirname)
log.debug('Successfully made linkdir parent %s', ldirname)
except OSError as exc:
log.error(
'Failed to os.makedirs() linkdir parent %s: %s',
ldirname, exc.__str__()
)
return False
if create_link:
try:
os.symlink(lcachedest, lcachelink)
log.debug(
'Successfully linked %s to cachedir %s',
lcachelink, lcachedest
)
return True
except OSError as exc:
log.error(
'Failed to create symlink to %s at path %s: %s',
lcachedest, lcachelink, exc.__str__()
)
return False
except GitLockError:
log.error(
'Timed out setting mountpoint lock for %s remote \'%s\'. If '
'this error persists, it may be because an earlier %s '
'checkout was interrupted. The lock can be cleared by running '
'\'salt-run cache.clear_git_lock %s type=mountpoint\', or by '
'manually removing %s.',
self.role, repo.id, self.role, self.role,
repo._get_lock_file(lock_type='mountpoint')
)
return False
return True | python | def link_mountpoint(self, repo):
'''
Ensure that the mountpoint is present in the correct location and
points at the correct path
'''
lcachelink = salt.utils.path.join(repo.linkdir, repo._mountpoint)
lcachedest = salt.utils.path.join(repo.cachedir, repo.root()).rstrip(os.sep)
wipe_linkdir = False
create_link = False
try:
with repo.gen_lock(lock_type='mountpoint', timeout=10):
walk_results = list(os.walk(repo.linkdir, followlinks=False))
if walk_results != repo.linkdir_walk:
log.debug(
'Results of walking %s differ from expected results',
repo.linkdir
)
log.debug('Walk results: %s', walk_results)
log.debug('Expected results: %s', repo.linkdir_walk)
wipe_linkdir = True
else:
if not all(not salt.utils.path.islink(x[0])
and os.path.isdir(x[0])
for x in walk_results[:-1]):
log.debug(
'Linkdir parents of %s are not all directories',
lcachelink
)
wipe_linkdir = True
elif not salt.utils.path.islink(lcachelink):
wipe_linkdir = True
else:
try:
ldest = salt.utils.path.readlink(lcachelink)
except Exception:
log.debug(
'Failed to read destination of %s', lcachelink
)
wipe_linkdir = True
else:
if ldest != lcachedest:
log.debug(
'Destination of %s (%s) does not match '
'the expected value (%s)',
lcachelink, ldest, lcachedest
)
# Since we know that the parent dirs of the
# link are set up properly, all we need to do
# is remove the symlink and let it be created
# below.
try:
if salt.utils.platform.is_windows() \
and not ldest.startswith('\\\\') \
and os.path.isdir(ldest):
# On Windows, symlinks to directories
# must be removed as if they were
# themselves directories.
shutil.rmtree(lcachelink)
else:
os.remove(lcachelink)
except Exception as exc:
log.exception(
'Failed to remove existing git_pillar '
'mountpoint link %s: %s',
lcachelink, exc.__str__()
)
wipe_linkdir = False
create_link = True
if wipe_linkdir:
# Wiping implies that we need to create the link
create_link = True
try:
shutil.rmtree(repo.linkdir)
except OSError:
pass
try:
ldirname = os.path.dirname(lcachelink)
os.makedirs(ldirname)
log.debug('Successfully made linkdir parent %s', ldirname)
except OSError as exc:
log.error(
'Failed to os.makedirs() linkdir parent %s: %s',
ldirname, exc.__str__()
)
return False
if create_link:
try:
os.symlink(lcachedest, lcachelink)
log.debug(
'Successfully linked %s to cachedir %s',
lcachelink, lcachedest
)
return True
except OSError as exc:
log.error(
'Failed to create symlink to %s at path %s: %s',
lcachedest, lcachelink, exc.__str__()
)
return False
except GitLockError:
log.error(
'Timed out setting mountpoint lock for %s remote \'%s\'. If '
'this error persists, it may be because an earlier %s '
'checkout was interrupted. The lock can be cleared by running '
'\'salt-run cache.clear_git_lock %s type=mountpoint\', or by '
'manually removing %s.',
self.role, repo.id, self.role, self.role,
repo._get_lock_file(lock_type='mountpoint')
)
return False
return True | [
"def",
"link_mountpoint",
"(",
"self",
",",
"repo",
")",
":",
"lcachelink",
"=",
"salt",
".",
"utils",
".",
"path",
".",
"join",
"(",
"repo",
".",
"linkdir",
",",
"repo",
".",
"_mountpoint",
")",
"lcachedest",
"=",
"salt",
".",
"utils",
".",
"path",
... | Ensure that the mountpoint is present in the correct location and
points at the correct path | [
"Ensure",
"that",
"the",
"mountpoint",
"is",
"present",
"in",
"the",
"correct",
"location",
"and",
"points",
"at",
"the",
"correct",
"path"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L3044-L3156 | train |
saltstack/salt | salt/utils/gitfs.py | WinRepo.checkout | def checkout(self):
'''
Checkout the targeted branches/tags from the winrepo remotes
'''
self.winrepo_dirs = {}
for repo in self.remotes:
cachedir = self.do_checkout(repo)
if cachedir is not None:
self.winrepo_dirs[repo.id] = cachedir | python | def checkout(self):
'''
Checkout the targeted branches/tags from the winrepo remotes
'''
self.winrepo_dirs = {}
for repo in self.remotes:
cachedir = self.do_checkout(repo)
if cachedir is not None:
self.winrepo_dirs[repo.id] = cachedir | [
"def",
"checkout",
"(",
"self",
")",
":",
"self",
".",
"winrepo_dirs",
"=",
"{",
"}",
"for",
"repo",
"in",
"self",
".",
"remotes",
":",
"cachedir",
"=",
"self",
".",
"do_checkout",
"(",
"repo",
")",
"if",
"cachedir",
"is",
"not",
"None",
":",
"self",... | Checkout the targeted branches/tags from the winrepo remotes | [
"Checkout",
"the",
"targeted",
"branches",
"/",
"tags",
"from",
"the",
"winrepo",
"remotes"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L3168-L3176 | train |
saltstack/salt | salt/fileserver/minionfs.py | _is_exposed | def _is_exposed(minion):
'''
Check if the minion is exposed, based on the whitelist and blacklist
'''
return salt.utils.stringutils.check_whitelist_blacklist(
minion,
whitelist=__opts__['minionfs_whitelist'],
blacklist=__opts__['minionfs_blacklist']
) | python | def _is_exposed(minion):
'''
Check if the minion is exposed, based on the whitelist and blacklist
'''
return salt.utils.stringutils.check_whitelist_blacklist(
minion,
whitelist=__opts__['minionfs_whitelist'],
blacklist=__opts__['minionfs_blacklist']
) | [
"def",
"_is_exposed",
"(",
"minion",
")",
":",
"return",
"salt",
".",
"utils",
".",
"stringutils",
".",
"check_whitelist_blacklist",
"(",
"minion",
",",
"whitelist",
"=",
"__opts__",
"[",
"'minionfs_whitelist'",
"]",
",",
"blacklist",
"=",
"__opts__",
"[",
"'m... | Check if the minion is exposed, based on the whitelist and blacklist | [
"Check",
"if",
"the",
"minion",
"is",
"exposed",
"based",
"on",
"the",
"whitelist",
"and",
"blacklist"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/minionfs.py#L65-L73 | train |
saltstack/salt | salt/fileserver/minionfs.py | find_file | def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Search the environment for the relative path
'''
fnd = {'path': '', 'rel': ''}
if os.path.isabs(path):
return fnd
if tgt_env not in envs():
return fnd
if os.path.basename(path) == 'top.sls':
log.debug(
'minionfs will NOT serve top.sls '
'for security reasons (path requested: %s)', path
)
return fnd
mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
# Remove the mountpoint to get the "true" path
path = path[len(mountpoint):].lstrip(os.path.sep)
try:
minion, pushed_file = path.split(os.sep, 1)
except ValueError:
return fnd
if not _is_exposed(minion):
return fnd
full = os.path.join(
__opts__['cachedir'], 'minions', minion, 'files', pushed_file
)
if os.path.isfile(full) \
and not salt.fileserver.is_file_ignored(__opts__, full):
fnd['path'] = full
fnd['rel'] = path
fnd['stat'] = list(os.stat(full))
return fnd
return fnd | python | def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Search the environment for the relative path
'''
fnd = {'path': '', 'rel': ''}
if os.path.isabs(path):
return fnd
if tgt_env not in envs():
return fnd
if os.path.basename(path) == 'top.sls':
log.debug(
'minionfs will NOT serve top.sls '
'for security reasons (path requested: %s)', path
)
return fnd
mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
# Remove the mountpoint to get the "true" path
path = path[len(mountpoint):].lstrip(os.path.sep)
try:
minion, pushed_file = path.split(os.sep, 1)
except ValueError:
return fnd
if not _is_exposed(minion):
return fnd
full = os.path.join(
__opts__['cachedir'], 'minions', minion, 'files', pushed_file
)
if os.path.isfile(full) \
and not salt.fileserver.is_file_ignored(__opts__, full):
fnd['path'] = full
fnd['rel'] = path
fnd['stat'] = list(os.stat(full))
return fnd
return fnd | [
"def",
"find_file",
"(",
"path",
",",
"tgt_env",
"=",
"'base'",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0613",
"fnd",
"=",
"{",
"'path'",
":",
"''",
",",
"'rel'",
":",
"''",
"}",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")... | Search the environment for the relative path | [
"Search",
"the",
"environment",
"for",
"the",
"relative",
"path"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/minionfs.py#L76-L110 | train |
saltstack/salt | salt/fileserver/minionfs.py | update | def update():
'''
When we are asked to update (regular interval) lets reap the cache
'''
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'minionfs/hash'),
find_file)
except os.error:
# Hash file won't exist if no files have yet been served up
pass | python | def update():
'''
When we are asked to update (regular interval) lets reap the cache
'''
try:
salt.fileserver.reap_fileserver_cache_dir(
os.path.join(__opts__['cachedir'], 'minionfs/hash'),
find_file)
except os.error:
# Hash file won't exist if no files have yet been served up
pass | [
"def",
"update",
"(",
")",
":",
"try",
":",
"salt",
".",
"fileserver",
".",
"reap_fileserver_cache_dir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'minionfs/hash'",
")",
",",
"find_file",
")",
"except",
"os",
"."... | When we are asked to update (regular interval) lets reap the cache | [
"When",
"we",
"are",
"asked",
"to",
"update",
"(",
"regular",
"interval",
")",
"lets",
"reap",
"the",
"cache"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/minionfs.py#L155-L165 | train |
saltstack/salt | salt/fileserver/minionfs.py | file_hash | def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
path = fnd['path']
ret = {}
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if load['saltenv'] not in envs():
return {}
# if the file doesn't exist, we can't get a hash
if not path or not os.path.isfile(path):
return ret
# set the hash_type as it is determined by config-- so mechanism won't change that
ret['hash_type'] = __opts__['hash_type']
# check if the hash is cached
# cache file's contents should be "hash:mtime"
cache_path = os.path.join(
__opts__['cachedir'],
'minionfs',
'hash',
load['saltenv'],
'{0}.hash.{1}'.format(fnd['rel'], __opts__['hash_type'])
)
# if we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
try:
with salt.utils.files.fopen(cache_path, 'rb') as fp_:
try:
hsum, mtime = salt.utils.stringutils.to_unicode(fp_.read()).split(':')
except ValueError:
log.debug(
'Fileserver attempted to read incomplete cache file. '
'Retrying.'
)
file_hash(load, fnd)
return ret
if os.path.getmtime(path) == mtime:
# check if mtime changed
ret['hsum'] = hsum
return ret
# Can't use Python select() because we need Windows support
except os.error:
log.debug(
'Fileserver encountered lock when reading cache file. '
'Retrying.'
)
file_hash(load, fnd)
return ret
# if we don't have a cache entry-- lets make one
ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# save the cache object "hash:mtime"
cache_object = '{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))
with salt.utils.files.flopen(cache_path, 'w') as fp_:
fp_.write(cache_object)
return ret | python | def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
path = fnd['path']
ret = {}
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if load['saltenv'] not in envs():
return {}
# if the file doesn't exist, we can't get a hash
if not path or not os.path.isfile(path):
return ret
# set the hash_type as it is determined by config-- so mechanism won't change that
ret['hash_type'] = __opts__['hash_type']
# check if the hash is cached
# cache file's contents should be "hash:mtime"
cache_path = os.path.join(
__opts__['cachedir'],
'minionfs',
'hash',
load['saltenv'],
'{0}.hash.{1}'.format(fnd['rel'], __opts__['hash_type'])
)
# if we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
try:
with salt.utils.files.fopen(cache_path, 'rb') as fp_:
try:
hsum, mtime = salt.utils.stringutils.to_unicode(fp_.read()).split(':')
except ValueError:
log.debug(
'Fileserver attempted to read incomplete cache file. '
'Retrying.'
)
file_hash(load, fnd)
return ret
if os.path.getmtime(path) == mtime:
# check if mtime changed
ret['hsum'] = hsum
return ret
# Can't use Python select() because we need Windows support
except os.error:
log.debug(
'Fileserver encountered lock when reading cache file. '
'Retrying.'
)
file_hash(load, fnd)
return ret
# if we don't have a cache entry-- lets make one
ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# save the cache object "hash:mtime"
cache_object = '{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))
with salt.utils.files.flopen(cache_path, 'w') as fp_:
fp_.write(cache_object)
return ret | [
"def",
"file_hash",
"(",
"load",
",",
"fnd",
")",
":",
"path",
"=",
"fnd",
"[",
"'path'",
"]",
"ret",
"=",
"{",
"}",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"if",
"load",
... | Return a file hash, the hash type is set in the master config file | [
"Return",
"a",
"file",
"hash",
"the",
"hash",
"type",
"is",
"set",
"in",
"the",
"master",
"config",
"file"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/minionfs.py#L168-L234 | train |
saltstack/salt | salt/fileserver/minionfs.py | file_list | def file_list(load):
'''
Return a list of all files on the file server in a specified environment
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if load['saltenv'] not in envs():
return []
mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
prefix = load.get('prefix', '').strip('/')
if mountpoint and prefix.startswith(mountpoint + os.path.sep):
prefix = prefix[len(mountpoint + os.path.sep):]
minions_cache_dir = os.path.join(__opts__['cachedir'], 'minions')
minion_dirs = os.listdir(minions_cache_dir)
# If the prefix is not an empty string, then get the minion id from it. The
# minion ID will be the part before the first slash, so if there is no
# slash, this is an invalid path.
if prefix:
tgt_minion, _, prefix = prefix.partition('/')
if not prefix:
# No minion ID in path
return []
# Reassign minion_dirs so we don't unnecessarily walk every minion's
# pushed files
if tgt_minion not in minion_dirs:
log.warning(
'No files found in minionfs cache for minion ID \'%s\'',
tgt_minion
)
return []
minion_dirs = [tgt_minion]
ret = []
for minion in minion_dirs:
if not _is_exposed(minion):
continue
minion_files_dir = os.path.join(minions_cache_dir, minion, 'files')
if not os.path.isdir(minion_files_dir):
log.debug(
'minionfs: could not find files directory under %s!',
os.path.join(minions_cache_dir, minion)
)
continue
walk_dir = os.path.join(minion_files_dir, prefix)
# Do not follow links for security reasons
for root, _, files in salt.utils.path.os_walk(walk_dir, followlinks=False):
for fname in files:
# Ignore links for security reasons
if os.path.islink(os.path.join(root, fname)):
continue
relpath = os.path.relpath(
os.path.join(root, fname), minion_files_dir
)
if relpath.startswith('../'):
continue
rel_fn = os.path.join(mountpoint, minion, relpath)
if not salt.fileserver.is_file_ignored(__opts__, rel_fn):
ret.append(rel_fn)
return ret | python | def file_list(load):
'''
Return a list of all files on the file server in a specified environment
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if load['saltenv'] not in envs():
return []
mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
prefix = load.get('prefix', '').strip('/')
if mountpoint and prefix.startswith(mountpoint + os.path.sep):
prefix = prefix[len(mountpoint + os.path.sep):]
minions_cache_dir = os.path.join(__opts__['cachedir'], 'minions')
minion_dirs = os.listdir(minions_cache_dir)
# If the prefix is not an empty string, then get the minion id from it. The
# minion ID will be the part before the first slash, so if there is no
# slash, this is an invalid path.
if prefix:
tgt_minion, _, prefix = prefix.partition('/')
if not prefix:
# No minion ID in path
return []
# Reassign minion_dirs so we don't unnecessarily walk every minion's
# pushed files
if tgt_minion not in minion_dirs:
log.warning(
'No files found in minionfs cache for minion ID \'%s\'',
tgt_minion
)
return []
minion_dirs = [tgt_minion]
ret = []
for minion in minion_dirs:
if not _is_exposed(minion):
continue
minion_files_dir = os.path.join(minions_cache_dir, minion, 'files')
if not os.path.isdir(minion_files_dir):
log.debug(
'minionfs: could not find files directory under %s!',
os.path.join(minions_cache_dir, minion)
)
continue
walk_dir = os.path.join(minion_files_dir, prefix)
# Do not follow links for security reasons
for root, _, files in salt.utils.path.os_walk(walk_dir, followlinks=False):
for fname in files:
# Ignore links for security reasons
if os.path.islink(os.path.join(root, fname)):
continue
relpath = os.path.relpath(
os.path.join(root, fname), minion_files_dir
)
if relpath.startswith('../'):
continue
rel_fn = os.path.join(mountpoint, minion, relpath)
if not salt.fileserver.is_file_ignored(__opts__, rel_fn):
ret.append(rel_fn)
return ret | [
"def",
"file_list",
"(",
"load",
")",
":",
"if",
"'env'",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"'env'",
")",
"if",
"load",
"[",
"'saltenv'",
"]",
"not",
"in",
"envs",
"(",
")",
":",
"return",
"[",
"]... | Return a list of all files on the file server in a specified environment | [
"Return",
"a",
"list",
"of",
"all",
"files",
"on",
"the",
"file",
"server",
"in",
"a",
"specified",
"environment"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/minionfs.py#L237-L299 | train |
saltstack/salt | salt/modules/trafficserver.py | _subprocess | def _subprocess(cmd):
'''
Function to standardize the subprocess call
'''
log.debug('Running: "%s"', ' '.join(cmd))
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()
retcode = proc.wait()
if ret:
return ret
elif retcode != 1:
return True
else:
return False
except OSError as err:
log.error(err)
return False | python | def _subprocess(cmd):
'''
Function to standardize the subprocess call
'''
log.debug('Running: "%s"', ' '.join(cmd))
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
ret = salt.utils.stringutils.to_unicode(proc.communicate()[0]).strip()
retcode = proc.wait()
if ret:
return ret
elif retcode != 1:
return True
else:
return False
except OSError as err:
log.error(err)
return False | [
"def",
"_subprocess",
"(",
"cmd",
")",
":",
"log",
".",
"debug",
"(",
"'Running: \"%s\"'",
",",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
... | Function to standardize the subprocess call | [
"Function",
"to",
"standardize",
"the",
"subprocess",
"call"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L53-L72 | train |
saltstack/salt | salt/modules/trafficserver.py | bounce_local | def bounce_local(drain=False):
'''
Bounce Traffic Server on the local node. Bouncing Traffic Server shuts down
and immediately restarts the Traffic Server node.
drain
This option modifies the restart behavior such that traffic_server
is not shut down until the number of active client connections
drops to the number given by the
proxy.config.restart.active_client_threshold configuration
variable.
.. code-block:: bash
salt '*' trafficserver.bounce_local
salt '*' trafficserver.bounce_local drain=True
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'restart')
else:
cmd = _traffic_line('-b')
if drain:
cmd = cmd + ['--drain']
return _subprocess(cmd) | python | def bounce_local(drain=False):
'''
Bounce Traffic Server on the local node. Bouncing Traffic Server shuts down
and immediately restarts the Traffic Server node.
drain
This option modifies the restart behavior such that traffic_server
is not shut down until the number of active client connections
drops to the number given by the
proxy.config.restart.active_client_threshold configuration
variable.
.. code-block:: bash
salt '*' trafficserver.bounce_local
salt '*' trafficserver.bounce_local drain=True
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'restart')
else:
cmd = _traffic_line('-b')
if drain:
cmd = cmd + ['--drain']
return _subprocess(cmd) | [
"def",
"bounce_local",
"(",
"drain",
"=",
"False",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'server'",
",",
"'restart'",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'-b'",
")",
"if",
"drain",
":",
"cmd",
"=",
"cmd"... | Bounce Traffic Server on the local node. Bouncing Traffic Server shuts down
and immediately restarts the Traffic Server node.
drain
This option modifies the restart behavior such that traffic_server
is not shut down until the number of active client connections
drops to the number given by the
proxy.config.restart.active_client_threshold configuration
variable.
.. code-block:: bash
salt '*' trafficserver.bounce_local
salt '*' trafficserver.bounce_local drain=True | [
"Bounce",
"Traffic",
"Server",
"on",
"the",
"local",
"node",
".",
"Bouncing",
"Traffic",
"Server",
"shuts",
"down",
"and",
"immediately",
"restarts",
"the",
"Traffic",
"Server",
"node",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L93-L118 | train |
saltstack/salt | salt/modules/trafficserver.py | restart_local | def restart_local(drain=False):
'''
Restart the traffic_manager and traffic_server processes on the local node.
drain
This option modifies the restart behavior such that
``traffic_server`` is not shut down until the number of
active client connections drops to the number given by the
``proxy.config.restart.active_client_threshold`` configuration
variable.
.. code-block:: bash
salt '*' trafficserver.restart_local
salt '*' trafficserver.restart_local drain=True
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'restart', '--manager')
else:
cmd = _traffic_line('-L')
if drain:
cmd = cmd + ['--drain']
return _subprocess(cmd) | python | def restart_local(drain=False):
'''
Restart the traffic_manager and traffic_server processes on the local node.
drain
This option modifies the restart behavior such that
``traffic_server`` is not shut down until the number of
active client connections drops to the number given by the
``proxy.config.restart.active_client_threshold`` configuration
variable.
.. code-block:: bash
salt '*' trafficserver.restart_local
salt '*' trafficserver.restart_local drain=True
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('server', 'restart', '--manager')
else:
cmd = _traffic_line('-L')
if drain:
cmd = cmd + ['--drain']
return _subprocess(cmd) | [
"def",
"restart_local",
"(",
"drain",
"=",
"False",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'server'",
",",
"'restart'",
",",
"'--manager'",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'-L'",
")",
"if",
"drain",
":"... | Restart the traffic_manager and traffic_server processes on the local node.
drain
This option modifies the restart behavior such that
``traffic_server`` is not shut down until the number of
active client connections drops to the number given by the
``proxy.config.restart.active_client_threshold`` configuration
variable.
.. code-block:: bash
salt '*' trafficserver.restart_local
salt '*' trafficserver.restart_local drain=True | [
"Restart",
"the",
"traffic_manager",
"and",
"traffic_server",
"processes",
"on",
"the",
"local",
"node",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L173-L197 | train |
saltstack/salt | salt/modules/trafficserver.py | match_metric | def match_metric(regex):
'''
Display the current values of all metrics whose names match the
given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_metric regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'match', regex)
else:
cmd = _traffic_ctl('-m', regex)
return _subprocess(cmd) | python | def match_metric(regex):
'''
Display the current values of all metrics whose names match the
given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_metric regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('metric', 'match', regex)
else:
cmd = _traffic_ctl('-m', regex)
return _subprocess(cmd) | [
"def",
"match_metric",
"(",
"regex",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'metric'",
",",
"'match'",
",",
"regex",
")",
"else",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'-m'",
",",
"regex",
")",
"return",
"_subprocess",
"("... | Display the current values of all metrics whose names match the
given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_metric regex | [
"Display",
"the",
"current",
"values",
"of",
"all",
"metrics",
"whose",
"names",
"match",
"the",
"given",
"regular",
"expression",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L200-L216 | train |
saltstack/salt | salt/modules/trafficserver.py | match_config | def match_config(regex):
'''
Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'match', regex)
else:
cmd = _traffic_line('-m', regex)
return _subprocess(cmd) | python | def match_config(regex):
'''
Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'match', regex)
else:
cmd = _traffic_line('-m', regex)
return _subprocess(cmd) | [
"def",
"match_config",
"(",
"regex",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'config'",
",",
"'match'",
",",
"regex",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'-m'",
",",
"regex",
")",
"return",
"_subprocess",
"(... | Display the current values of all configuration variables whose
names match the given regular expression.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.match_config regex | [
"Display",
"the",
"current",
"values",
"of",
"all",
"configuration",
"variables",
"whose",
"names",
"match",
"the",
"given",
"regular",
"expression",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L219-L235 | train |
saltstack/salt | salt/modules/trafficserver.py | read_config | def read_config(*args):
'''
Read Traffic Server configuration variable definitions.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.read_config proxy.config.http.keep_alive_post_out
'''
ret = {}
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'get')
else:
cmd = _traffic_line('-r')
try:
for arg in args:
log.debug('Querying: %s', arg)
ret[arg] = _subprocess(cmd + [arg])
except KeyError:
pass
return ret | python | def read_config(*args):
'''
Read Traffic Server configuration variable definitions.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.read_config proxy.config.http.keep_alive_post_out
'''
ret = {}
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'get')
else:
cmd = _traffic_line('-r')
try:
for arg in args:
log.debug('Querying: %s', arg)
ret[arg] = _subprocess(cmd + [arg])
except KeyError:
pass
return ret | [
"def",
"read_config",
"(",
"*",
"args",
")",
":",
"ret",
"=",
"{",
"}",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'config'",
",",
"'get'",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'-r'",
")",
"try",
":",
"for",
"arg",
... | Read Traffic Server configuration variable definitions.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.read_config proxy.config.http.keep_alive_post_out | [
"Read",
"Traffic",
"Server",
"configuration",
"variable",
"definitions",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L238-L262 | train |
saltstack/salt | salt/modules/trafficserver.py | set_config | def set_config(variable, value):
'''
Set the value of a Traffic Server configuration variable.
variable
Name of a Traffic Server configuration variable.
value
The new value to set.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.set_config proxy.config.http.keep_alive_post_out 0
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'set', variable, value)
else:
cmd = _traffic_line('-s', variable, '-v', value)
log.debug('Setting %s to %s', variable, value)
return _subprocess(cmd) | python | def set_config(variable, value):
'''
Set the value of a Traffic Server configuration variable.
variable
Name of a Traffic Server configuration variable.
value
The new value to set.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.set_config proxy.config.http.keep_alive_post_out 0
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('config', 'set', variable, value)
else:
cmd = _traffic_line('-s', variable, '-v', value)
log.debug('Setting %s to %s', variable, value)
return _subprocess(cmd) | [
"def",
"set_config",
"(",
"variable",
",",
"value",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'config'",
",",
"'set'",
",",
"variable",
",",
"value",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'-s'",
",",
"variable",... | Set the value of a Traffic Server configuration variable.
variable
Name of a Traffic Server configuration variable.
value
The new value to set.
.. versionadded:: 2016.11.0
.. code-block:: bash
salt '*' trafficserver.set_config proxy.config.http.keep_alive_post_out 0 | [
"Set",
"the",
"value",
"of",
"a",
"Traffic",
"Server",
"configuration",
"variable",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L292-L315 | train |
saltstack/salt | salt/modules/trafficserver.py | offline | def offline(path):
'''
Mark a cache storage device as offline. The storage is identified by a path
which must match exactly a path specified in storage.config. This removes
the storage from the cache and redirects requests that would have used this
storage to other storage. This has exactly the same effect as a disk
failure for that storage. This does not persist across restarts of the
traffic_server process.
.. code-block:: bash
salt '*' trafficserver.offline /path/to/cache
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('storage', 'offline', path)
else:
cmd = _traffic_line('--offline', path)
return _subprocess(cmd) | python | def offline(path):
'''
Mark a cache storage device as offline. The storage is identified by a path
which must match exactly a path specified in storage.config. This removes
the storage from the cache and redirects requests that would have used this
storage to other storage. This has exactly the same effect as a disk
failure for that storage. This does not persist across restarts of the
traffic_server process.
.. code-block:: bash
salt '*' trafficserver.offline /path/to/cache
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('storage', 'offline', path)
else:
cmd = _traffic_line('--offline', path)
return _subprocess(cmd) | [
"def",
"offline",
"(",
"path",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'storage'",
",",
"'offline'",
",",
"path",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'--offline'",
",",
"path",
")",
"return",
"_subprocess",
... | Mark a cache storage device as offline. The storage is identified by a path
which must match exactly a path specified in storage.config. This removes
the storage from the cache and redirects requests that would have used this
storage to other storage. This has exactly the same effect as a disk
failure for that storage. This does not persist across restarts of the
traffic_server process.
.. code-block:: bash
salt '*' trafficserver.offline /path/to/cache | [
"Mark",
"a",
"cache",
"storage",
"device",
"as",
"offline",
".",
"The",
"storage",
"is",
"identified",
"by",
"a",
"path",
"which",
"must",
"match",
"exactly",
"a",
"path",
"specified",
"in",
"storage",
".",
"config",
".",
"This",
"removes",
"the",
"storage... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L411-L430 | train |
saltstack/salt | salt/modules/trafficserver.py | clear_alarms | def clear_alarms(alarm):
'''
Clear (acknowledge) an alarm event. The arguments are “all” for all current
alarms, a specific alarm number (e.g. ‘‘1’‘), or an alarm string identifier
(e.g. ‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name]
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'clear', alarm)
else:
cmd = _traffic_line('--clear_alarms', alarm)
return _subprocess(cmd) | python | def clear_alarms(alarm):
'''
Clear (acknowledge) an alarm event. The arguments are “all” for all current
alarms, a specific alarm number (e.g. ‘‘1’‘), or an alarm string identifier
(e.g. ‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name]
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'clear', alarm)
else:
cmd = _traffic_line('--clear_alarms', alarm)
return _subprocess(cmd) | [
"def",
"clear_alarms",
"(",
"alarm",
")",
":",
"if",
"_TRAFFICCTL",
":",
"cmd",
"=",
"_traffic_ctl",
"(",
"'alarm'",
",",
"'clear'",
",",
"alarm",
")",
"else",
":",
"cmd",
"=",
"_traffic_line",
"(",
"'--clear_alarms'",
",",
"alarm",
")",
"return",
"_subpro... | Clear (acknowledge) an alarm event. The arguments are “all” for all current
alarms, a specific alarm number (e.g. ‘‘1’‘), or an alarm string identifier
(e.g. ‘’MGMT_ALARM_PROXY_CONFIG_ERROR’‘).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name] | [
"Clear",
"(",
"acknowledge",
")",
"an",
"alarm",
"event",
".",
"The",
"arguments",
"are",
"“all”",
"for",
"all",
"current",
"alarms",
"a",
"specific",
"alarm",
"number",
"(",
"e",
".",
"g",
".",
"‘‘1’‘",
")",
"or",
"an",
"alarm",
"string",
"identifier",
... | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/trafficserver.py#L450-L466 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | node_state | def node_state(id_):
'''
Libcloud supported node states
'''
states_int = {
0: 'RUNNING',
1: 'REBOOTING',
2: 'TERMINATED',
3: 'PENDING',
4: 'UNKNOWN',
5: 'STOPPED',
6: 'SUSPENDED',
7: 'ERROR',
8: 'PAUSED'}
states_str = {
'running': 'RUNNING',
'rebooting': 'REBOOTING',
'starting': 'STARTING',
'terminated': 'TERMINATED',
'pending': 'PENDING',
'unknown': 'UNKNOWN',
'stopping': 'STOPPING',
'stopped': 'STOPPED',
'suspended': 'SUSPENDED',
'error': 'ERROR',
'paused': 'PAUSED',
'reconfiguring': 'RECONFIGURING'
}
return states_str[id_] if isinstance(id_, six.string_types) else states_int[id_] | python | def node_state(id_):
'''
Libcloud supported node states
'''
states_int = {
0: 'RUNNING',
1: 'REBOOTING',
2: 'TERMINATED',
3: 'PENDING',
4: 'UNKNOWN',
5: 'STOPPED',
6: 'SUSPENDED',
7: 'ERROR',
8: 'PAUSED'}
states_str = {
'running': 'RUNNING',
'rebooting': 'REBOOTING',
'starting': 'STARTING',
'terminated': 'TERMINATED',
'pending': 'PENDING',
'unknown': 'UNKNOWN',
'stopping': 'STOPPING',
'stopped': 'STOPPED',
'suspended': 'SUSPENDED',
'error': 'ERROR',
'paused': 'PAUSED',
'reconfiguring': 'RECONFIGURING'
}
return states_str[id_] if isinstance(id_, six.string_types) else states_int[id_] | [
"def",
"node_state",
"(",
"id_",
")",
":",
"states_int",
"=",
"{",
"0",
":",
"'RUNNING'",
",",
"1",
":",
"'REBOOTING'",
",",
"2",
":",
"'TERMINATED'",
",",
"3",
":",
"'PENDING'",
",",
"4",
":",
"'UNKNOWN'",
",",
"5",
":",
"'STOPPED'",
",",
"6",
":"... | Libcloud supported node states | [
"Libcloud",
"supported",
"node",
"states"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L54-L82 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | check_libcloud_version | def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
'''
Compare different libcloud versions
'''
if not HAS_LIBCLOUD:
return False
if not isinstance(reqver, (list, tuple)):
raise RuntimeError(
'\'reqver\' needs to passed as a tuple or list, i.e., (0, 14, 0)'
)
try:
import libcloud # pylint: disable=redefined-outer-name
except ImportError:
raise ImportError(
'salt-cloud requires >= libcloud {0} which is not installed'.format(
'.'.join([six.text_type(num) for num in reqver])
)
)
if LIBCLOUD_VERSION_INFO >= reqver:
return libcloud.__version__
errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__)
errormsg += 'salt-cloud requires >= libcloud {0}'.format(
'.'.join([six.text_type(num) for num in reqver])
)
if why:
errormsg += ' for {0}'.format(why)
errormsg += '. Please upgrade.'
raise ImportError(errormsg) | python | def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
'''
Compare different libcloud versions
'''
if not HAS_LIBCLOUD:
return False
if not isinstance(reqver, (list, tuple)):
raise RuntimeError(
'\'reqver\' needs to passed as a tuple or list, i.e., (0, 14, 0)'
)
try:
import libcloud # pylint: disable=redefined-outer-name
except ImportError:
raise ImportError(
'salt-cloud requires >= libcloud {0} which is not installed'.format(
'.'.join([six.text_type(num) for num in reqver])
)
)
if LIBCLOUD_VERSION_INFO >= reqver:
return libcloud.__version__
errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__)
errormsg += 'salt-cloud requires >= libcloud {0}'.format(
'.'.join([six.text_type(num) for num in reqver])
)
if why:
errormsg += ' for {0}'.format(why)
errormsg += '. Please upgrade.'
raise ImportError(errormsg) | [
"def",
"check_libcloud_version",
"(",
"reqver",
"=",
"LIBCLOUD_MINIMAL_VERSION",
",",
"why",
"=",
"None",
")",
":",
"if",
"not",
"HAS_LIBCLOUD",
":",
"return",
"False",
"if",
"not",
"isinstance",
"(",
"reqver",
",",
"(",
"list",
",",
"tuple",
")",
")",
":"... | Compare different libcloud versions | [
"Compare",
"different",
"libcloud",
"versions"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L85-L115 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | get_node | def get_node(conn, name):
'''
Return a libcloud node for the named VM
'''
nodes = conn.list_nodes()
for node in nodes:
if node.name == name:
__utils__['cloud.cache_node'](salt.utils.data.simple_types_filter(node.__dict__), __active_provider_name__, __opts__)
return node | python | def get_node(conn, name):
'''
Return a libcloud node for the named VM
'''
nodes = conn.list_nodes()
for node in nodes:
if node.name == name:
__utils__['cloud.cache_node'](salt.utils.data.simple_types_filter(node.__dict__), __active_provider_name__, __opts__)
return node | [
"def",
"get_node",
"(",
"conn",
",",
"name",
")",
":",
"nodes",
"=",
"conn",
".",
"list_nodes",
"(",
")",
"for",
"node",
"in",
"nodes",
":",
"if",
"node",
".",
"name",
"==",
"name",
":",
"__utils__",
"[",
"'cloud.cache_node'",
"]",
"(",
"salt",
".",
... | Return a libcloud node for the named VM | [
"Return",
"a",
"libcloud",
"node",
"for",
"the",
"named",
"VM"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L118-L126 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | avail_locations | def avail_locations(conn=None, call=None):
'''
Return a dict of all available VM locations on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
locations = conn.list_locations()
ret = {}
for img in locations:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_') or attr == 'driver':
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[img_name][attr] = attr_value
return ret | python | def avail_locations(conn=None, call=None):
'''
Return a dict of all available VM locations on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
locations = conn.list_locations()
ret = {}
for img in locations:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_') or attr == 'driver':
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[img_name][attr] = attr_value
return ret | [
"def",
"avail_locations",
"(",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_locations function must be called with '",
"'-f or --function, or with the --list-locations option'"... | Return a dict of all available VM locations on the cloud provider with
relevant data | [
"Return",
"a",
"dict",
"of",
"all",
"available",
"VM",
"locations",
"on",
"the",
"cloud",
"provider",
"with",
"relevant",
"data"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L129-L163 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | avail_images | def avail_images(conn=None, call=None):
'''
Return a dict of all available VM images on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
images = conn.list_images()
ret = {}
for img in images:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_') or attr in ('driver', 'get_uuid'):
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[img_name][attr] = attr_value
return ret | python | def avail_images(conn=None, call=None):
'''
Return a dict of all available VM images on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
images = conn.list_images()
ret = {}
for img in images:
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
ret[img_name] = {}
for attr in dir(img):
if attr.startswith('_') or attr in ('driver', 'get_uuid'):
continue
attr_value = getattr(img, attr)
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[img_name][attr] = attr_value
return ret | [
"def",
"avail_images",
"(",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_images function must be called with '",
"'-f or --function, or with the --list-images option'",
")",
... | Return a dict of all available VM images on the cloud provider with
relevant data | [
"Return",
"a",
"dict",
"of",
"all",
"available",
"VM",
"images",
"on",
"the",
"cloud",
"provider",
"with",
"relevant",
"data"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L166-L198 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | avail_sizes | def avail_sizes(conn=None, call=None):
'''
Return a dict of all available VM images on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
sizes = conn.list_sizes()
ret = {}
for size in sizes:
if isinstance(size.name, six.string_types) and not six.PY3:
size_name = size.name.encode('ascii', 'salt-cloud-force-ascii')
else:
size_name = str(size.name) # future lint: disable=blacklisted-function
ret[size_name] = {}
for attr in dir(size):
if attr.startswith('_') or attr in ('driver', 'get_uuid'):
continue
try:
attr_value = getattr(size, attr)
except Exception:
pass
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[size_name][attr] = attr_value
return ret | python | def avail_sizes(conn=None, call=None):
'''
Return a dict of all available VM images on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
sizes = conn.list_sizes()
ret = {}
for size in sizes:
if isinstance(size.name, six.string_types) and not six.PY3:
size_name = size.name.encode('ascii', 'salt-cloud-force-ascii')
else:
size_name = str(size.name) # future lint: disable=blacklisted-function
ret[size_name] = {}
for attr in dir(size):
if attr.startswith('_') or attr in ('driver', 'get_uuid'):
continue
try:
attr_value = getattr(size, attr)
except Exception:
pass
if isinstance(attr_value, six.string_types) and not six.PY3:
attr_value = attr_value.encode(
'ascii', 'salt-cloud-force-ascii'
)
ret[size_name][attr] = attr_value
return ret | [
"def",
"avail_sizes",
"(",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_sizes function must be called with '",
"'-f or --function, or with the --list-sizes option'",
")",
"... | Return a dict of all available VM images on the cloud provider with
relevant data | [
"Return",
"a",
"dict",
"of",
"all",
"available",
"VM",
"images",
"on",
"the",
"cloud",
"provider",
"with",
"relevant",
"data"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L201-L238 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | get_location | def get_location(conn, vm_):
'''
Return the location object to use
'''
locations = conn.list_locations()
vm_location = config.get_cloud_config_value('location', vm_, __opts__)
if not six.PY3:
vm_location = vm_location.encode(
'ascii', 'salt-cloud-force-ascii'
)
for img in locations:
if isinstance(img.id, six.string_types) and not six.PY3:
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id) # future lint: disable=blacklisted-function
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
if vm_location and vm_location in (img_id, img_name):
return img
raise SaltCloudNotFound(
'The specified location, \'{0}\', could not be found.'.format(
vm_location
)
) | python | def get_location(conn, vm_):
'''
Return the location object to use
'''
locations = conn.list_locations()
vm_location = config.get_cloud_config_value('location', vm_, __opts__)
if not six.PY3:
vm_location = vm_location.encode(
'ascii', 'salt-cloud-force-ascii'
)
for img in locations:
if isinstance(img.id, six.string_types) and not six.PY3:
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id) # future lint: disable=blacklisted-function
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
if vm_location and vm_location in (img_id, img_name):
return img
raise SaltCloudNotFound(
'The specified location, \'{0}\', could not be found.'.format(
vm_location
)
) | [
"def",
"get_location",
"(",
"conn",
",",
"vm_",
")",
":",
"locations",
"=",
"conn",
".",
"list_locations",
"(",
")",
"vm_location",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'location'",
",",
"vm_",
",",
"__opts__",
")",
"if",
"not",
"six",
".",
... | Return the location object to use | [
"Return",
"the",
"location",
"object",
"to",
"use"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L241-L270 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | get_image | def get_image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
vm_image = config.get_cloud_config_value('image', vm_, __opts__)
if not six.PY3:
vm_image = vm_image.encode('ascii', 'salt-cloud-force-ascii')
for img in images:
if isinstance(img.id, six.string_types) and not six.PY3:
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id) # future lint: disable=blacklisted-function
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
if vm_image and vm_image in (img_id, img_name):
return img
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
) | python | def get_image(conn, vm_):
'''
Return the image object to use
'''
images = conn.list_images()
vm_image = config.get_cloud_config_value('image', vm_, __opts__)
if not six.PY3:
vm_image = vm_image.encode('ascii', 'salt-cloud-force-ascii')
for img in images:
if isinstance(img.id, six.string_types) and not six.PY3:
img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
else:
img_id = str(img.id) # future lint: disable=blacklisted-function
if isinstance(img.name, six.string_types) and not six.PY3:
img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
else:
img_name = str(img.name) # future lint: disable=blacklisted-function
if vm_image and vm_image in (img_id, img_name):
return img
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
) | [
"def",
"get_image",
"(",
"conn",
",",
"vm_",
")",
":",
"images",
"=",
"conn",
".",
"list_images",
"(",
")",
"vm_image",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'image'",
",",
"vm_",
",",
"__opts__",
")",
"if",
"not",
"six",
".",
"PY3",
":",... | Return the image object to use | [
"Return",
"the",
"image",
"object",
"to",
"use"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L273-L299 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | get_size | def get_size(conn, vm_):
'''
Return the VM's size object
'''
sizes = conn.list_sizes()
vm_size = config.get_cloud_config_value('size', vm_, __opts__)
if not vm_size:
return sizes[0]
for size in sizes:
if vm_size and str(vm_size) in (str(size.id), str(size.name)): # pylint: disable=blacklisted-function
return size
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
) | python | def get_size(conn, vm_):
'''
Return the VM's size object
'''
sizes = conn.list_sizes()
vm_size = config.get_cloud_config_value('size', vm_, __opts__)
if not vm_size:
return sizes[0]
for size in sizes:
if vm_size and str(vm_size) in (str(size.id), str(size.name)): # pylint: disable=blacklisted-function
return size
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
) | [
"def",
"get_size",
"(",
"conn",
",",
"vm_",
")",
":",
"sizes",
"=",
"conn",
".",
"list_sizes",
"(",
")",
"vm_size",
"=",
"config",
".",
"get_cloud_config_value",
"(",
"'size'",
",",
"vm_",
",",
"__opts__",
")",
"if",
"not",
"vm_size",
":",
"return",
"s... | Return the VM's size object | [
"Return",
"the",
"VM",
"s",
"size",
"object"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L302-L316 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | destroy | def destroy(name, conn=None, call=None):
'''
Delete a single VM
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
profiles = get_configured_provider()['profiles'] # pylint: disable=E0602
if node is None:
log.error('Unable to find the VM %s', name)
profile = None
if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
profile = node.extra['metadata']['profile']
flush_mine_on_destroy = False
if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if flush_mine_on_destroy:
log.info('Clearing Salt Mine: %s', name)
mopts_ = salt.config.DEFAULT_MINION_OPTS
conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.minion_config(os.path.join(conf_path, 'minion'))
)
client = salt.client.get_local_client(mopts_)
minions = client.cmd(name, 'mine.flush')
log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)
log.info('Destroying VM: %s', name)
ret = conn.destroy_node(node)
if ret:
log.info('Destroyed VM: %s', name)
# Fire destroy action
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__['delete_sshkeys'] is True:
public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips'))
if public_ips:
salt.utils.cloud.remove_sshkey(public_ips[0])
private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips'))
if private_ips:
salt.utils.cloud.remove_sshkey(private_ips[0])
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return True
log.error('Failed to Destroy VM: %s', name)
return False | python | def destroy(name, conn=None, call=None):
'''
Delete a single VM
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
profiles = get_configured_provider()['profiles'] # pylint: disable=E0602
if node is None:
log.error('Unable to find the VM %s', name)
profile = None
if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
profile = node.extra['metadata']['profile']
flush_mine_on_destroy = False
if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
if flush_mine_on_destroy:
log.info('Clearing Salt Mine: %s', name)
mopts_ = salt.config.DEFAULT_MINION_OPTS
conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.minion_config(os.path.join(conf_path, 'minion'))
)
client = salt.client.get_local_client(mopts_)
minions = client.cmd(name, 'mine.flush')
log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)
log.info('Destroying VM: %s', name)
ret = conn.destroy_node(node)
if ret:
log.info('Destroyed VM: %s', name)
# Fire destroy action
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__['delete_sshkeys'] is True:
public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips'))
if public_ips:
salt.utils.cloud.remove_sshkey(public_ips[0])
private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips'))
if private_ips:
salt.utils.cloud.remove_sshkey(private_ips[0])
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return True
log.error('Failed to Destroy VM: %s', name)
return False | [
"def",
"destroy",
"(",
"name",
",",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'function'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The destroy action must be called with -d, --destroy, '",
"'-a or --action.'",
")",
"__utils__"... | Delete a single VM | [
"Delete",
"a",
"single",
"VM"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L335-L409 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | reboot | def reboot(name, conn=None):
'''
Reboot a single VM
'''
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
if node is None:
log.error('Unable to find the VM %s', name)
log.info('Rebooting VM: %s', name)
ret = conn.reboot_node(node)
if ret:
log.info('Rebooted VM: %s', name)
# Fire reboot action
__utils__['cloud.fire_event'](
'event',
'{0} has been rebooted'.format(name), 'salt-cloud'
'salt/cloud/{0}/rebooting'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return True
log.error('Failed to reboot VM: %s', name)
return False | python | def reboot(name, conn=None):
'''
Reboot a single VM
'''
if not conn:
conn = get_conn() # pylint: disable=E0602
node = get_node(conn, name)
if node is None:
log.error('Unable to find the VM %s', name)
log.info('Rebooting VM: %s', name)
ret = conn.reboot_node(node)
if ret:
log.info('Rebooted VM: %s', name)
# Fire reboot action
__utils__['cloud.fire_event'](
'event',
'{0} has been rebooted'.format(name), 'salt-cloud'
'salt/cloud/{0}/rebooting'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return True
log.error('Failed to reboot VM: %s', name)
return False | [
"def",
"reboot",
"(",
"name",
",",
"conn",
"=",
"None",
")",
":",
"if",
"not",
"conn",
":",
"conn",
"=",
"get_conn",
"(",
")",
"# pylint: disable=E0602",
"node",
"=",
"get_node",
"(",
"conn",
",",
"name",
")",
"if",
"node",
"is",
"None",
":",
"log",
... | Reboot a single VM | [
"Reboot",
"a",
"single",
"VM"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L412-L438 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | list_nodes | def list_nodes(conn=None, call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
nodes = conn.list_nodes()
ret = {}
for node in nodes:
ret[node.name] = {
'id': node.id,
'image': node.image,
'name': node.name,
'private_ips': node.private_ips,
'public_ips': node.public_ips,
'size': node.size,
'state': node_state(node.state)
}
return ret | python | def list_nodes(conn=None, call=None):
'''
Return a list of the VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
nodes = conn.list_nodes()
ret = {}
for node in nodes:
ret[node.name] = {
'id': node.id,
'image': node.image,
'name': node.name,
'private_ips': node.private_ips,
'public_ips': node.public_ips,
'size': node.size,
'state': node_state(node.state)
}
return ret | [
"def",
"list_nodes",
"(",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_nodes function must be called with -f or --function.'",
")",
"if",
"not",
"conn",
":",
"conn",
... | Return a list of the VMs that are on the provider | [
"Return",
"a",
"list",
"of",
"the",
"VMs",
"that",
"are",
"on",
"the",
"provider"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L441-L465 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | list_nodes_full | def list_nodes_full(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with all fields
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
nodes = conn.list_nodes()
ret = {}
for node in nodes:
pairs = {}
for key, value in zip(node.__dict__, six.itervalues(node.__dict__)):
pairs[key] = value
ret[node.name] = pairs
del ret[node.name]['driver']
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret | python | def list_nodes_full(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with all fields
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
nodes = conn.list_nodes()
ret = {}
for node in nodes:
pairs = {}
for key, value in zip(node.__dict__, six.itervalues(node.__dict__)):
pairs[key] = value
ret[node.name] = pairs
del ret[node.name]['driver']
__utils__['cloud.cache_node_list'](ret, __active_provider_name__.split(':')[0], __opts__)
return ret | [
"def",
"list_nodes_full",
"(",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_nodes_full function must be called with -f or --function.'",
")",
"if",
"not",
"conn",
":",
... | Return a list of the VMs that are on the provider, with all fields | [
"Return",
"a",
"list",
"of",
"the",
"VMs",
"that",
"are",
"on",
"the",
"provider",
"with",
"all",
"fields"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L468-L490 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | list_nodes_select | def list_nodes_select(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
if not conn:
conn = get_conn() # pylint: disable=E0602
return salt.utils.cloud.list_nodes_select(
list_nodes_full(conn, 'function'), __opts__['query.selection'], call,
) | python | def list_nodes_select(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
if not conn:
conn = get_conn() # pylint: disable=E0602
return salt.utils.cloud.list_nodes_select(
list_nodes_full(conn, 'function'), __opts__['query.selection'], call,
) | [
"def",
"list_nodes_select",
"(",
"conn",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"if",
"not",
"conn",
":",
"conn",
"=",
"get_conn",
"(",
")",
"# pylint: disable=E0602",
"return",
"salt",
".",
"utils",
".",
"cloud",
".",
"list_nodes_select",
"(",
... | Return a list of the VMs that are on the provider, with select fields | [
"Return",
"a",
"list",
"of",
"the",
"VMs",
"that",
"are",
"on",
"the",
"provider",
"with",
"select",
"fields"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L493-L502 | train |
saltstack/salt | salt/cloud/libcloudfuncs.py | conn_has_method | def conn_has_method(conn, method_name):
'''
Find if the provided connection object has a specific method
'''
if method_name in dir(conn):
return True
log.error('Method \'%s\' not yet supported!', method_name)
return False | python | def conn_has_method(conn, method_name):
'''
Find if the provided connection object has a specific method
'''
if method_name in dir(conn):
return True
log.error('Method \'%s\' not yet supported!', method_name)
return False | [
"def",
"conn_has_method",
"(",
"conn",
",",
"method_name",
")",
":",
"if",
"method_name",
"in",
"dir",
"(",
"conn",
")",
":",
"return",
"True",
"log",
".",
"error",
"(",
"'Method \\'%s\\' not yet supported!'",
",",
"method_name",
")",
"return",
"False"
] | Find if the provided connection object has a specific method | [
"Find",
"if",
"the",
"provided",
"connection",
"object",
"has",
"a",
"specific",
"method"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/libcloudfuncs.py#L519-L527 | train |
saltstack/salt | salt/modules/influxdbmod.py | db_exists | def db_exists(name, **client_args):
'''
Checks if a database exists in InfluxDB.
name
Name of the database to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.db_exists <name>
'''
if name in [db['name'] for db in list_dbs(**client_args)]:
return True
return False | python | def db_exists(name, **client_args):
'''
Checks if a database exists in InfluxDB.
name
Name of the database to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.db_exists <name>
'''
if name in [db['name'] for db in list_dbs(**client_args)]:
return True
return False | [
"def",
"db_exists",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"name",
"in",
"[",
"db",
"[",
"'name'",
"]",
"for",
"db",
"in",
"list_dbs",
"(",
"*",
"*",
"client_args",
")",
"]",
":",
"return",
"True",
"return",
"False"
] | Checks if a database exists in InfluxDB.
name
Name of the database to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.db_exists <name> | [
"Checks",
"if",
"a",
"database",
"exists",
"in",
"InfluxDB",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L95-L111 | train |
saltstack/salt | salt/modules/influxdbmod.py | create_db | def create_db(name, **client_args):
'''
Create a database.
name
Name of the database to create.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_db <name>
'''
if db_exists(name, **client_args):
log.info('DB \'%s\' already exists', name)
return False
client = _client(**client_args)
client.create_database(name)
return True | python | def create_db(name, **client_args):
'''
Create a database.
name
Name of the database to create.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_db <name>
'''
if db_exists(name, **client_args):
log.info('DB \'%s\' already exists', name)
return False
client = _client(**client_args)
client.create_database(name)
return True | [
"def",
"create_db",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"db_exists",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"log",
".",
"info",
"(",
"'DB \\'%s\\' already exists'",
",",
"name",
")",
"return",
"False",
"client",
"=",
... | Create a database.
name
Name of the database to create.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_db <name> | [
"Create",
"a",
"database",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L114-L134 | train |
saltstack/salt | salt/modules/influxdbmod.py | drop_db | def drop_db(name, **client_args):
'''
Drop a database.
name
Name of the database to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_db <name>
'''
if not db_exists(name, **client_args):
log.info('DB \'%s\' does not exist', name)
return False
client = _client(**client_args)
client.drop_database(name)
return True | python | def drop_db(name, **client_args):
'''
Drop a database.
name
Name of the database to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_db <name>
'''
if not db_exists(name, **client_args):
log.info('DB \'%s\' does not exist', name)
return False
client = _client(**client_args)
client.drop_database(name)
return True | [
"def",
"drop_db",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"not",
"db_exists",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"log",
".",
"info",
"(",
"'DB \\'%s\\' does not exist'",
",",
"name",
")",
"return",
"False",
"client",
... | Drop a database.
name
Name of the database to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_db <name> | [
"Drop",
"a",
"database",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L137-L157 | train |
saltstack/salt | salt/modules/influxdbmod.py | user_info | def user_info(name, **client_args):
'''
Get information about given user.
name
Name of the user for which to get information.
CLI Example:
.. code-block:: bash
salt '*' influxdb.user_info <name>
'''
matching_users = (user for user in list_users(**client_args)
if user.get('user') == name)
try:
return next(matching_users)
except StopIteration:
pass | python | def user_info(name, **client_args):
'''
Get information about given user.
name
Name of the user for which to get information.
CLI Example:
.. code-block:: bash
salt '*' influxdb.user_info <name>
'''
matching_users = (user for user in list_users(**client_args)
if user.get('user') == name)
try:
return next(matching_users)
except StopIteration:
pass | [
"def",
"user_info",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"matching_users",
"=",
"(",
"user",
"for",
"user",
"in",
"list_users",
"(",
"*",
"*",
"client_args",
")",
"if",
"user",
".",
"get",
"(",
"'user'",
")",
"==",
"name",
")",
"try",... | Get information about given user.
name
Name of the user for which to get information.
CLI Example:
.. code-block:: bash
salt '*' influxdb.user_info <name> | [
"Get",
"information",
"about",
"given",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L194-L213 | train |
saltstack/salt | salt/modules/influxdbmod.py | create_user | def create_user(name, passwd, admin=False, **client_args):
'''
Create a user.
name
Name of the user to create.
passwd
Password of the new user.
admin : False
Whether the user should have cluster administration
privileges or not.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_user <name> <password>
salt '*' influxdb.create_user <name> <password> admin=True
'''
if user_exists(name, **client_args):
log.info("User '%s' already exists", name)
return False
client = _client(**client_args)
client.create_user(name, passwd, admin)
return True | python | def create_user(name, passwd, admin=False, **client_args):
'''
Create a user.
name
Name of the user to create.
passwd
Password of the new user.
admin : False
Whether the user should have cluster administration
privileges or not.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_user <name> <password>
salt '*' influxdb.create_user <name> <password> admin=True
'''
if user_exists(name, **client_args):
log.info("User '%s' already exists", name)
return False
client = _client(**client_args)
client.create_user(name, passwd, admin)
return True | [
"def",
"create_user",
"(",
"name",
",",
"passwd",
",",
"admin",
"=",
"False",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"user_exists",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"log",
".",
"info",
"(",
"\"User '%s' already exists\"",
",",
... | Create a user.
name
Name of the user to create.
passwd
Password of the new user.
admin : False
Whether the user should have cluster administration
privileges or not.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_user <name> <password>
salt '*' influxdb.create_user <name> <password> admin=True | [
"Create",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L216-L244 | train |
saltstack/salt | salt/modules/influxdbmod.py | set_user_password | def set_user_password(name, passwd, **client_args):
'''
Change password of a user.
name
Name of the user for whom to set the password.
passwd
New password of the user.
CLI Example:
.. code-block:: bash
salt '*' influxdb.set_user_password <name> <password>
'''
if not user_exists(name, **client_args):
log.info('User \'%s\' does not exist', name)
return False
client = _client(**client_args)
client.set_user_password(name, passwd)
return True | python | def set_user_password(name, passwd, **client_args):
'''
Change password of a user.
name
Name of the user for whom to set the password.
passwd
New password of the user.
CLI Example:
.. code-block:: bash
salt '*' influxdb.set_user_password <name> <password>
'''
if not user_exists(name, **client_args):
log.info('User \'%s\' does not exist', name)
return False
client = _client(**client_args)
client.set_user_password(name, passwd)
return True | [
"def",
"set_user_password",
"(",
"name",
",",
"passwd",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"not",
"user_exists",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"log",
".",
"info",
"(",
"'User \\'%s\\' does not exist'",
",",
"name",
")",
... | Change password of a user.
name
Name of the user for whom to set the password.
passwd
New password of the user.
CLI Example:
.. code-block:: bash
salt '*' influxdb.set_user_password <name> <password> | [
"Change",
"password",
"of",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L247-L270 | train |
saltstack/salt | salt/modules/influxdbmod.py | grant_admin_privileges | def grant_admin_privileges(name, **client_args):
'''
Grant cluster administration privileges to a user.
name
Name of the user to whom admin privileges will be granted.
CLI Example:
.. code-block:: bash
salt '*' influxdb.grant_admin_privileges <name>
'''
client = _client(**client_args)
client.grant_admin_privileges(name)
return True | python | def grant_admin_privileges(name, **client_args):
'''
Grant cluster administration privileges to a user.
name
Name of the user to whom admin privileges will be granted.
CLI Example:
.. code-block:: bash
salt '*' influxdb.grant_admin_privileges <name>
'''
client = _client(**client_args)
client.grant_admin_privileges(name)
return True | [
"def",
"grant_admin_privileges",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"client",
".",
"grant_admin_privileges",
"(",
"name",
")",
"return",
"True"
] | Grant cluster administration privileges to a user.
name
Name of the user to whom admin privileges will be granted.
CLI Example:
.. code-block:: bash
salt '*' influxdb.grant_admin_privileges <name> | [
"Grant",
"cluster",
"administration",
"privileges",
"to",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L273-L289 | train |
saltstack/salt | salt/modules/influxdbmod.py | revoke_admin_privileges | def revoke_admin_privileges(name, **client_args):
'''
Revoke cluster administration privileges from a user.
name
Name of the user from whom admin privileges will be revoked.
CLI Example:
.. code-block:: bash
salt '*' influxdb.revoke_admin_privileges <name>
'''
client = _client(**client_args)
client.revoke_admin_privileges(name)
return True | python | def revoke_admin_privileges(name, **client_args):
'''
Revoke cluster administration privileges from a user.
name
Name of the user from whom admin privileges will be revoked.
CLI Example:
.. code-block:: bash
salt '*' influxdb.revoke_admin_privileges <name>
'''
client = _client(**client_args)
client.revoke_admin_privileges(name)
return True | [
"def",
"revoke_admin_privileges",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"client",
".",
"revoke_admin_privileges",
"(",
"name",
")",
"return",
"True"
] | Revoke cluster administration privileges from a user.
name
Name of the user from whom admin privileges will be revoked.
CLI Example:
.. code-block:: bash
salt '*' influxdb.revoke_admin_privileges <name> | [
"Revoke",
"cluster",
"administration",
"privileges",
"from",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L292-L308 | train |
saltstack/salt | salt/modules/influxdbmod.py | remove_user | def remove_user(name, **client_args):
'''
Remove a user.
name
Name of the user to remove
CLI Example:
.. code-block:: bash
salt '*' influxdb.remove_user <name>
'''
if not user_exists(name, **client_args):
log.info('User \'%s\' does not exist', name)
return False
client = _client(**client_args)
client.drop_user(name)
return True | python | def remove_user(name, **client_args):
'''
Remove a user.
name
Name of the user to remove
CLI Example:
.. code-block:: bash
salt '*' influxdb.remove_user <name>
'''
if not user_exists(name, **client_args):
log.info('User \'%s\' does not exist', name)
return False
client = _client(**client_args)
client.drop_user(name)
return True | [
"def",
"remove_user",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"not",
"user_exists",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"log",
".",
"info",
"(",
"'User \\'%s\\' does not exist'",
",",
"name",
")",
"return",
"False",
"c... | Remove a user.
name
Name of the user to remove
CLI Example:
.. code-block:: bash
salt '*' influxdb.remove_user <name> | [
"Remove",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L311-L331 | train |
saltstack/salt | salt/modules/influxdbmod.py | get_retention_policy | def get_retention_policy(database, name, **client_args):
'''
Get an existing retention policy.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy.
CLI Example:
.. code-block:: bash
salt '*' influxdb.get_retention_policy metrics default
'''
client = _client(**client_args)
try:
return next((p for p in client.get_list_retention_policies(database)
if p.get('name') == name))
except StopIteration:
return {} | python | def get_retention_policy(database, name, **client_args):
'''
Get an existing retention policy.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy.
CLI Example:
.. code-block:: bash
salt '*' influxdb.get_retention_policy metrics default
'''
client = _client(**client_args)
try:
return next((p for p in client.get_list_retention_policies(database)
if p.get('name') == name))
except StopIteration:
return {} | [
"def",
"get_retention_policy",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"try",
":",
"return",
"next",
"(",
"(",
"p",
"for",
"p",
"in",
"client",
".",
"get_list_re... | Get an existing retention policy.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy.
CLI Example:
.. code-block:: bash
salt '*' influxdb.get_retention_policy metrics default | [
"Get",
"an",
"existing",
"retention",
"policy",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L334-L357 | train |
saltstack/salt | salt/modules/influxdbmod.py | retention_policy_exists | def retention_policy_exists(database, name, **client_args):
'''
Check if retention policy with given name exists.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.retention_policy_exists metrics default
'''
if get_retention_policy(database, name, **client_args):
return True
return False | python | def retention_policy_exists(database, name, **client_args):
'''
Check if retention policy with given name exists.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.retention_policy_exists metrics default
'''
if get_retention_policy(database, name, **client_args):
return True
return False | [
"def",
"retention_policy_exists",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"get_retention_policy",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"return",
"True",
"return",
"False"
] | Check if retention policy with given name exists.
database
Name of the database for which the retention policy was
defined.
name
Name of the retention policy to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.retention_policy_exists metrics default | [
"Check",
"if",
"retention",
"policy",
"with",
"given",
"name",
"exists",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L360-L380 | train |
saltstack/salt | salt/modules/influxdbmod.py | drop_retention_policy | def drop_retention_policy(database, name, **client_args):
'''
Drop a retention policy.
database
Name of the database for which the retention policy will be dropped.
name
Name of the retention policy to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_retention_policy mydb mypr
'''
client = _client(**client_args)
client.drop_retention_policy(name, database)
return True | python | def drop_retention_policy(database, name, **client_args):
'''
Drop a retention policy.
database
Name of the database for which the retention policy will be dropped.
name
Name of the retention policy to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_retention_policy mydb mypr
'''
client = _client(**client_args)
client.drop_retention_policy(name, database)
return True | [
"def",
"drop_retention_policy",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"client",
".",
"drop_retention_policy",
"(",
"name",
",",
"database",
")",
"return",
"True"
] | Drop a retention policy.
database
Name of the database for which the retention policy will be dropped.
name
Name of the retention policy to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_retention_policy mydb mypr | [
"Drop",
"a",
"retention",
"policy",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L383-L402 | train |
saltstack/salt | salt/modules/influxdbmod.py | create_retention_policy | def create_retention_policy(database,
name,
duration,
replication,
default=False,
**client_args):
'''
Create a retention policy.
database
Name of the database for which the retention policy will be created.
name
Name of the new retention policy.
duration
Duration of the new retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and mean
1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks, respectively. For
infinite retention – meaning the data will never be deleted – use 'INF'
for duration. The minimum retention period is 1 hour.
replication
Replication factor of the retention policy.
This determines how many independent copies of each data point are
stored in a cluster.
default : False
Whether or not the policy as default will be set as default.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_retention_policy metrics default 1d 1
'''
client = _client(**client_args)
client.create_retention_policy(name, duration, replication, database,
default)
return True | python | def create_retention_policy(database,
name,
duration,
replication,
default=False,
**client_args):
'''
Create a retention policy.
database
Name of the database for which the retention policy will be created.
name
Name of the new retention policy.
duration
Duration of the new retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and mean
1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks, respectively. For
infinite retention – meaning the data will never be deleted – use 'INF'
for duration. The minimum retention period is 1 hour.
replication
Replication factor of the retention policy.
This determines how many independent copies of each data point are
stored in a cluster.
default : False
Whether or not the policy as default will be set as default.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_retention_policy metrics default 1d 1
'''
client = _client(**client_args)
client.create_retention_policy(name, duration, replication, database,
default)
return True | [
"def",
"create_retention_policy",
"(",
"database",
",",
"name",
",",
"duration",
",",
"replication",
",",
"default",
"=",
"False",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"client",
".",
"create_r... | Create a retention policy.
database
Name of the database for which the retention policy will be created.
name
Name of the new retention policy.
duration
Duration of the new retention policy.
Durations such as 1h, 90m, 12h, 7d, and 4w, are all supported and mean
1 hour, 90 minutes, 12 hours, 7 day, and 4 weeks, respectively. For
infinite retention – meaning the data will never be deleted – use 'INF'
for duration. The minimum retention period is 1 hour.
replication
Replication factor of the retention policy.
This determines how many independent copies of each data point are
stored in a cluster.
default : False
Whether or not the policy as default will be set as default.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_retention_policy metrics default 1d 1 | [
"Create",
"a",
"retention",
"policy",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L405-L447 | train |
saltstack/salt | salt/modules/influxdbmod.py | list_privileges | def list_privileges(name, **client_args):
'''
List privileges from a user.
name
Name of the user from whom privileges will be listed.
CLI Example:
.. code-block:: bash
salt '*' influxdb.list_privileges <name>
'''
client = _client(**client_args)
res = {}
for item in client.get_list_privileges(name):
res[item['database']] = item['privilege'].split()[0].lower()
return res | python | def list_privileges(name, **client_args):
'''
List privileges from a user.
name
Name of the user from whom privileges will be listed.
CLI Example:
.. code-block:: bash
salt '*' influxdb.list_privileges <name>
'''
client = _client(**client_args)
res = {}
for item in client.get_list_privileges(name):
res[item['database']] = item['privilege'].split()[0].lower()
return res | [
"def",
"list_privileges",
"(",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"res",
"=",
"{",
"}",
"for",
"item",
"in",
"client",
".",
"get_list_privileges",
"(",
"name",
")",
":",
"res",
... | List privileges from a user.
name
Name of the user from whom privileges will be listed.
CLI Example:
.. code-block:: bash
salt '*' influxdb.list_privileges <name> | [
"List",
"privileges",
"from",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L495-L513 | train |
saltstack/salt | salt/modules/influxdbmod.py | grant_privilege | def grant_privilege(database, privilege, username, **client_args):
'''
Grant a privilege on a database to a user.
database
Name of the database to grant the privilege on.
privilege
Privilege to grant. Can be one of 'read', 'write' or 'all'.
username
Name of the user to grant the privilege to.
'''
client = _client(**client_args)
client.grant_privilege(privilege, database, username)
return True | python | def grant_privilege(database, privilege, username, **client_args):
'''
Grant a privilege on a database to a user.
database
Name of the database to grant the privilege on.
privilege
Privilege to grant. Can be one of 'read', 'write' or 'all'.
username
Name of the user to grant the privilege to.
'''
client = _client(**client_args)
client.grant_privilege(privilege, database, username)
return True | [
"def",
"grant_privilege",
"(",
"database",
",",
"privilege",
",",
"username",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"client",
".",
"grant_privilege",
"(",
"privilege",
",",
"database",
",",
"us... | Grant a privilege on a database to a user.
database
Name of the database to grant the privilege on.
privilege
Privilege to grant. Can be one of 'read', 'write' or 'all'.
username
Name of the user to grant the privilege to. | [
"Grant",
"a",
"privilege",
"on",
"a",
"database",
"to",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L516-L532 | train |
saltstack/salt | salt/modules/influxdbmod.py | revoke_privilege | def revoke_privilege(database, privilege, username, **client_args):
'''
Revoke a privilege on a database from a user.
database
Name of the database to grant the privilege on.
privilege
Privilege to grant. Can be one of 'read', 'write' or 'all'.
username
Name of the user to grant the privilege to.
'''
client = _client(**client_args)
client.revoke_privilege(privilege, database, username)
return True | python | def revoke_privilege(database, privilege, username, **client_args):
'''
Revoke a privilege on a database from a user.
database
Name of the database to grant the privilege on.
privilege
Privilege to grant. Can be one of 'read', 'write' or 'all'.
username
Name of the user to grant the privilege to.
'''
client = _client(**client_args)
client.revoke_privilege(privilege, database, username)
return True | [
"def",
"revoke_privilege",
"(",
"database",
",",
"privilege",
",",
"username",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"client",
".",
"revoke_privilege",
"(",
"privilege",
",",
"database",
",",
"... | Revoke a privilege on a database from a user.
database
Name of the database to grant the privilege on.
privilege
Privilege to grant. Can be one of 'read', 'write' or 'all'.
username
Name of the user to grant the privilege to. | [
"Revoke",
"a",
"privilege",
"on",
"a",
"database",
"from",
"a",
"user",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L535-L551 | train |
saltstack/salt | salt/modules/influxdbmod.py | continuous_query_exists | def continuous_query_exists(database, name, **client_args):
'''
Check if continuous query with given name exists on the database.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.continuous_query_exists metrics default
'''
if get_continuous_query(database, name, **client_args):
return True
return False | python | def continuous_query_exists(database, name, **client_args):
'''
Check if continuous query with given name exists on the database.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.continuous_query_exists metrics default
'''
if get_continuous_query(database, name, **client_args):
return True
return False | [
"def",
"continuous_query_exists",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"if",
"get_continuous_query",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"return",
"True",
"return",
"False"
] | Check if continuous query with given name exists on the database.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.continuous_query_exists metrics default | [
"Check",
"if",
"continuous",
"query",
"with",
"given",
"name",
"exists",
"on",
"the",
"database",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L554-L574 | train |
saltstack/salt | salt/modules/influxdbmod.py | get_continuous_query | def get_continuous_query(database, name, **client_args):
'''
Get an existing continuous query.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to get.
CLI Example:
.. code-block:: bash
salt '*' influxdb.get_continuous_query mydb cq_month
'''
client = _client(**client_args)
try:
for db, cqs in client.query('SHOW CONTINUOUS QUERIES').items():
if db[0] == database:
return next((cq for cq in cqs if cq.get('name') == name))
except StopIteration:
return {}
return {} | python | def get_continuous_query(database, name, **client_args):
'''
Get an existing continuous query.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to get.
CLI Example:
.. code-block:: bash
salt '*' influxdb.get_continuous_query mydb cq_month
'''
client = _client(**client_args)
try:
for db, cqs in client.query('SHOW CONTINUOUS QUERIES').items():
if db[0] == database:
return next((cq for cq in cqs if cq.get('name') == name))
except StopIteration:
return {}
return {} | [
"def",
"get_continuous_query",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"try",
":",
"for",
"db",
",",
"cqs",
"in",
"client",
".",
"query",
"(",
"'SHOW CONTINUOUS QU... | Get an existing continuous query.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to get.
CLI Example:
.. code-block:: bash
salt '*' influxdb.get_continuous_query mydb cq_month | [
"Get",
"an",
"existing",
"continuous",
"query",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L577-L602 | train |
def create_continuous_query(database, name, query, resample_time=None, coverage_period=None, **client_args):
    '''
    Create a continuous query.

    database
        Name of the database for which the continuous query will be
        created on.

    name
        Name of the continuous query to create.

    query
        The continuous query string.

    resample_time : None
        Duration between continuous query resampling.

    coverage_period : None
        Duration specifying time period per sample.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.create_continuous_query mydb cq_month 'SELECT mean(*) INTO mydb.a_month.:MEASUREMENT FROM mydb.a_week./.*/ GROUP BY time(5m), *' '''
    client = _client(**client_args)

    # Assemble the statement template clause by clause; the RESAMPLE and
    # FOR clauses are only emitted when the caller asked for them.
    clauses = ['CREATE CONTINUOUS QUERY {name} ON {database}']
    if resample_time:
        clauses.append('RESAMPLE EVERY {resample_time}')
    if coverage_period:
        clauses.append('FOR {coverage_period}')
    clauses.append('BEGIN {query} END')

    full_query = ' '.join(clauses).format(
        name=name,
        database=database,
        query=query,
        resample_time=resample_time,
        coverage_period=coverage_period,
    )
    client.query(full_query)
    return True
'''
Create a continuous query.
database
Name of the database for which the continuous query will be
created on.
name
Name of the continuous query to create.
query
The continuous query string.
resample_time : None
Duration between continuous query resampling.
coverage_period : None
Duration specifying time period per sample.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_continuous_query mydb cq_month 'SELECT mean(*) INTO mydb.a_month.:MEASUREMENT FROM mydb.a_week./.*/ GROUP BY time(5m), *' '''
client = _client(**client_args)
full_query = 'CREATE CONTINUOUS QUERY {name} ON {database}'
if resample_time:
full_query += ' RESAMPLE EVERY {resample_time}'
if coverage_period:
full_query += ' FOR {coverage_period}'
full_query += ' BEGIN {query} END'
query = full_query.format(
name=name,
database=database,
query=query,
resample_time=resample_time,
coverage_period=coverage_period
)
client.query(query)
return True | [
"def",
"create_continuous_query",
"(",
"database",
",",
"name",
",",
"query",
",",
"resample_time",
"=",
"None",
",",
"coverage_period",
"=",
"None",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"full... | Create a continuous query.
database
Name of the database for which the continuous query will be
created on.
name
Name of the continuous query to create.
query
The continuous query string.
resample_time : None
Duration between continuous query resampling.
coverage_period : None
Duration specifying time period per sample.
CLI Example:
.. code-block:: bash
salt '*' influxdb.create_continuous_query mydb cq_month 'SELECT mean(*) INTO mydb.a_month.:MEASUREMENT FROM mydb.a_week./.*/ GROUP BY time(5m), *' | [
"Create",
"a",
"continuous",
"query",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L605-L645 | train |
def drop_continuous_query(database, name, **client_args):
    '''
    Drop a continuous query.

    database
        Name of the database the continuous query will be dropped from.

    name
        Name of the continuous query to drop.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.drop_continuous_query mydb my_cq
    '''
    client = _client(**client_args)
    # InfluxQL has no bind parameters for identifiers, so the statement is
    # assembled with str.format, matching the rest of this module.
    client.query('DROP CONTINUOUS QUERY {0} ON {1}'.format(name, database))
    return True
'''
Drop a continuous query.
database
Name of the database for which the continuous query will
be drop from.
name
Name of the continuous query to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_continuous_query mydb my_cq
'''
client = _client(**client_args)
query = 'DROP CONTINUOUS QUERY {0} ON {1}'.format(name, database)
client.query(query)
return True | [
"def",
"drop_continuous_query",
"(",
"database",
",",
"name",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"query",
"=",
"'DROP CONTINUOUS QUERY {0} ON {1}'",
".",
"format",
"(",
"name",
",",
"database",
... | Drop a continuous query.
database
Name of the database for which the continuous query will
be drop from.
name
Name of the continuous query to drop.
CLI Example:
.. code-block:: bash
salt '*' influxdb.drop_continuous_query mydb my_cq | [
"Drop",
"a",
"continuous",
"query",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L648-L669 | train |
saltstack/salt | salt/modules/influxdbmod.py | _pull_query_results | def _pull_query_results(resultset):
'''
Parses a ResultSet returned from InfluxDB into a dictionary of results,
grouped by series names and optional JSON-encoded grouping tags.
'''
_results = collections.defaultdict(lambda: {})
for _header, _values in resultset.items():
_header, _group_tags = _header
if _group_tags:
_results[_header][salt.utils.json.dumps(_group_tags)] = [_value for _value in _values]
else:
_results[_header] = [_value for _value in _values]
return dict(sorted(_results.items())) | python | def _pull_query_results(resultset):
'''
Parses a ResultSet returned from InfluxDB into a dictionary of results,
grouped by series names and optional JSON-encoded grouping tags.
'''
_results = collections.defaultdict(lambda: {})
for _header, _values in resultset.items():
_header, _group_tags = _header
if _group_tags:
_results[_header][salt.utils.json.dumps(_group_tags)] = [_value for _value in _values]
else:
_results[_header] = [_value for _value in _values]
return dict(sorted(_results.items())) | [
"def",
"_pull_query_results",
"(",
"resultset",
")",
":",
"_results",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"{",
"}",
")",
"for",
"_header",
",",
"_values",
"in",
"resultset",
".",
"items",
"(",
")",
":",
"_header",
",",
"_group_tags"... | Parses a ResultSet returned from InfluxDB into a dictionary of results,
grouped by series names and optional JSON-encoded grouping tags. | [
"Parses",
"a",
"ResultSet",
"returned",
"from",
"InfluxDB",
"into",
"a",
"dictionary",
"of",
"results",
"grouped",
"by",
"series",
"names",
"and",
"optional",
"JSON",
"-",
"encoded",
"grouping",
"tags",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L672-L684 | train |
def query(database, query, **client_args):
    '''
    Execute a query.

    database
        Name of the database to query on.

    query
        InfluxQL query string.
    '''
    # collections.Sequence was deprecated in Python 3.3 and removed in
    # Python 3.10; prefer collections.abc and fall back to the legacy
    # location for Python 2 compatibility.
    try:
        from collections.abc import Sequence
    except ImportError:
        from collections import Sequence
    client = _client(**client_args)
    _result = client.query(query, database=database)

    # A multi-statement query yields a sequence of ResultSets; a single
    # statement yields one ResultSet. Normalize both to a list of dicts.
    if isinstance(_result, Sequence):
        return [_pull_query_results(_query_result)
                for _query_result in _result if _query_result]
    return [_pull_query_results(_result) if _result else {}]
'''
Execute a query.
database
Name of the database to query on.
query
InfluxQL query string.
'''
client = _client(**client_args)
_result = client.query(query, database=database)
if isinstance(_result, collections.Sequence):
return [_pull_query_results(_query_result) for _query_result in _result if _query_result]
return [_pull_query_results(_result) if _result else {}] | [
"def",
"query",
"(",
"database",
",",
"query",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"_result",
"=",
"client",
".",
"query",
"(",
"query",
",",
"database",
"=",
"database",
")",
"if",
"is... | Execute a query.
database
Name of the database to query on.
query
InfluxQL query string. | [
"Execute",
"a",
"query",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/influxdbmod.py#L687-L702 | train |
saltstack/salt | salt/utils/listdiffer.py | ListDictDiffer._get_recursive_difference | def _get_recursive_difference(self, type):
'''Returns the recursive diff between dict values'''
if type == 'intersect':
return [recursive_diff(item['old'], item['new']) for item in self._intersect]
elif type == 'added':
return [recursive_diff({}, item) for item in self._added]
elif type == 'removed':
return [recursive_diff(item, {}, ignore_missing_keys=False)
for item in self._removed]
elif type == 'all':
recursive_list = []
recursive_list.extend([recursive_diff(item['old'], item['new']) for item in self._intersect])
recursive_list.extend([recursive_diff({}, item) for item in self._added])
recursive_list.extend([recursive_diff(item, {},
ignore_missing_keys=False)
for item in self._removed])
return recursive_list
else:
raise ValueError('The given type for recursive list matching '
'is not supported.') | python | def _get_recursive_difference(self, type):
'''Returns the recursive diff between dict values'''
if type == 'intersect':
return [recursive_diff(item['old'], item['new']) for item in self._intersect]
elif type == 'added':
return [recursive_diff({}, item) for item in self._added]
elif type == 'removed':
return [recursive_diff(item, {}, ignore_missing_keys=False)
for item in self._removed]
elif type == 'all':
recursive_list = []
recursive_list.extend([recursive_diff(item['old'], item['new']) for item in self._intersect])
recursive_list.extend([recursive_diff({}, item) for item in self._added])
recursive_list.extend([recursive_diff(item, {},
ignore_missing_keys=False)
for item in self._removed])
return recursive_list
else:
raise ValueError('The given type for recursive list matching '
'is not supported.') | [
"def",
"_get_recursive_difference",
"(",
"self",
",",
"type",
")",
":",
"if",
"type",
"==",
"'intersect'",
":",
"return",
"[",
"recursive_diff",
"(",
"item",
"[",
"'old'",
"]",
",",
"item",
"[",
"'new'",
"]",
")",
"for",
"item",
"in",
"self",
".",
"_in... | Returns the recursive diff between dict values | [
"Returns",
"the",
"recursive",
"diff",
"between",
"dict",
"values"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/listdiffer.py#L69-L88 | train |
saltstack/salt | salt/utils/listdiffer.py | ListDictDiffer.remove_diff | def remove_diff(self, diff_key=None, diff_list='intersect'):
'''Deletes an attribute from all of the intersect objects'''
if diff_list == 'intersect':
for item in self._intersect:
item['old'].pop(diff_key, None)
item['new'].pop(diff_key, None)
if diff_list == 'removed':
for item in self._removed:
item.pop(diff_key, None) | python | def remove_diff(self, diff_key=None, diff_list='intersect'):
'''Deletes an attribute from all of the intersect objects'''
if diff_list == 'intersect':
for item in self._intersect:
item['old'].pop(diff_key, None)
item['new'].pop(diff_key, None)
if diff_list == 'removed':
for item in self._removed:
item.pop(diff_key, None) | [
"def",
"remove_diff",
"(",
"self",
",",
"diff_key",
"=",
"None",
",",
"diff_list",
"=",
"'intersect'",
")",
":",
"if",
"diff_list",
"==",
"'intersect'",
":",
"for",
"item",
"in",
"self",
".",
"_intersect",
":",
"item",
"[",
"'old'",
"]",
".",
"pop",
"(... | Deletes an attribute from all of the intersect objects | [
"Deletes",
"an",
"attribute",
"from",
"all",
"of",
"the",
"intersect",
"objects"
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/listdiffer.py#L105-L113 | train |
saltstack/salt | salt/utils/listdiffer.py | ListDictDiffer.diffs | def diffs(self):
'''
Returns a list of dictionaries with key value pairs.
The values are the differences between the items identified by the key.
'''
differences = []
for item in self._get_recursive_difference(type='all'):
if item.diffs:
if item.past_dict:
differences.append({item.past_dict[self._key]: item.diffs})
elif item.current_dict:
differences.append({item.current_dict[self._key]: item.diffs})
return differences | python | def diffs(self):
'''
Returns a list of dictionaries with key value pairs.
The values are the differences between the items identified by the key.
'''
differences = []
for item in self._get_recursive_difference(type='all'):
if item.diffs:
if item.past_dict:
differences.append({item.past_dict[self._key]: item.diffs})
elif item.current_dict:
differences.append({item.current_dict[self._key]: item.diffs})
return differences | [
"def",
"diffs",
"(",
"self",
")",
":",
"differences",
"=",
"[",
"]",
"for",
"item",
"in",
"self",
".",
"_get_recursive_difference",
"(",
"type",
"=",
"'all'",
")",
":",
"if",
"item",
".",
"diffs",
":",
"if",
"item",
".",
"past_dict",
":",
"differences"... | Returns a list of dictionaries with key value pairs.
The values are the differences between the items identified by the key. | [
"Returns",
"a",
"list",
"of",
"dictionaries",
"with",
"key",
"value",
"pairs",
".",
"The",
"values",
"are",
"the",
"differences",
"between",
"the",
"items",
"identified",
"by",
"the",
"key",
"."
] | e8541fd6e744ab0df786c0f76102e41631f45d46 | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/listdiffer.py#L116-L128 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.