docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Return the full reddit URL associated with the usernote.
Returns None when the usernote has no stored link; otherwise expands the
short-hand link using the note's subreddit.
|
def full_url(self):
    """Return the full reddit URL for this usernote.

    Returns None when the note has no stored link.
    """
    if self.link == '':
        return None
    # Expand the short-hand link against this note's subreddit
    return Note._expand_url(self.link, self.subreddit)
| 736,522
|
Convert a reddit URL into the short-hand used by usernotes.
Arguments:
link: a link to a comment, submission, or message (str)
Returns a String of the shorthand URL
|
def _compress_url(link):
    """Convert a reddit URL into the short-hand used by usernotes.

    Arguments:
        link: a link to a comment, submission, or message (str)

    Returns 'l,<post_id>[,<comment_id>]' for submissions/comments,
    'm,<message_id>' for messages, or None if the link is unrecognized.
    """
    comment_pattern = re.compile(
        r'/comments/([A-Za-z\d]{2,})(?:/[^\s]+/([A-Za-z\d]+))?')
    message_pattern = re.compile(r'/message/messages/([A-Za-z\d]+)')
    found = comment_pattern.findall(link)
    if found:
        post_id, comment_id = found[0]
        if comment_id == '':
            return 'l,' + post_id
        return 'l,' + post_id + ',' + comment_id
    found = message_pattern.findall(link)
    if not found:
        return None
    return 'm,' + found[0]
| 736,523
|
Convert a usernote's URL short-hand into a full reddit URL.
Arguments:
subreddit: the subreddit the URL is for (PRAW Subreddit object or str)
short_link: the compressed link from a usernote (str)
Returns a String of the full URL.
|
def _expand_url(short_link, subreddit=None):
    """Convert a usernote's URL short-hand into a full reddit URL.

    Arguments:
        short_link: the compressed link from a usernote (str)
        subreddit: the subreddit the URL is for (PRAW Subreddit or str);
            required for link ('l,') entries

    Returns the full URL (str) or None; raises ValueError when a
    subreddit is required but missing.
    """
    # URL templates used by the different note kinds
    message_fmt = 'https://reddit.com/message/messages/{}'
    comment_fmt = 'https://reddit.com/r/{}/comments/{}/-/{}'
    post_fmt = 'https://reddit.com/r/{}/comments/{}/'
    if short_link == '':
        return None
    parts = short_link.split(',')
    if parts[0] == 'm':
        return message_fmt.format(parts[1])
    if subreddit:
        if parts[0] != 'l':
            # Unknown prefix: nothing to expand
            return None
        if len(parts) > 2:
            return comment_fmt.format(subreddit, parts[1], parts[2])
        return post_fmt.format(subreddit, parts[1])
    raise ValueError('Subreddit name must be provided')
| 736,524
|
Constructor for the UserNotes class.
Arguments:
r: the authenticated reddit instance (PRAW Reddit Object)
subreddit: the subreddit the usernotes will be pulled from (PRAW
Subreddit object)
lazy_start: whether to download the usernotes immediately upon
instantiation (bool)
|
def __init__(self, r, subreddit, lazy_start=False):
    """Construct the usernotes manager.

    Arguments:
        r: the authenticated reddit instance (PRAW Reddit object)
        subreddit: the subreddit the usernotes will be pulled from
            (PRAW Subreddit object)
        lazy_start: when True, skip downloading the usernotes on
            instantiation (bool)
    """
    self.r = r
    self.subreddit = subreddit
    # Local cache of the decompressed usernotes JSON
    self.cached_json = {}
    if not lazy_start:
        self.get_json()
| 736,525
|
Send the JSON from the cache to the usernotes wiki page.
Arguments:
reason: the change reason that will be posted to the wiki changelog
(str)
Raises:
OverflowError if the new JSON data is greater than max_page_size
|
def set_json(self, reason='', new_page=False):
    """Push the cached JSON to the usernotes wiki page.

    Arguments:
        reason: the change reason posted to the wiki changelog (str)
        new_page: create the wiki page instead of editing it (bool)

    Raises:
        OverflowError: the compressed JSON exceeds max_page_size.
    """
    payload = json.dumps(self._compress_json(self.cached_json))
    if len(payload) > self.max_page_size:
        raise OverflowError(
            'Usernotes page is too large (>{0} characters)'.
            format(self.max_page_size)
        )
    if new_page:
        self.subreddit.wiki.create(self.page_name, payload, reason)
        # Hide the new page: visible to moderators only
        self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)
    else:
        self.subreddit.wiki[self.page_name].edit(payload, reason)
| 736,528
|
Return a list of Note objects for the given user.
Return an empty list if no notes are found.
Arguments:
user: the user to search for in the usernotes (str)
|
def get_notes(self, user):
    """Return a list of Note objects for the given user.

    Arguments:
        user: the user to search for in the usernotes (str)

    Returns an empty list when the user (or an expected key) is absent.
    """
    try:
        return [
            Note(
                user=user,
                note=entry['n'],
                subreddit=self.subreddit,
                mod=self._mod_from_index(entry['m']),
                link=entry['l'],
                warning=self._warning_from_index(entry['w']),
                note_time=entry['t'],
            )
            for entry in self.cached_json['users'][user]['ns']
        ]
    except KeyError:
        # User not found
        return []
| 736,529
|
Decompress the BLOB portion of the usernotes.
Arguments:
j: the JSON returned from the wiki page (dict)
Returns a Dict with the 'blob' key removed and a 'users' key added
|
def _expand_json(self, j):
    """Decompress the BLOB portion of the usernotes.

    Arguments:
        j: the JSON returned from the wiki page (dict)

    Returns a dict with the 'blob' key removed and a 'users' key added.
    """
    expanded = copy.copy(j)
    expanded.pop('blob', None)  # drop the compressed payload
    # base64 -> zlib -> UTF-8 JSON text
    users_json = zlib.decompress(base64.b64decode(j['blob'])).decode('utf-8')
    expanded['users'] = json.loads(users_json)
    return expanded
| 736,530
|
Compress the BLOB data portion of the usernotes.
Arguments:
j: the JSON in Schema v5 format (dict)
Returns a dict with the 'users' key removed and 'blob' key added
|
def _compress_json(self, j):
    """Compress the BLOB data portion of the usernotes.

    Arguments:
        j: the JSON in Schema v5 format (dict)

    Returns a dict with the 'users' key removed and a 'blob' key added.
    """
    compressed = copy.copy(j)
    compressed.pop('users', None)
    # UTF-8 JSON text -> zlib -> base64
    packed = zlib.compress(
        json.dumps(j['users']).encode('utf-8'),
        self.zlib_compression_strength
    )
    compressed['blob'] = base64.b64encode(packed).decode('utf-8')
    return compressed
| 736,531
|
Add a note to the usernotes wiki page.
Arguments:
note: the note to be added (Note)
Returns the update message for the usernotes wiki
Raises:
ValueError when the warning type of the note can not be found in the
stored list of warnings.
|
def add_note(self, note):
    """Add a note to the cached usernotes JSON.

    Arguments:
        note: the note to be added (Note)

    Returns the update message for the usernotes wiki.

    Raises:
        ValueError: the note's warning type is not in the stored list
            of warnings and is not a valid warning type.
    """
    notes = self.cached_json
    if not note.moderator:
        note.moderator = self.r.user.me().name
    # Moderator is stored by index; register them if new
    mods = notes['constants']['users']
    if note.moderator not in mods:
        mods.append(note.moderator)
    mod_index = mods.index(note.moderator)
    # Warning type is stored by index; register it if new and valid
    warnings = notes['constants']['warnings']
    if note.warning not in warnings:
        if note.warning not in Note.warnings:
            raise ValueError('Warning type not valid: ' + note.warning)
        warnings.append(note.warning)
    warn_index = warnings.index(note.warning)
    new_note = {
        'n': note.note,
        't': note.time,
        'm': mod_index,
        'l': note.link,
        'w': warn_index
    }
    try:
        # Newest notes go first
        notes['users'][note.username]['ns'].insert(0, new_note)
    except KeyError:
        notes['users'][note.username] = {'ns': [new_note]}
    return '"create new note on user {}" via puni'.format(note.username)
| 736,532
|
Remove a single usernote from the usernotes.
Arguments:
username: the user for whom you're removing a note (str)
index: the index of the note which is to be removed (int)
Returns the update message for the usernotes wiki
|
def remove_note(self, username, index):
    """Remove a single usernote from the cached usernotes.

    Arguments:
        username: the user for whom a note is being removed (str)
        index: the index of the note to remove (int)

    Returns the update message for the usernotes wiki.
    """
    user_notes = self.cached_json['users'][username]['ns']
    user_notes.pop(index)
    # Drop the user's entry entirely once no notes remain
    if not user_notes:
        del self.cached_json['users'][username]
    return '"delete note #{} on user {}" via puni'.format(index, username)
| 736,533
|
Decorate functions that modify the internally stored usernotes JSON.
Ensures that updates are mirrored onto reddit.
Arguments:
func: the function being decorated
|
def update_cache(func):
    """Decorate methods that modify the internally stored usernotes JSON.

    Fetches fresh JSON before the wrapped call and, when the call
    returns a string (treated as a wiki update message), writes the
    cache back to reddit afterwards.  Pass ``lazy=True`` at the call
    site to skip both the fetch and the write-back.

    Arguments:
        func: the function being decorated

    Returns the wrapped function.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # Pop the control kwarg so the wrapped function never sees it
        lazy = kwargs.pop('lazy', False)
        if not lazy:
            self.get_json()
        ret = func(self, *args, **kwargs)
        # A string return value is assumed to be an update message
        if isinstance(ret, str) and not lazy:
            self.set_json(ret)
        # Bug fix: previously the update message was swallowed (the
        # mirrored branch returned None); always return the result.
        return ret
    return wrapper
| 736,684
|
Updates line types for a block's span.
Args:
span: First and last relative line number of a Block.
line_type: The type of line to update to.
Raises:
ValidationError: A special error on collision. This prevents Flake8
from crashing because it is converted to a Flake8 error tuple,
but it indicates to the user that something went wrong with
processing the function.
|
def update(self, span: typing.Tuple[int, int], line_type: LineType) -> None:
    """Update line types for a block's span.

    Args:
        span: First and last relative line number of a Block.
        line_type: The type of line to update to.

    Raises:
        ValidationError: A special error on collision.  This prevents
            Flake8 from crashing because it is converted to a Flake8
            error tuple, but it indicates to the user that something
            went wrong with processing the function.
    """
    first_block_line, last_block_line = span
    for i in range(first_block_line, last_block_line + 1):
        try:
            # Idiomatic item assignment (delegates to __setitem__)
            self[i] = line_type
        except ValueError as error:
            # Chain the original error for easier debugging
            raise ValidationError(
                i + self.fn_offset, 1, 'AAA99 {}'.format(error)
            ) from error
| 737,284
|
Collect functions that look like tests.
Args:
tree
skip_noqa: Flag used by command line debugger to skip functions that
are marked with "# noqa". Defaults to ``False``.
|
def find_test_functions(tree: ast.AST, skip_noqa: bool = False) -> List[ast.FunctionDef]:
    """Collect functions in *tree* that look like tests.

    Args:
        tree: Parsed module to search.
        skip_noqa: Flag used by the command line debugger to skip
            functions marked with "# noqa".  Defaults to ``False``.
    """
    finder = TestFuncLister(skip_noqa)
    finder.visit(tree)
    return finder.get_found_funcs()
| 737,844
|
Get location of the `obj`
Arguments:
:obj: self.Model instance.
|
def _location(self, obj):
    """Return the route URL locating *obj*.

    Arguments:
        :obj: self.Model instance.
    """
    id_field = self.clean_id_name
    route_kwargs = {self._resource.id_name: getattr(obj, id_field)}
    return self.request.route_url(self._resource.uid, **route_kwargs)
| 738,330
|
Initialize instance of ``Constraint``.
Args:
selector (string): URL decoded constraint ``selector``.
comparison (string, optional): Parsed/mapped ``comparison``
operator. Defaults to ``None``.
argument (string, optional): URL decoded constraint ``argument``.
Defaults to ``None``.
Raises:
FiqlObjectException: Not a valid FIQL comparison.
|
def __init__(self, selector, comparison=None, argument=None):
    """Initialize an instance of ``Constraint``.

    Args:
        selector (string): URL decoded constraint ``selector``.
        comparison (string, optional): Parsed/mapped ``comparison``
            operator.  Defaults to ``None``.
        argument (string, optional): URL decoded constraint
            ``argument``.  Defaults to ``None``.

    Raises:
        FiqlObjectException: Not a valid FIQL comparison.
    """
    super(Constraint, self).__init__()
    self.selector = selector
    # Only non-empty comparisons are validated against the FIQL grammar
    if comparison and COMPARISON_COMP.match(comparison) is None:
        raise FiqlObjectException(
            "'%s' is not a valid FIQL comparison" % comparison)
    self.comparison = comparison
    self.argument = argument
| 739,087
|
Split the string s using shell-like syntax.
Args:
s (str): String to split
posix (bool): Use posix split
Returns:
list of str: List of string parts
|
def split(s, posix=True):
    """Split the string *s* using shell-like syntax.

    Args:
        s (str or bytes): String to split; bytes are decoded as UTF-8.
        posix (bool): Use POSIX splitting rules.

    Returns:
        list of str: List of string parts.
    """
    # six.binary_type is simply ``bytes`` on Python 3; use the builtin
    if isinstance(s, bytes):
        s = s.decode("utf-8")
    return shlex.split(s, posix=posix)
| 739,097
|
Recursive search function.
Args:
path (str): Path to search recursively
matcher (str or callable): String pattern to search for or function
that returns True/False for a file argument
dirs (bool): if True returns directories that match the pattern
files (bool): if True returns files that match the pattern
Yields:
str: Found files and directories
|
def search(path, matcher="*", dirs=False, files=True):
    """Recursively search *path*, yielding matching entries.

    Args:
        path (str): Path to search recursively.
        matcher (str or callable): Glob pattern, or a function that
            returns True/False for a candidate name.
        dirs (bool): if True, yield directories that match.
        files (bool): if True, yield files that match.

    Yields:
        str: Full paths of found files and directories.
    """
    if callable(matcher):
        def select(names):
            return list(filter(matcher, names))
    else:
        def select(names):
            return fnmatch.filter(names, matcher)
    for root, found_dirs, found_files in os.walk(os.path.abspath(path)):
        candidates = []
        if dirs:
            candidates += found_dirs
        if files:
            candidates += found_files
        for name in select(candidates):
            yield os.path.join(root, name)
| 739,098
|
Change the current working directory.
Args:
directory (str): Directory to go to.
|
def chdir(directory):
    """Change the current working directory.

    Args:
        directory (str): Directory to go to.

    Returns:
        bool: True if the directory was changed, False otherwise.
    """
    target = os.path.abspath(directory)
    logger.info("chdir -> %s", target)
    try:
        if not os.path.isdir(target):
            logger.error(
                "chdir -> %s failed! Directory does not exist!", target
            )
            return False
        os.chdir(target)
        return True
    except Exception as e:
        logger.error("chdir -> %s failed! %s", target, e)
        return False
| 739,099
|
Context object for changing directory.
Args:
directory (str): Directory to go to.
create (bool): Create directory if it doesn't exists.
Usage::
>>> with goto(directory) as ok:
... if not ok:
... print 'Error'
... else:
... print 'All OK'
|
def goto(directory, create=False):
    """Context manager body: temporarily change into *directory*.

    Args:
        directory (str): Directory to go to.
        create (bool): Create the directory if it doesn't exist.

    Yields True after a successful chdir (restoring the previous cwd on
    exit), or False when the directory is missing and not created.
    """
    previous = os.getcwd()
    target = os.path.abspath(directory)
    # Guard clause: bail out when the directory is unusable
    if not os.path.isdir(target) and not (create and mkdir(target)):
        logger.info(
            "goto(%s) - directory does not exist, or cannot be " "created.",
            target,
        )
        yield False
        return
    logger.info("goto -> %s", target)
    os.chdir(target)
    try:
        yield True
    finally:
        logger.info("goto <- %s", target)
        os.chdir(previous)
| 739,100
|
Make a directory.
Create a leaf directory and all intermediate ones.
Works like ``mkdir``, except that any intermediate path segment (not just
the rightmost) will be created if it does not exist. This is recursive.
Args:
path (str): Directory to create
mode (int): Directory mode
delete (bool): Delete directory/file if exists
Returns:
bool: True if succeeded else False
|
def mkdir(path, mode=0o755, delete=False):
    """Create a leaf directory and all intermediate ones.

    Works like ``mkdir -p``.  If *path* already exists, returns True
    unless *delete* is set, in which case the existing entry is removed
    first and the directory re-created.

    Args:
        path (str): Directory to create.
        mode (int): Directory mode.
        delete (bool): Delete directory/file if it exists.

    Returns:
        bool: True if succeeded else False.
    """
    logger.info("mkdir: %s", path)
    if os.path.isdir(path):
        if not delete:
            # Already present and we were not asked to recreate it
            return True
        if not remove(path):
            return False
    try:
        os.makedirs(path, mode)
    except Exception:
        logger.exception("Failed to mkdir: %s" % path)
        return False
    return True
| 739,101
|
Copy data and mode bits ("cp source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
|
def __copyfile(source, destination):
    """Copy data and mode bits ("cp source destination").

    The destination may be a directory; its parent directory is created
    if needed.

    Args:
        source (str): Source file (file to copy).
        destination (str): Destination file or directory.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("copyfile: %s -> %s", source, destination)
    try:
        __create_destdir(destination)
        shutil.copy(source, destination)
    except Exception as e:
        logger.error(
            "copyfile: %s -> %s failed! Error: %s", source, destination, e
        )
        return False
    return True
| 739,103
|
Copy data and all stat info ("cp -p source destination").
The destination may be a directory.
Args:
source (str): Source file (file to copy).
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
|
def __copyfile2(source, destination):
    """Copy data and all stat info ("cp -p source destination").

    The destination may be a directory; its parent directory is created
    if needed.

    Args:
        source (str): Source file (file to copy).
        destination (str): Destination file or directory.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("copyfile2: %s -> %s", source, destination)
    try:
        __create_destdir(destination)
        shutil.copy2(source, destination)
    except Exception as e:
        logger.error(
            "copyfile2: %s -> %s failed! Error: %s", source, destination, e
        )
        return False
    return True
| 739,104
|
Copy file or directory.
Args:
source (str): Source file or directory
destination (str): Destination file or directory (where to copy).
Returns:
bool: True if the operation is successful, False otherwise.
|
def copy(source, destination):
    """Copy a file or a directory tree.

    Args:
        source (str): Source file or directory.
        destination (str): Destination file or directory.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    # Directories get the recursive treatment; files keep stat info
    handler = __copytree if os.path.isdir(source) else __copyfile2
    return handler(source, destination)
| 739,106
|
Copy all file found by glob.glob(pattern) to destination directory.
Args:
pattern (str): Glob pattern
destination (str): Path to the destination directory.
Returns:
bool: True if the operation is successful, False otherwise.
|
def gcopy(pattern, destination):
    """Copy every file found by glob.glob(pattern) into *destination*.

    Args:
        pattern (str): Glob pattern.
        destination (str): Path to the destination directory.

    Returns:
        bool: True if all copies succeeded, False at the first failure.
    """
    # all() short-circuits on the first failed copy, like the old loop
    return all(copy(item, destination) for item in glob.glob(pattern))
| 739,107
|
Move a file or directory (recursively) to another location.
If the destination is on our current file system, then simply use
rename. Otherwise, copy source to the destination and then remove
source.
Args:
source (str): Source file or directory (file or directory to move).
destination (str): Destination file or directory (where to move).
Returns:
bool: True if the operation is successful, False otherwise.
|
def move(source, destination):
    """Move a file or directory (recursively) to another location.

    If the destination is on the current file system, a simple rename is
    used; otherwise the source is copied and then removed.

    Args:
        source (str): Source file or directory to move.
        destination (str): Destination file or directory.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("Move: %s -> %s", source, destination)
    try:
        __create_destdir(destination)
        shutil.move(source, destination)
    except Exception:
        logger.exception("Failed to Move: %s -> %s" % (source, destination))
        return False
    return True
| 739,108
|
Move all file found by glob.glob(pattern) to destination directory.
Args:
pattern (str): Glob pattern
destination (str): Path to the destination directory.
Returns:
bool: True if the operation is successful, False otherwise.
|
def gmove(pattern, destination):
    """Move every file found by glob.glob(pattern) into *destination*.

    Args:
        pattern (str): Glob pattern.
        destination (str): Path to the destination directory.

    Returns:
        bool: True if all moves succeeded, False at the first failure.
    """
    # all() short-circuits on the first failed move, like the old loop
    return all(move(item, destination) for item in glob.glob(pattern))
| 739,109
|
Delete a file.
Args:
path (str): Path to the file that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
|
def __rmfile(path):
    """Delete a file.

    Args:
        path (str): Path to the file that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("rmfile: %s", path)
    try:
        os.remove(path)
    except Exception as e:
        logger.error("rmfile: %s failed! Error: %s", path, e)
        return False
    return True
| 739,110
|
Recursively delete a directory tree.
Args:
path (str): Path to the directory that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
|
def __rmtree(path):
    """Recursively delete a directory tree.

    Args:
        path (str): Path to the directory that needs to be deleted.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    logger.info("rmtree: %s", path)
    try:
        shutil.rmtree(path)
    except Exception as e:
        logger.error("rmtree: %s failed! Error: %s", path, e)
        return False
    return True
| 739,111
|
Delete a file or directory.
Args:
path (str): Path to the file or directory that needs to be deleted.
Returns:
bool: True if the operation is successful, False otherwise.
|
def remove(path):
    """Delete a file or directory.

    Args:
        path (str): Path to the file or directory to delete.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    return __rmtree(path) if os.path.isdir(path) else __rmfile(path)
| 739,112
|
Remove all file found by glob.glob(pattern).
Args:
pattern (str): Pattern of files to remove
Returns:
bool: True if the operation is successful, False otherwise.
|
def gremove(pattern):
    """Remove every file found by glob.glob(pattern).

    Args:
        pattern (str): Pattern of files to remove.

    Returns:
        bool: True if all removals succeeded, False at the first failure.
    """
    # all() short-circuits on the first failed removal, like the old loop
    return all(remove(item) for item in glob.glob(pattern))
| 739,113
|
Read the content of the file.
Args:
path (str): Path to the file
encoding (str): File encoding. Default: utf-8
Returns:
str: File content or empty string if there was an error
|
def read(path, encoding="utf-8"):
    """Read the content of a file.

    Args:
        path (str): Path to the file.
        encoding (str): File encoding.  Default: utf-8.

    Returns:
        str: File content, or an empty string on any error.
    """
    try:
        with io.open(path, encoding=encoding) as handle:
            return handle.read()
    except Exception as e:
        logger.error("read: %s failed. Error: %s", path, e)
        return ""
| 739,114
|
Create a file at the given path if it does not already exists.
Args:
path (str): Path to the file.
content (str): Optional content that will be written in the file.
encoding (str): Encoding in which to write the content.
Default: ``utf-8``
overwrite (bool): Overwrite the file if exists.
Returns:
bool: True if the operation is successful, False otherwise.
|
def touch(path, content="", encoding="utf-8", overwrite=False):
    """Create a file at *path* with optional *content*.

    Refuses to overwrite an existing file unless *overwrite* is set.

    Args:
        path (str): Path to the file.
        content (str or bytes): Optional content written to the file.
        encoding (str): Encoding used when *content* is text.
            Default: ``utf-8``.
        overwrite (bool): Overwrite the file if it exists.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    path = os.path.abspath(path)
    if os.path.exists(path) and not overwrite:
        logger.warning('touch: "%s" already exists', path)
        return False
    try:
        logger.info("touch: %s", path)
        with io.open(path, "wb") as handle:
            data = content
            # Text content is encoded; bytes are written as-is
            if not isinstance(data, six.binary_type):
                data = data.encode(encoding)
            handle.write(data)
        return True
    except Exception as e:
        logger.error("touch: %s failed. Error: %s", path, e)
        return False
| 739,115
|
Load one or more modules.
Args:
modules: Either a string full path to a module or an actual module
object.
|
def load(self, *modules):
    """Load one or more modules and, recursively, their subpackages.

    Arguments:
        modules: Either string full paths to modules or actual module
            objects.  Loaded modules are recorded in ``self.modules``
            keyed by full name; failures are recorded in
            ``self.errors`` keyed by the name that failed.
    """
    for module in modules:
        if isinstance(module, six.string_types):
            try:
                # Resolve a dotted path into the module object
                module = get_object(module)
            except Exception as e:
                self.errors[module] = e
                continue
        self.modules[module.__package__] = module
        # Walk the package's children and import each one
        for (loader, module_name, is_pkg) in pkgutil.walk_packages(
            module.__path__
        ):
            full_name = "{}.{}".format(_package(module), module_name)
            try:
                self.modules[full_name] = get_object(full_name)
                if is_pkg:
                    # Recurse into subpackages so the whole tree loads
                    self.load(self.modules[full_name])
            except Exception as e:
                self.errors[full_name] = e
| 739,126
|
Parse a FIQL formatted string into an ``Expression``.
Args:
fiql_str (string): The FIQL formatted string we want to parse.
Returns:
Expression: An ``Expression`` object representing the parsed FIQL
string.
Raises:
FiqlFormatException: Unable to parse string due to incorrect
formatting.
Example:
>>> expression = parse_str_to_expression(
... "name==bar,dob=gt=1990-01-01")
|
def parse_str_to_expression(fiql_str):
    """Parse a FIQL formatted string into an ``Expression``.

    Args:
        fiql_str (string): The FIQL formatted string we want to parse.

    Returns:
        Expression: An ``Expression`` object representing the parsed
        FIQL string.

    Raises:
        FiqlFormatException: Unable to parse string due to incorrect
            formatting.

    Example:
        >>> expression = parse_str_to_expression(
        ...     "name==bar,dob=gt=1990-01-01")
    """
    #pylint: disable=too-many-branches
    nesting_lvl = 0
    last_element = None
    expression = Expression()
    for (preamble, selector, comparison, argument) in iter_parse(fiql_str):
        if preamble:
            for char in preamble:
                if char == '(':
                    # An opening paren may not directly follow a
                    # constraint/expression (an operator must join them)
                    if isinstance(last_element, BaseExpression):
                        raise FiqlFormatException(
                            "%s can not be followed by %s" % (
                                last_element.__class__, Expression))
                    expression = expression.create_nested_expression()
                    nesting_lvl += 1
                elif char == ')':
                    # Close the nested expression and continue in parent
                    expression = expression.get_parent()
                    last_element = expression
                    nesting_lvl -= 1
                else:
                    # Any other preamble char is an operator (";", ",")
                    if not expression.has_constraint():
                        raise FiqlFormatException(
                            "%s proceeding initial %s" % (
                                Operator, Constraint))
                    if isinstance(last_element, Operator):
                        raise FiqlFormatException(
                            "%s can not be followed by %s" % (
                                Operator, Operator))
                    last_element = Operator(char)
                    expression = expression.add_operator(last_element)
        if selector:
            # A constraint may not directly follow another constraint
            if isinstance(last_element, BaseExpression):
                raise FiqlFormatException("%s can not be followed by %s" % (
                    last_element.__class__, Constraint))
            last_element = Constraint(selector, comparison, argument)
            expression.add_element(last_element)
    if nesting_lvl != 0:
        raise FiqlFormatException(
            "At least one nested expression was not correctly closed")
    if not expression.has_constraint():
        raise FiqlFormatException(
            "Parsed string '%s' contained no constraint" % fiql_str)
    return expression
| 739,292
|
Initialize instance of ``Operator``.
Args:
fiql_op_str (string): The FIQL operator (e.g., ";").
Raises:
FiqlObjectException: Invalid FIQL operator.
|
def __init__(self, fiql_op_str):
    """Initialize an instance of ``Operator``.

    Args:
        fiql_op_str (string): The FIQL operator (e.g., ";").

    Raises:
        FiqlObjectException: Invalid FIQL operator.
    """
    # "x not in y" is the idiomatic form of "not x in y"
    if fiql_op_str not in OPERATOR_MAP:
        raise FiqlObjectException(
            "'%s' is not a valid FIQL operator" % fiql_op_str)
    self.value = fiql_op_str
| 739,387
|
Compare using operator precedence.
Args:
other (Operator): The ``Operator`` we are comparing precedence
against.
Returns:
integer: ``1`` if greater than ``other``, ``-1`` if less than
``other``, and ``0`` if of equal precedence of ``other``.
|
def __cmp__(self, other):
    """Compare using operator precedence.

    Args:
        other (Operator): The ``Operator`` we are comparing precedence
            against.

    Returns:
        integer: ``1`` if greater than ``other``, ``-1`` if less than
        ``other``, and ``0`` if of equal precedence.
    """
    mine = OPERATOR_MAP[self.value][1]
    theirs = OPERATOR_MAP[other.value][1]
    # Classic cmp idiom: (a > b) - (a < b) yields 1, -1, or 0
    return (mine > theirs) - (mine < theirs)
| 739,388
|
Recursively zip a directory.
Args:
archive (zipfile.ZipFile or str): ZipFile object add to or path to the
output zip archive.
items (str or list of str): Single item or list of items (files and
directories) to be added to zipfile.
mode (str): w for create new and write a for append to.
save_full_paths (bool): Preserve full paths.
|
def mkzip(archive, items, mode="w", save_full_paths=False):
    """Recursively zip files and directories into an archive.

    Args:
        archive (zipfile.ZipFile or str): Open ZipFile to add to, or
            the path of the zip archive to create.
        items (str or list of str): Single item or list of items (files
            and directories) to add to the zipfile.
        mode (str): "w" to create/overwrite, "a" to append (used only
            when *archive* is a path).
        save_full_paths (bool): Preserve full absolute paths inside the
            archive instead of paths relative to each item.

    Returns:
        bool: True on success, False otherwise.
    """
    close = False
    try:
        if not isinstance(archive, zipfile.ZipFile):
            archive = zipfile.ZipFile(archive, mode, allowZip64=True)
            close = True  # we opened it, so we must close it
        # Fix: log message previously had a typo ("mkdzip")
        logger.info("mkzip: Creating %s, from: %s", archive.filename, items)
        if isinstance(items, str):
            items = [items]
        for item in items:
            item = os.path.abspath(item)
            basename = os.path.basename(item)
            if os.path.isdir(item):
                # Add the whole tree, keeping paths relative to the item
                for root, directories, filenames in os.walk(item):
                    for filename in filenames:
                        path = os.path.join(root, filename)
                        if save_full_paths:
                            archive_path = path.encode("utf-8")
                        else:
                            archive_path = os.path.join(
                                basename, path.replace(item, "").strip("\\/")
                            ).encode("utf-8")
                        archive.write(path, archive_path)
            elif os.path.isfile(item):
                if save_full_paths:
                    archive_name = item.encode("utf-8")
                else:
                    archive_name = basename.encode("utf-8")
                archive.write(item, archive_name)  # , zipfile.ZIP_DEFLATED)
        return True
    except Exception as e:
        logger.error("Error occurred during mkzip: %s" % e)
        return False
    finally:
        if close:
            archive.close()
| 739,435
|
Create a text report.
Arguments:
report_path -- Path to report file
mode -- a for append, w (default) for create from scratch (overwrite existing file)
|
def __init__(self, path=None, mode='w', name=None, auto_flush=True, encoding='utf8'):
    """Create a text report writer.

    Arguments:
        path: Report file path.  ``TextReport.STDOUT`` or a falsy value
            writes to stdout, ``TextReport.STRINGIO`` to an in-memory
            buffer, '/dev/null' discards output; anything else is a
            file path (``~`` is expanded).
        mode: 'a' for append, 'w' (default) to create from scratch
            (overwrite an existing file).  Ignored for stdout/StringIO.
        name: Optional display name (derived from *path* when omitted).
        auto_flush: Flush after each write (file-backed reports only).
        encoding: Encoding used for file-backed reports.
    """
    if not path or path == TextReport.STDOUT:
        # Console-backed report
        self.__path = TextReport.STDOUT
        self.__report_file = sys.stdout
        self.name = 'stdout'
        self.mode = None
        self.auto_flush = False
    elif path == TextReport.STRINGIO:
        # In-memory report
        self.__path = TextReport.STRINGIO
        self.__report_file = io.StringIO()
        self.name = 'StringIO'
        self.mode = None
        self.auto_flush = False
    else:
        # File-backed report ('/dev/null' maps to os.devnull)
        if path == '/dev/null':
            self.__path = '/dev/null'
            self.__report_file = open(os.devnull, mode=mode, encoding=encoding)
        else:
            self.__path = os.path.expanduser(path)
            self.__report_file = open(self.__path, mode, encoding=encoding)
        self.name = name if name else FileHelper.getfilename(self.__path)
        self.auto_flush = auto_flush
        self.mode = mode
    # Convenience alias so report.print(...) works like writeline
    self.print = self.writeline
| 739,571
|
Retrieve a list of processes sorted by name.
Args:
sort_by_name (bool): Sort the list by name or by process ID's.
Returns:
list of (int, str) or list of (int, str, str): List of process id,
process name and optional cmdline tuples.
|
def get_processes(sort_by_name=True):
    """Retrieve a list of processes.

    Args:
        sort_by_name (bool): Sort by (name, pid) when True, otherwise
            by (pid, name).

    Returns:
        list of (int, str) or list of (int, str, str): List of process
        id, process name and optional cmdline tuples.
    """
    # Build the comparator once, then sort a single time
    if sort_by_name:
        order = lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))
    else:
        order = lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))
    return sorted(_list_processes(), key=cmp_to_key(order))
| 739,607
|
Find process by name or by argument in command line.
Args:
name (str): Process name to search for.
arg (str): Command line argument for a process to search for.
Returns:
tea.process.base.IProcess: Process object if found.
|
def find(name, arg=None):
    """Find a process by name or by argument in its command line.

    Args:
        name (str): Process name to search for (substring match,
            case-insensitive).
        arg (str): Optional command line argument to also match.

    Returns:
        tea.process.base.IProcess: Process object if found, else None.
    """
    needle = name.lower()
    for process in get_processes():
        if needle not in process.name.lower():
            continue
        if arg is None:
            return process
        wanted = arg.lower()
        for candidate in process.cmdline or []:
            if wanted in candidate.lower():
                return process
    return None
| 739,608
|
Read public keys from specified user's authorized_keys file.
args:
username (str): username.
returns:
list: Authorised keys for the specified user.
|
def read_authorized_keys(username=None):
    """Read public keys from the specified user's authorized_keys file.

    Copies the file to /tmp via sudo (so it is readable), parses each
    line into a PublicKey, then removes the temporary copy.

    args:
        username (str): username.

    returns:
        list: Authorised keys (PublicKey instances) for the user.

    raises:
        OSError: sudo is blocked by a 'Defaults requiretty' entry.
    """
    authorized_keys_path = '{0}/.ssh/authorized_keys'.format(os.path.expanduser('~{0}'.format(username)))
    # Random suffix keeps concurrent invocations from clobbering each other
    rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
    tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(username, rnd_chars)
    authorized_keys = list()
    copy_result = execute_command(
        shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), authorized_keys_path, tmp_authorized_keys_path))))
    result_message = copy_result[0][1].decode('UTF-8')
    if 'you must have a tty to run sudo' in result_message:  # pragma: no cover
        raise OSError("/etc/sudoers is blocked sudo. Remove entry: 'Defaults requiretty'.")
    elif 'No such file or directory' not in result_message:
        # File exists: make the temp copy readable and parse it
        execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_authorized_keys_path))))
        with open(tmp_authorized_keys_path) as keys_file:
            for key in keys_file:
                authorized_keys.append(PublicKey(raw=key))
        execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path))))
    return authorized_keys
| 739,617
|
Write public keys back to authorized_keys file. Create keys directory if it doesn't already exist.
args:
user (User): Instance of User containing keys.
returns:
list: Authorised keys for the specified user.
|
def write_authorized_keys(user=None):
    """Write public keys back to the user's authorized_keys file.

    Creates the ~/.ssh directory if it doesn't already exist, writes the
    keys to a temp file, then installs it via sudo with the usual ssh
    ownership and permissions (700 dir, 600 file).

    args:
        user (User): Instance of User containing the keys to write.
    """
    authorized_keys = list()
    authorized_keys_dir = '{0}/.ssh'.format(os.path.expanduser('~{0}'.format(user.name)))
    # Random suffix keeps concurrent invocations from clobbering each other
    rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
    authorized_keys_path = '{0}/authorized_keys'.format(authorized_keys_dir)
    tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(user.name, rnd_chars)
    if not os.path.isdir(authorized_keys_dir):
        execute_command(shlex.split(str('{0} mkdir -p {1}'.format(sudo_check(), authorized_keys_dir))))
    for key in user.public_keys:
        authorized_keys.append('{0}\n'.format(key.raw))
    # Stage the keys in /tmp, then install them with sudo
    with open(tmp_authorized_keys_path, mode=text_type('w+')) as keys_file:
        keys_file.writelines(authorized_keys)
    execute_command(
        shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), tmp_authorized_keys_path, authorized_keys_path))))
    execute_command(shlex.split(str('{0} chown -R {1} {2}'.format(sudo_check(), user.name, authorized_keys_dir))))
    execute_command(shlex.split(str('{0} chmod 700 {1}'.format(sudo_check(), authorized_keys_dir))))
    execute_command(shlex.split(str('{0} chmod 600 {1}'.format(sudo_check(), authorized_keys_path))))
    execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path))))
| 739,618
|
Make a public key.
args:
raw (str): raw public key
b64encoded (str): base64 encoded public key
|
def __init__(self, raw=None, b64encoded=None):
    """Make a public key from raw or base64-encoded text.

    args:
        raw (str): raw public key
        b64encoded (str): base64 encoded public key

    raises:
        AttributeError: neither form was provided.
    """
    if not raw and not b64encoded:
        raise AttributeError('Key not provided')
    self._raw = raw
    self._b64encoded = b64encoded
| 739,619
|
Read the non-comment entries from the sudoers file.
The file is copied to a temporary location via sudo before reading.
returns:
str: sudoers entry for the specified user.
|
def read_sudoers():
    """Read the entries from /etc/sudoers.

    Copies the file to /tmp via sudo (so it is readable), collects the
    non-comment, non-blank lines, then removes the temporary copy.

    returns:
        list: stripped sudoers entries.
    """
    sudoers_path = '/etc/sudoers'
    # Random suffix keeps concurrent invocations from clobbering each other
    rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
    tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars)
    sudoers_entries = list()
    copy_result = execute_command(
        shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path))))
    result_message = copy_result[0][1].decode('UTF-8')
    if 'No such file or directory' not in result_message:
        # File exists: make the temp copy readable and parse it
        execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_sudoers_path))))
        with open(tmp_sudoers_path) as tmp_sudoers_file:
            for line in tmp_sudoers_file:
                stripped = line.strip().replace(os.linesep, '')
                # Skip blank lines and comments
                if stripped and not stripped.startswith('#'):
                    sudoers_entries.append(stripped)
        execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path))))
    return sudoers_entries
| 739,652
|
Write sudoers entry.
args:
    username (str): username the entry applies to.
    sudoers_entry (str): the sudoers entry to write for that user.
returns:
str: sudoers entry for the specified user.
|
def write_sudoers_entry(username=None, sudoers_entry=None):
    """Write (or replace) the sudoers entry for *username*.

    Works on a temporary copy of /etc/sudoers: existing entries for the
    user are dropped, the new entry is appended, the result is checked
    with visudo, and only then copied back with root ownership and 440
    permissions.

    args:
        username (str): username the entry applies to.
        sudoers_entry (str): sudoers entry to write; when falsy, the
            user's existing entry is simply removed.

    raises:
        ValueError: visudo rejected the resulting sudoers file.
    """
    sudoers_path = '/etc/sudoers'
    # Random suffix keeps concurrent invocations from clobbering each other
    rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
    tmp_sudoers_path = '/tmp/sudoers_{0}'.format(rnd_chars)
    execute_command(
        shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), sudoers_path, tmp_sudoers_path))))
    execute_command(
        shlex.split(str('{0} chmod 777 {1}'.format(sudo_check(), tmp_sudoers_path))))
    with open(tmp_sudoers_path, mode=text_type('r')) as tmp_sudoers_file:
        sudoers_entries = tmp_sudoers_file.readlines()
    # Keep every line except the user's previous entry
    sudoers_output = list()
    for entry in sudoers_entries:
        if entry and not entry.startswith(username):
            sudoers_output.append(entry)
    if sudoers_entry:
        sudoers_output.append('{0} {1}'.format(username, sudoers_entry))
        sudoers_output.append('\n')
    with open(tmp_sudoers_path, mode=text_type('w+')) as tmp_sudoers_file:
        tmp_sudoers_file.writelines(sudoers_output)
    # Validate with visudo before touching the real file
    sudoers_check_result = execute_command(
        shlex.split(str('{0} {1} -cf {2}'.format(sudo_check(), LINUX_CMD_VISUDO, tmp_sudoers_path))))
    if sudoers_check_result[1] > 0:
        raise ValueError(sudoers_check_result[0][1])
    execute_command(
        shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), tmp_sudoers_path, sudoers_path))))
    execute_command(shlex.split(str('{0} chown root:root {1}'.format(sudo_check(), sudoers_path))))
    execute_command(shlex.split(str('{0} chmod 440 {1}'.format(sudo_check(), sudoers_path))))
    execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_sudoers_path))))
| 739,653
|
Find the sudoers entry in the sudoers file for the specified user.
args:
username (str): username.
sudoers_entries (list): list of lines from the sudoers file.
returns:
str: sudoers entry for the specified user.
|
def get_sudoers_entry(username=None, sudoers_entries=None):
    """Find the sudoers entry for the specified user.

    args:
        username (str): username.
        sudoers_entries (list): lines from the sudoers file.

    returns:
        str: the user's entry with the username stripped, or None when
        no entry starts with the username.
    """
    for line in sudoers_entries:
        if not line.startswith(username):
            continue
        return line.replace(username, '').strip()
    return None
| 739,654
|
Print string in to stdout using colored font.
See L{set_color} for more details about colors.
Args:
text (str): Text that needs to be printed.
|
def cprint(
    text,
    fg=Color.normal,
    bg=Color.normal,
    fg_dark=False,
    bg_dark=False,
    underlined=False,
    parse=False,
):
    """Print string to stdout using colored font.

    See L{set_color} for more details about colors.

    Args:
        text (str): Text that needs to be printed.
        fg: Foreground color.
        bg: Background color.
        fg_dark (bool): Use the dark variant of the foreground color.
        bg_dark (bool): Use the dark variant of the background color.
        underlined (bool): Underline the text.
        parse (bool): When True, interpret inline color markers in
            *text* (matched by ``Color.color_re()``) instead of the
            fg/bg arguments.
    """
    if parse:
        color_re = Color.color_re()
        lines = text.splitlines()
        count = len(lines)
        for i, line in enumerate(lines):
            previous = 0
            end = len(line)
            for match in color_re.finditer(line):
                # Emit the text before the marker, then switch color
                sys.stdout.write(line[previous : match.start()])
                d = match.groupdict()
                set_color(
                    d["color"], fg_dark=False if d["dark"] is None else True
                )
                previous = match.end()
            # Emit the remainder; restore the newline splitlines() ate
            sys.stdout.write(
                line[previous:end]
                + ("\n" if (i < (count - 1) or text[-1] == "\n") else "")
            )
    else:
        set_color(fg, bg, fg_dark, bg_dark, underlined)
        sys.stdout.write(text)
        # Reset terminal colors to default
        set_color()
| 739,683
|
Attach an attachment to a message as a side effect.
Arguments:
message: MIMEMultipart instance.
attachment: Attachment instance.
|
def add_attachment(message, attachment, rfc2231=True):
    """Attach *attachment* to *message* as a side effect.

    Arguments:
        message: MIMEMultipart instance.
        attachment: Attachment instance (must expose ``read()`` and
            ``name``).
        rfc2231: when False, encode the filename as an RFC 2047 Header
            instead of an RFC 2231 parameter.
    """
    part = MIMEBase('application', 'octet-stream')
    part.set_payload(attachment.read())
    encoders.encode_base64(part)
    if rfc2231:
        filename = attachment.name
    else:
        filename = Header(attachment.name).encode()
    part.add_header('Content-Disposition', 'attachment',
                    filename=filename)
    message.attach(part)
| 739,730
|
Send an email. Connect/Disconnect if not already connected
Arguments:
email: Email instance to send.
attachments: iterable containing Attachment instances
|
def send(self, email, attachments=()):
    """Send an email, connecting/disconnecting if not already connected.

    Arguments:
        email: Email instance to send.
        attachments: iterable containing Attachment instances.
    """
    msg = email.as_mime(attachments)
    if 'From' not in msg:
        msg['From'] = self.sender_address()
    if self._conn:
        # Already connected: reuse the existing connection
        self._conn.sendmail(self.username, email.recipients, msg.as_string())
    else:
        # Connect for the duration of this send only
        with self:
            self._conn.sendmail(self.username, email.recipients, msg.as_string())
| 739,736
|
Set parent ``Expression`` for this object.
Args:
parent (Expression): The ``Expression`` which contains this object.
Raises:
FiqlObjectException: Parent must be of type ``Expression``.
|
def set_parent(self, parent):
    """Set the parent ``Expression`` for this object.

    Args:
        parent (Expression): The ``Expression`` which contains this object.

    Raises:
        FiqlObjectException: If ``parent`` is not an ``Expression``.
    """
    if isinstance(parent, Expression):
        self.parent = parent
    else:
        raise FiqlObjectException("Parent must be of %s not %s" % (
            Expression, type(parent)))
| 739,783
|
Add an element of type ``Operator``, ``Constraint``, or
``Expression`` to the ``Expression``.
Args:
element: ``Constraint``, ``Expression``, or ``Operator``.
Returns:
Expression: ``self``
Raises:
FiqlObjectException: Element is not a valid type.
|
def add_element(self, element):
    """Add an ``Operator``, ``Constraint``, or ``Expression`` element.

    Args:
        element: ``Constraint``, ``Expression``, or ``Operator``.

    Returns:
        Expression: ``self``.

    Raises:
        FiqlObjectException: If ``element`` is not a valid type.
    """
    if not isinstance(element, BaseExpression):
        # Anything that is not an expression fragment is treated as an
        # operator; add_operator raises for invalid types.
        return self.add_operator(element)
    element.set_parent(self._working_fragment)
    self._working_fragment.elements.append(element)
    return self
| 739,787
|
Update the ``Expression`` by joining the specified additional
``elements`` using an "AND" ``Operator``
Args:
*elements (BaseExpression): The ``Expression`` and/or
``Constraint`` elements which the "AND" ``Operator`` applies
to.
Returns:
Expression: ``self`` or related ``Expression``.
|
def op_and(self, *elements):
    """Join ``elements`` onto this ``Expression`` with an "AND" ``Operator``.

    Args:
        *elements (BaseExpression): The ``Expression`` and/or ``Constraint``
            elements which the "AND" ``Operator`` applies to.

    Returns:
        Expression: ``self`` or related ``Expression``.
    """
    joined = self.add_operator(Operator(';'))
    for item in elements:
        joined.add_element(item)
    return joined
| 739,788
|
Update the ``Expression`` by joining the specified additional
``elements`` using an "OR" ``Operator``
Args:
*elements (BaseExpression): The ``Expression`` and/or
``Constraint`` elements which the "OR" ``Operator`` applies
to.
Returns:
Expression: ``self`` or related ``Expression``.
|
def op_or(self, *elements):
    """Join ``elements`` onto this ``Expression`` with an "OR" ``Operator``.

    Args:
        *elements (BaseExpression): The ``Expression`` and/or ``Constraint``
            elements which the "OR" ``Operator`` applies to.

    Returns:
        Expression: ``self`` or related ``Expression``.
    """
    joined = self.add_operator(Operator(','))
    for item in elements:
        joined.add_element(item)
    return joined
| 739,789
|
Generate command to add a user.
args:
proposed_user (User): User
manage_home: bool
returns:
list: The command string split into shell-like syntax
|
def generate_add_user_command(proposed_user=None, manage_home=None):
    """Generate the platform-specific command to add a user.

    Args:
        proposed_user (User): User to create.
        manage_home (bool): Whether to manage the home directory.

    Returns:
        list: The command string split into shell-like syntax, or None if
        the platform is unsupported.
    """
    command = None
    if get_platform() in ('Linux', 'OpenBSD'):
        command = '{0} {1}'.format(sudo_check(), LINUX_CMD_USERADD)
        if proposed_user.uid:
            command = '{0} -u {1}'.format(command, proposed_user.uid)
        if proposed_user.gid:
            command = '{0} -g {1}'.format(command, proposed_user.gid)
        if proposed_user.gecos:
            command = '{0} -c \'{1}\''.format(command, proposed_user.gecos)
        if manage_home:
            if proposed_user.home_dir:
                # Reuse a pre-existing directory with -d; otherwise fall back
                # to -m only when the default /home/<name> does not exist yet.
                if os.path.exists(proposed_user.home_dir):
                    command = '{0} -d {1}'.format(command, proposed_user.home_dir)
                elif not os.path.exists('/home/{0}'.format(proposed_user.name)):
                    command = '{0} -m'.format(command)
        if proposed_user.shell:
            command = '{0} -s {1}'.format(command, proposed_user.shell)
        command = '{0} {1}'.format(command, proposed_user.name)
    elif get_platform() == 'FreeBSD':  # pragma: FreeBSD
        # FreeBSD uses pw(8); note -n precedes the user name here.
        command = '{0} {1} useradd'.format(sudo_check(), FREEBSD_CMD_PW)
        if proposed_user.uid:
            command = '{0} -u {1}'.format(command, proposed_user.uid)
        if proposed_user.gid:
            command = '{0} -g {1}'.format(command, proposed_user.gid)
        if proposed_user.gecos:
            command = '{0} -c \'{1}\''.format(command, proposed_user.gecos)
        if manage_home:
            if proposed_user.home_dir:
                command = '{0} -d {1}'.format(command, proposed_user.home_dir)
            else:
                command = '{0} -m'.format(command)
        if proposed_user.shell:
            command = '{0} -s {1}'.format(command, proposed_user.shell)
        command = '{0} -n {1}'.format(command, proposed_user.name)
    if command:
        return shlex.split(str(command))
| 739,824
|
Generate command to modify existing user to become the proposed user.
args:
task (dict): A proposed user and the differences between it and the existing user
returns:
list: The command string split into shell-like syntax
|
def generate_modify_user_command(task=None, manage_home=None):
    """Generate the command to modify an existing user into the proposed user.

    Args:
        task (dict): A proposed user and the differences between it and the
            existing user (under 'user_comparison'/'result').
        manage_home (bool): Whether to manage the home directory.

    Returns:
        list: The command string split into shell-like syntax, or None if
        the platform is unsupported.
    """
    name = task['proposed_user'].name
    comparison_result = task['user_comparison']['result']
    command = None
    if get_platform() in ('Linux', 'OpenBSD'):
        command = '{0} {1}'.format(sudo_check(), LINUX_CMD_USERMOD)
        if comparison_result.get('replacement_uid_value'):
            command = '{0} -u {1}'.format(command, comparison_result.get('replacement_uid_value'))
        if comparison_result.get('replacement_gid_value'):
            command = '{0} -g {1}'.format(command, comparison_result.get('replacement_gid_value'))
        # NOTE(review): unlike generate_add_user_command, the gecos value is
        # not quoted here — a gecos containing spaces would split; confirm.
        if comparison_result.get('replacement_gecos_value'):
            command = '{0} -c {1}'.format(command, comparison_result.get('replacement_gecos_value'))
        if comparison_result.get('replacement_shell_value'):
            command = '{0} -s {1}'.format(command, comparison_result.get('replacement_shell_value'))
        if manage_home and comparison_result.get('replacement_home_dir_value'):
            command = '{0} -d {1}'.format(command, comparison_result.get('replacement_home_dir_value'))
        command = '{0} {1}'.format(command, name)
    # Plain `if` (not elif): harmless because the platform checks are
    # mutually exclusive.
    if get_platform() == 'FreeBSD':  # pragma: FreeBSD
        command = '{0} {1} usermod'.format(sudo_check(), FREEBSD_CMD_PW)
        if comparison_result.get('replacement_uid_value'):
            command = '{0} -u {1}'.format(command, comparison_result.get('replacement_uid_value'))
        if comparison_result.get('replacement_gid_value'):
            command = '{0} -g {1}'.format(command, comparison_result.get('replacement_gid_value'))
        if comparison_result.get('replacement_gecos_value'):
            command = '{0} -c {1}'.format(command, comparison_result.get('replacement_gecos_value'))
        if comparison_result.get('replacement_shell_value'):
            command = '{0} -s {1}'.format(command, comparison_result.get('replacement_shell_value'))
        if manage_home and comparison_result.get('replacement_home_dir_value'):
            command = '{0} -d {1}'.format(command, comparison_result.get('replacement_home_dir_value'))
        command = '{0} -n {1}'.format(command, name)
    if command:
        return shlex.split(str(command))
| 739,825
|
Generate command to delete a user.
args:
username (str): user name
manage_home (bool): manage home directory
returns:
list: The user delete command string split into shell-like syntax
|
def generate_delete_user_command(username=None, manage_home=None):
    """Generate the command to delete a user.

    Args:
        username (str): User name.
        manage_home (bool): Also remove the user's home directory.

    Returns:
        list: The delete command split into shell-like syntax, or None if
        the platform is unsupported.
    """
    platform = get_platform()
    remove_home = '-r' if manage_home else ''
    if platform in ('Linux', 'OpenBSD'):
        command = '{0} {1} {2} {3}'.format(sudo_check(), LINUX_CMD_USERDEL, remove_home, username)
    elif platform == 'FreeBSD':  # pragma: FreeBSD
        command = '{0} {1} userdel {2} -n {3}'.format(sudo_check(), FREEBSD_CMD_PW, remove_home, username)
    else:
        command = None
    if command:
        return shlex.split(str(command))
| 739,826
|
Check if supplied User instance exists in supplied Users list and, if so, return the differences.
args:
passed_user (User): the user instance to check for differences
user_list (Users): the Users instance containing a list of Users instances
returns:
dict: Details of the matching user and a list of differences
|
def compare_user(passed_user=None, user_list=None):
    """Check if the supplied User exists in the Users list and, if so, return the differences.

    Args:
        passed_user (User): The user instance to check for differences.
        user_list (Users): The Users instance containing a list of User instances.

    Returns:
        dict: state ('existing'), the per-attribute comparison result, and
        the matching existing user.
    """
    # Look up the existing user by name (assumed to be present).
    returned = user_list.describe_users(users_filter=dict(name=passed_user.name))
    replace_keys = False
    # User exists, so compare attributes; only truthy proposed values compare.
    comparison_result = dict()
    if passed_user.uid and (not returned[0].uid == passed_user.uid):
        comparison_result['uid_action'] = 'modify'
        comparison_result['current_uid_value'] = returned[0].uid
        comparison_result['replacement_uid_value'] = passed_user.uid
    if passed_user.gid and (not returned[0].gid == passed_user.gid):
        comparison_result['gid_action'] = 'modify'
        comparison_result['current_gid_value'] = returned[0].gid
        comparison_result['replacement_gid_value'] = passed_user.gid
    if passed_user.gecos and (not returned[0].gecos == passed_user.gecos):
        comparison_result['gecos_action'] = 'modify'
        comparison_result['current_gecos_value'] = returned[0].gecos
        comparison_result['replacement_gecos_value'] = passed_user.gecos
    if passed_user.home_dir and (not returned[0].home_dir == passed_user.home_dir):
        comparison_result['home_dir_action'] = 'modify'
        comparison_result['current_home_dir_value'] = returned[0].home_dir
        comparison_result['replacement_home_dir_value'] = passed_user.home_dir
        # (Re)set keys if home dir changed
        replace_keys = True
    if passed_user.shell and (not returned[0].shell == passed_user.shell):
        comparison_result['shell_action'] = 'modify'
        comparison_result['current_shell_value'] = returned[0].shell
        comparison_result['replacement_shell_value'] = passed_user.shell
    if passed_user.sudoers_entry and (not returned[0].sudoers_entry == passed_user.sudoers_entry):
        comparison_result['sudoers_entry_action'] = 'modify'
        comparison_result['current_sudoers_entry'] = returned[0].sudoers_entry
        comparison_result['replacement_sudoers_entry'] = passed_user.sudoers_entry
    existing_keys = returned[0].public_keys
    passed_keys = passed_user.public_keys
    # Check if existing and passed keys exist, and if so, compare
    if all((existing_keys, passed_keys)):
        if len(existing_keys) != len(passed_keys):
            # BUG FIX: a differing number of keys is itself a difference;
            # previously this case silently skipped key replacement.
            replace_keys = True
        else:
            # Same count: compare each key, and if any differences, replace
            existing = set(key.raw for key in existing_keys)
            replacement = set(key.raw for key in passed_keys)
            if set.difference(existing, replacement):
                replace_keys = True
    # If no existing keys but keys are proposed, set them
    elif passed_keys and not existing_keys:
        replace_keys = True
    if replace_keys:
        comparison_result['public_keys_action'] = 'modify'
        comparison_result['current_public_keys_value'] = existing_keys
        comparison_result['replacement_public_keys_value'] = passed_keys
    return dict(state='existing', result=comparison_result, existing_user=returned)
| 739,827
|
Make a user.
args:
name (str): user name.
passwd (str, optional): password
uid (int, optional): user id
gid (int, optional): group id
gecos (str): GECOS field
home_dir (str): home directory
shell (str): shell
public_keys (list): list of public key instances
sudoers_entry (str): an entry in sudoers
|
def __init__(self, name=None, passwd=None, uid=None, gid=None, gecos=None,
             home_dir=None, shell=None, public_keys=None, sudoers_entry=None):
    """Make a user.

    Args:
        name (str): user name.
        passwd (str, optional): password.
        uid (int, optional): user id.
        gid (int, optional): group id.
        gecos (str): GECOS field.
        home_dir (str): home directory.
        shell (str): shell.
        public_keys (list): list of public key instances.
        sudoers_entry (str): an entry in sudoers.
    """
    self.name = name
    self.passwd = passwd
    self.uid = uid
    self.gid = gid
    # Stored privately; presumably exposed via a `gecos` property elsewhere
    # in the class — TODO confirm.
    self._gecos = gecos
    self.home_dir = home_dir
    self.shell = shell
    self.public_keys = public_keys
    self.sudoers_entry = sudoers_entry
| 739,828
|
Create instance of Users collection.
args:
oktypes (type): The acceptable types of instances..
|
def __init__(self, oktypes=User):
    """Create an instance of the Users collection.

    Args:
        oktypes (type): The acceptable types of contained instances.
    """
    platform = get_platform()
    # Refuse to run on unsupported platforms.
    if platform not in SUPPORTED_PLATFORMS:
        sys.exit('Linux, FreeBSD and OpenBSD are currently the only supported platforms for this library.')
    # Ensure the OS commands needed for managing users are on PATH.
    missing_commands = get_missing_commands(platform)
    if missing_commands:
        sys.exit('Unable to find commands: {0}.\nPlease check PATH.'.format(', '.join(missing_commands)))
    self.oktypes = oktypes
    self._user_list = []
| 739,831
|
Format the text for output adding ASCII frame around the text.
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
|
def format_page(text):
    """Format the text for output, adding an ASCII frame around it.

    Args:
        text (str): Text that needs to be formatted.

    Returns:
        str: Formatted string.
    """
    # Treat empty input as a single empty line instead of crashing on max()
    # over an empty sequence.
    lines = text.splitlines() or ['']
    width = max(map(len, lines))
    page = "+-" + "-" * width + "-+\n"
    for line in lines:
        page += "| " + line.ljust(width) + " |\n"
    page += "+-" + "-" * width + "-+\n"
    return page
| 739,935
|
Format the text as a table.
Text in format:
first | second
row 2 col 1 | 4
Will be formatted as::
+-------------+--------+
| first | second |
+-------------+--------+
| row 2 col 1 | 4 |
+-------------+--------+
Args:
text (str): Text that needs to be formatted.
Returns:
str: Formatted string.
|
def table(text):
    """Format pipe-separated *text* as an ASCII table.

    Each input line becomes one table row; columns are split on "|" and
    padded to the widest cell. A horizontal rule is drawn above the first
    and below the last row.

    Args:
        text (str): Text that needs to be formatted.

    Returns:
        str: Formatted string.
    """
    def horizontal_rule(widths):
        return "+-%s-+%s" % (
            "-+-".join("-" * w for w in widths),
            os.linesep,
        )

    rows = [[cell.strip() for cell in line.split("|")]
            for line in text.splitlines()]
    column_count = max(len(row) for row in rows)
    widths = [0] * column_count
    for row in rows:
        # Pad short rows with empty cells, then track the widest cell per column.
        row.extend([""] * (column_count - len(row)))
        for idx, cell in enumerate(row):
            widths[idx] = max(widths[idx], len(cell))
    rendered = horizontal_rule(widths)
    for row in rows:
        padded = [cell.ljust(widths[idx]) for idx, cell in enumerate(row)]
        rendered += "| %s |%s" % (" | ".join(padded), os.linesep)
    rendered += horizontal_rule(widths)
    return rendered
| 739,936
|
Wrap text lines to maximum *width* characters.
Wrapped text is aligned against the left text border.
Args:
text (str): Text to wrap.
width (int): Maximum number of characters per line.
Returns:
str: Wrapped text.
|
def wrap_text(text, width=80):
    """Wrap text lines to a maximum of *width* characters, left-aligned.

    Args:
        text (str): Text to wrap.
        width (int): Maximum number of characters per line.

    Returns:
        str: Wrapped text.
    """
    collapsed = re.sub(r"\s+", " ", text).strip()
    return TextWrapper(
        width=width, break_long_words=False, replace_whitespace=True
    ).fill(collapsed)
| 739,938
|
Wrap text and adjust it to right border.
Same as L{wrap_text} with the difference that the text is aligned against
the right text border.
Args:
text (str): Text to wrap and align.
width (int): Maximum number of characters per line.
indent (int): Indentation of the first line.
subsequent (int or None): Indentation of all other lines, if it is
``None``, then the indentation will be same as for the first line.
|
def rjust_text(text, width=80, indent=0, subsequent=None):
    """Wrap text and adjust it towards the right border.

    Same as wrap_text except the text is aligned against the right
    text border.

    Args:
        text (str): Text to wrap and align.
        width (int): Maximum number of characters per line.
        indent (int): Indentation of the first line.
        subsequent (int or None): Indentation of all other lines; when
            ``None``, same as the first line.

    Returns:
        str: Wrapped, right-adjusted text.
    """
    normalized = re.sub(r"\s+", " ", text).strip()
    if subsequent is None:
        subsequent = indent
    filled = TextWrapper(
        width=width,
        break_long_words=False,
        replace_whitespace=True,
        initial_indent=" " * (indent + subsequent),
        subsequent_indent=" " * subsequent,
    ).fill(normalized)
    # The first line was over-indented by `subsequent` on purpose; trim it.
    return filled[subsequent:]
| 739,939
|
Center all lines of the text.
It is assumed that each line's width is smaller than B{width}, because the
line width will not be checked.
Args:
text (str): Text to wrap.
width (int): Maximum number of characters per line.
Returns:
str: Centered text.
|
def center_text(text, width=80):
    """Center all lines of *text* within *width* columns.

    Line widths are assumed to be smaller than *width*; they are not checked.

    Args:
        text (str): Text to center.
        width (int): Maximum number of characters per line.

    Returns:
        str: Centered text.
    """
    return "\n".join(line.center(width) for line in text.splitlines())
| 739,940
|
Call Python 3 raise from or emulate it for Python 2
Args:
exc_type (Any): Type of Exception
message (str): Error message to display
exc (BaseException): original exception
Returns:
None
|
def raisefrom(exc_type, message, exc):
    # type: (Any, str, BaseException) -> None
    """Raise ``exc_type(message)`` from *exc* on Python 3, emulating it on Python 2.

    Args:
        exc_type (Any): Type of Exception to raise.
        message (str): Error message to display.
        exc (BaseException): Original exception being wrapped.

    Returns:
        None
    """
    if sys.version_info[:2] < (3, 2):
        # Python 2 has no `raise ... from`; fold the cause into the message
        # and re-raise with the original traceback.
        six.reraise(exc_type, '%s - %s' % (message, exc), sys.exc_info()[2])
    else:
        six.raise_from(exc_type(message), exc)
| 740,539
|
Expand short URL or keyword to long URL.
Parameters:
short: Short URL (``http://example.com/abc``) or keyword (abc).
:return: Expanded/long URL, e.g.
``https://www.youtube.com/watch?v=dQw4w9WgXcQ``
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
|
def expand(self, short):
    """Expand a short URL or keyword to its long URL.

    Parameters:
        short: Short URL (``http://example.com/abc``) or keyword (abc).

    Returns:
        str: Expanded/long URL.

    Raises:
        ~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
            YOURLS API.
        requests.exceptions.HTTPError: Generic HTTP error.
    """
    response = self._api_request(params=dict(action='expand', shorturl=short))
    return response['longurl']
| 740,563
|
Get stats for short URL or keyword.
Parameters:
short: Short URL (http://example.com/abc) or keyword (abc).
Returns:
ShortenedURL: Shortened URL and associated data.
Raises:
~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
YOURLS API.
requests.exceptions.HTTPError: Generic HTTP error.
|
def url_stats(self, short):
    """Get stats for a short URL or keyword.

    Parameters:
        short: Short URL (http://example.com/abc) or keyword (abc).

    Returns:
        ShortenedURL: Shortened URL and associated data.

    Raises:
        ~yourls.exceptions.YOURLSHTTPError: HTTP error with response from
            YOURLS API.
        requests.exceptions.HTTPError: Generic HTTP error.
    """
    response = self._api_request(params=dict(action='url-stats', shorturl=short))
    return _json_to_shortened_url(response['link'])
| 740,564
|
Gets SQLAlchemy session given url. Your tables must inherit
from Base in hdx.utilities.database.
Args:
db_url (str): SQLAlchemy url
Returns:
sqlalchemy.orm.session.Session: SQLAlchemy session
|
def get_session(db_url):
    # type: (str) -> Session
    """Get a SQLAlchemy session for *db_url*. Tables must inherit from
    Base in hdx.utilities.database.

    Args:
        db_url (str): SQLAlchemy url.

    Returns:
        sqlalchemy.orm.session.Session: SQLAlchemy session.
    """
    engine = create_engine(db_url, poolclass=NullPool, echo=False)
    # Create any missing tables before handing out a session.
    Base.metadata.create_all(engine)
    session_factory = sessionmaker(bind=engine)
    return session_factory()
| 740,718
|
Gets PostgreSQL database connection parameters from SQLAlchemy url
Args:
db_url (str): SQLAlchemy url
Returns:
Dict[str,Any]: Dictionary of database connection parameters
|
def get_params_from_sqlalchemy_url(db_url):
    # type: (str) -> Dict[str,Any]
    """Extract PostgreSQL connection parameters from a SQLAlchemy url.

    Args:
        db_url (str): SQLAlchemy url.

    Returns:
        Dict[str,Any]: Dictionary of database connection parameters.
    """
    parts = urlsplit(db_url)
    return {
        'database': parts.path[1:],  # path includes the leading '/'
        'host': parts.hostname,
        'port': parts.port,
        'username': parts.username,
        'password': parts.password,
        'driver': parts.scheme,
    }
| 740,719
|
Waits for PostgreSQL database to be up
Args:
database (Optional[str]): Database name
host (Optional[str]): Host where database is located
port (Union[int, str, None]): Database port
username (Optional[str]): Username to log into database
password (Optional[str]): Password to log into database
Returns:
None
|
def wait_for_postgres(database, host, port, username, password):
    # type: (Optional[str], Optional[str], Union[int, str, None], Optional[str], Optional[str]) -> None
    """Block until the PostgreSQL database accepts connections.

    Retries once per second, indefinitely, until a connection succeeds.

    Args:
        database (Optional[str]): Database name.
        host (Optional[str]): Host where database is located.
        port (Union[int, str, None]): Database port.
        username (Optional[str]): Username to log into database.
        password (Optional[str]): Password to log into database.

    Returns:
        None
    """
    connecting_string = 'Checking for PostgreSQL...'
    if port is not None:
        port = int(port)  # psycopg2 expects an int port
    while True:
        try:
            logger.info(connecting_string)
            connection = psycopg2.connect(
                database=database,
                host=host,
                port=port,
                user=username,
                password=password,
                connect_timeout=3
            )
            connection.close()
            logger.info('PostgreSQL is running!')
            break
        except psycopg2.OperationalError:
            # Database not up yet - wait a second and retry.
            time.sleep(1)
| 740,721
|
Get full url including any additional parameters
Args:
url (str): URL for which to get full url
Returns:
str: Full url including any additional parameters
|
def get_full_url(self, url):
    # type: (str) -> str
    """Get the full url including any additional parameters the session adds.

    Args:
        url (str): URL for which to get the full url.

    Returns:
        str: Full url including any additional parameters.
    """
    prepared = self.session.prepare_request(Request('GET', url))
    return prepared.url
| 740,757
|
Get full url for GET request including parameters
Args:
url (str): URL to download
parameters (Optional[Dict]): Parameters to pass. Defaults to None.
Returns:
str: Full url
|
def get_url_for_get(url, parameters=None):
    # type: (str, Optional[Dict]) -> str
    """Build the full GET url with *parameters* merged into the query string.

    Args:
        url (str): URL to download.
        parameters (Optional[Dict]): Parameters to pass. Defaults to None.

    Returns:
        str: Full url.
    """
    parts = urlsplit(url)
    query = OrderedDict(parse_qsl(parts.query))
    if parameters is not None:
        query.update(parameters)
    return urlunsplit(parts._replace(query=urlencode(query)))
| 740,758
|
Get full url for POST request and all parameters including any in the url
Args:
url (str): URL to download
parameters (Optional[Dict]): Parameters to pass. Defaults to None.
Returns:
Tuple[str, Dict]: (Full url, parameters)
|
def get_url_params_for_post(url, parameters=None):
    # type: (str, Optional[Dict]) -> Tuple[str, Dict]
    """Split a POST url into its base url and all parameters (query + given).

    Args:
        url (str): URL to download.
        parameters (Optional[Dict]): Parameters to pass. Defaults to None.

    Returns:
        Tuple[str, Dict]: (Full url without query string, parameters).
    """
    parts = urlsplit(url)
    params = OrderedDict(parse_qsl(parts.query))
    if parameters is not None:
        params.update(parameters)
    return urlunsplit(parts._replace(query='')), params
| 740,759
|
Stream file from url and hash it using MD5. Must call setup method first.
Args:
url (str): URL to download
Returns:
str: MD5 hash of file
|
def hash_stream(self, url):
    # type: (str) -> str
    """Stream the previously opened response and return its MD5 hex digest.
    Must call the setup method first.

    Args:
        url (str): URL being downloaded (used in the error message).

    Returns:
        str: MD5 hash of the file.

    Raises:
        DownloadError: If reading the stream fails.
    """
    digest = hashlib.md5()
    try:
        for chunk in self.response.iter_content(chunk_size=10240):
            if not chunk:
                continue  # skip keep-alive chunks
            digest.update(chunk)
        return digest.hexdigest()
    except Exception as e:
        raisefrom(DownloadError, 'Download of %s failed in retrieval of stream!' % url, e)
| 740,761
|
Download url
Args:
url (str): URL to download
post (bool): Whether to use POST instead of GET. Defaults to False.
parameters (Optional[Dict]): Parameters to pass. Defaults to None.
timeout (Optional[float]): Timeout for connecting to URL. Defaults to None (no timeout).
Returns:
requests.Response: Response
|
def download(self, url, post=False, parameters=None, timeout=None):
    # type: (str, bool, Optional[Dict], Optional[float]) -> requests.Response
    """Download url (non-streaming); delegates to setup().

    Args:
        url (str): URL to download.
        post (bool): Whether to use POST instead of GET. Defaults to False.
        parameters (Optional[Dict]): Parameters to pass. Defaults to None.
        timeout (Optional[float]): Timeout for connecting to URL. Defaults to None (no timeout).

    Returns:
        requests.Response: Response
    """
    return self.setup(url, stream=False, post=post, parameters=parameters, timeout=timeout)
| 740,764
|
Get Tabulator stream.
Args:
url (str): URL to download
**kwargs:
headers (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers
file_type (Optional[str]): Type of file. Defaults to inferring.
delimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.
Returns:
tabulator.Stream: Tabulator Stream object
|
def get_tabular_stream(self, url, **kwargs):
    # type: (str, Any) -> tabulator.Stream
    """Open a Tabulator stream for *url*.

    Args:
        url (str): URL to download.
        **kwargs:
            headers (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers.
            file_type (Optional[str]): Type of file. Defaults to inferring.
            delimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.

    Returns:
        tabulator.Stream: Opened Tabulator Stream object.

    Raises:
        DownloadError: If opening the stream fails.
    """
    self.close_response()
    # Translate this class's `file_type` kwarg to tabulator's `format`.
    file_type = kwargs.get('file_type')
    if file_type is not None:
        kwargs['format'] = file_type
        del kwargs['file_type']
    try:
        self.response = tabulator.Stream(url, **kwargs)
        self.response.open()
        return self.response
    except TabulatorException as e:
        raisefrom(DownloadError, 'Getting tabular stream for %s failed!' % url, e)
| 740,765
|
Extract HTML table as list of dictionaries
Args:
tabletag (Tag): BeautifulSoup tag
Returns:
List[Dict]: Rows of the table as dictionaries mapping header cell text to body cell text
|
def extract_table(tabletag):
    # type: (Tag) -> List[Dict]
    """Extract an HTML table as a list of dictionaries, one per body row.

    Args:
        tabletag (Tag): BeautifulSoup tag of the table.

    Returns:
        List[Dict]: Rows as dictionaries mapping header text to cell text.
    """
    theadtag = tabletag.find_next('thead')
    headertags = theadtag.find_all('th')
    if len(headertags) == 0:
        # Some tables use <td> instead of <th> in the header row.
        headertags = theadtag.find_all('td')
    headers = []
    for tag in headertags:
        headers.append(get_text(tag))
    tbodytag = tabletag.find_next('tbody')
    trtags = tbodytag.find_all('tr')
    table = list()
    for trtag in trtags:
        row = dict()
        tdtags = trtag.find_all('td')
        for i, tag in enumerate(tdtags):
            # NOTE(review): assumes no row has more cells than there are
            # headers — a wider row would raise IndexError; confirm inputs.
            row[headers[i]] = get_text(tag)
        table.append(row)
    return table
| 740,795
|
Wraps function-based callable_obj into a `Route` instance, else
proxies a `bottle_neck.handlers.BaseHandler` subclass instance.
Args:
uri (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (instance): The callable object.
Returns:
A route instance.
Raises:
RouteError for invalid callable object type.
|
def wrap_callable(cls, uri, methods, callable_obj):
    """Wrap a function-based callable into a `Route` instance, or proxy a
    `bottle_neck.handlers.BaseHandler` subclass instance.

    Args:
        uri (str): The uri relative path.
        methods (tuple): A tuple of valid method strings.
        callable_obj (instance): The callable object.

    Returns:
        A route instance.

    Raises:
        RouteError: For an invalid callable object type.
    """
    if isinstance(callable_obj, HandlerMeta):
        # Handler classes are configured in place and returned as-is.
        callable_obj.base_endpoint = uri
        callable_obj.is_valid = True
        return callable_obj
    if not isinstance(callable_obj, types.FunctionType):
        raise RouteError("Invalid handler type.")
    return cls(uri=uri, methods=methods, callable_obj=callable_obj)
| 740,806
|
Register the route object to a `bottle.Bottle` app instance.
Args:
app (instance):
Returns:
Route instance (for chaining purposes)
|
def register_app(self, app):
    """Register this route on a `bottle.Bottle` app instance.

    Args:
        app (instance): A bottle application instance.

    Returns:
        Route: ``self`` (for chaining purposes).
    """
    # NOTE(review): bottle's Bottle.route() documents its keyword argument
    # as `method`, not `methods` — confirm against the bottle version in use.
    app.route(self.uri, methods=self.methods)(self.callable_obj)
    return self
| 740,809
|
Register a handler callable to a specific route.
Args:
entrypoint (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (callable): The callable object.
Returns:
The Router instance (for chaining purposes).
Raises:
RouteError, for missing routing params or invalid callable
object type.
|
def register_handler(self, callable_obj, entrypoint, methods=('GET',)):
    """Register a handler callable to a specific route.

    Args:
        entrypoint (str): The uri relative path.
        methods (tuple): A tuple of valid method strings.
        callable_obj (callable): The callable object.

    Returns:
        Router: ``self`` (for chaining purposes).

    Raises:
        RouteError: For missing routing params or an invalid callable
            object type.
    """
    wrapped = Route.wrap_callable(
        uri=entrypoint,
        methods=methods,
        callable_obj=callable_obj,
    )
    if not wrapped.is_valid:
        raise RouteError(  # pragma: no cover
            "Missing params: methods: {} - entrypoint: {}".format(
                methods, entrypoint
            )
        )
    self._routes.add(wrapped)
    return self
| 740,810
|
Mounts all registered routes to a bottle.py application instance.
Args:
app (instance): A `bottle.Bottle()` application instance.
Returns:
The Router instance (for chaining purposes).
|
def mount(self, app=None):
    """Mount all registered routes on a bottle.py application instance.

    Args:
        app (instance): A `bottle.Bottle()` application instance.

    Returns:
        Router: ``self`` (for chaining purposes).
    """
    # NOTE(review): the default app=None will fail inside register_app
    # (None has no .route) — callers are expected to pass a real app.
    for endpoint in self._routes:
        endpoint.register_app(app)
    return self
| 740,811
|
Shortcut API for HTTP 201 `Created` response.
Args:
data (object): Response key/value data.
Returns:
WSResponse Instance.
|
def created(cls, data=None):
    """Shortcut API for an HTTP 201 `Created` response.

    Args:
        data (object): Response key/value data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '201 Created'
    ws_response = cls(201, data=data)
    return ws_response.to_json
| 740,821
|
Shortcut API for HTTP 304 `Not Modified` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def not_modified(cls, errors=None):
    """Shortcut API for an HTTP 304 `Not Modified` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '304 Not Modified'
    ws_response = cls(304, None, errors)
    return ws_response.to_json
| 740,822
|
Shortcut API for HTTP 400 `Bad Request` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def bad_request(cls, errors=None):
    """Shortcut API for an HTTP 400 `Bad Request` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '400 Bad Request'
    ws_response = cls(400, errors=errors)
    return ws_response.to_json
| 740,823
|
Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def unauthorized(cls, errors=None):
    """Shortcut API for an HTTP 401 `Unauthorized` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '401 Unauthorized'
    ws_response = cls(401, errors=errors)
    return ws_response.to_json
| 740,824
|
Shortcut API for HTTP 403 `Forbidden` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def forbidden(cls, errors=None):
    """Shortcut API for an HTTP 403 `Forbidden` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '403 Forbidden'
    ws_response = cls(403, errors=errors)
    return ws_response.to_json
| 740,825
|
Shortcut API for HTTP 404 `Not found` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def not_found(cls, errors=None):
    """Shortcut API for an HTTP 404 `Not Found` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '404 Not Found'
    ws_response = cls(404, None, errors)
    return ws_response.to_json
| 740,826
|
Shortcut API for HTTP 405 `Method not allowed` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def method_not_allowed(cls, errors=None):
    """Shortcut API for an HTTP 405 `Method Not Allowed` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '405 Method Not Allowed'
    ws_response = cls(405, None, errors)
    return ws_response.to_json
| 740,827
|
Shortcut API for HTTP 501 `Not Implemented` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def not_implemented(cls, errors=None):
    """Shortcut API for an HTTP 501 `Not Implemented` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '501 Not Implemented'
    ws_response = cls(501, None, errors)
    return ws_response.to_json
| 740,828
|
Shortcut API for HTTP 503 `Service Unavailable` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
|
def service_unavailable(cls, errors=None):
    """Shortcut API for an HTTP 503 `Service Unavailable` response.

    Args:
        errors (list): Response error data.

    Returns:
        WSResponse instance serialized via ``to_json``.
    """
    if cls.expose_status:  # pragma: no cover
        cls.response.content_type = 'application/json'
        cls.response._status_line = '503 Service Unavailable'
    ws_response = cls(503, None, errors)
    return ws_response.to_json
| 740,829
|
Compute pagination info for collection filtering.
Args:
limit (int): Collection filter limit.
offset (int): Collection filter offset.
record_count (int): Collection filter total record count.
base_uri (str): Collection filter base uri (without limit, offset)
page_nav_tpl (str): Pagination template.
Returns:
A mapping of pagination info.
|
def paginator(limit, offset, record_count, base_uri, page_nav_tpl='&limit={}&offset={}'):
    """Compute pagination info for collection filtering.

    Args:
        limit (int): Collection filter limit.
        offset (int): Collection filter offset.
        record_count (int): Collection filter total record count.
        base_uri (str): Collection filter base uri (without limit, offset).
        page_nav_tpl (str): Pagination template.

    Returns:
        OrderedDict: Mapping of pagination info.
    """
    total_pages = int(math.ceil(record_count / limit))
    next_page = None
    prev_page = None
    # A next page exists while the current window has not covered all records.
    if limit + offset <= record_count:
        next_page = base_uri + page_nav_tpl.format(limit, offset + limit)
    # A previous page exists once we are at least one full window in.
    if offset >= limit:
        prev_page = base_uri + page_nav_tpl.format(limit, offset - limit)
    return OrderedDict([
        ('total_count', record_count),
        ('total_pages', total_pages),
        ('next_page', next_page),
        ('prev_page', prev_page),
    ])
| 740,974
|
Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True
|
def plugin_method(*plugin_names):
    """Decorator that signs a web handler with the plugins to be applied,
    set as boolean attributes.

    Args:
        *plugin_names: Plugin callable names to mark on the handler.

    Returns:
        The wrapped handler callable.

    Examples:
        >>> @plugin_method('json', 'bill')
        ... def method():
        ...     return "Hello!"
        ...
        >>> method.json
        True
        >>> method.bill
        True
    """
    def mark(handler):
        for attr in plugin_names:
            if not hasattr(handler, attr):
                setattr(handler, attr, True)
        return handler
    return mark
| 741,008
|
Get a temporary directory optionally with folder appended (and created if it doesn't exist)
Args:
folder (Optional[str]): Folder to create in temporary folder. Defaults to None.
delete (bool): Whether to delete folder on exiting with statement
Returns:
str: A temporary directory
|
def temp_dir(folder=None, delete=True):
    # type: (Optional[str], bool) -> str
    """Yield a temporary directory, optionally with *folder* appended
    (created if it doesn't exist).

    NOTE(review): written as a generator for use as a context manager —
    presumably decorated with @contextmanager outside this view; confirm.

    Args:
        folder (Optional[str]): Folder to create in the temporary folder. Defaults to None.
        delete (bool): Whether to delete the folder on exiting the with statement.

    Returns:
        str: A temporary directory.
    """
    path = get_temp_dir()
    if folder:
        path = join(path, folder)
        if not exists(path):
            makedirs(path)
    try:
        yield path
    finally:
        if delete:
            rmtree(path)
| 741,057
|
Simultaneously replace multiple strings in a string
Args:
string (str): Input string
replacements (Dict[str,str]): Replacements dictionary
Returns:
str: String with replacements
|
def multiple_replace(string, replacements):
    # type: (str, Dict[str,str]) -> str
    """Simultaneously replace multiple strings in a string.

    Args:
        string (str): Input string.
        replacements (Dict[str,str]): Replacements dictionary.

    Returns:
        str: String with replacements applied.
    """
    # Longest keys first so longer matches win over their prefixes.
    keys = sorted(replacements, key=len, reverse=True)
    pattern = re.compile("|".join(re.escape(k) for k in keys), flags=re.DOTALL)
    return pattern.sub(lambda m: replacements[m.group(0)], string)
| 741,075
|
Returns a list of matching blocks of text in a and b
Args:
a (str): First string to match
b (str): Second string to match
match_min_size (int): Minimum block size to match on. Defaults to 30.
ignore (str): Any characters to ignore in matching. Defaults to ''.
end_characters (str): End characters to look for. Defaults to ''.
Returns:
List[str]: List of matching blocks of text
|
def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''):
    # type: (str, str, int, str, str) -> List[str]
    """Return a list of matching blocks of text between *a* and *b*.

    Args:
        a (str): First string to match.
        b (str): Second string to match.
        match_min_size (int): Minimum block size to match on. Defaults to 30.
        ignore (str): Any characters to ignore in matching. Defaults to ''.
        end_characters (str): End characters to trim matches to. Defaults to ''.

    Returns:
        List[str]: List of matching blocks of text.
    """
    compare = difflib.SequenceMatcher(lambda x: x in ignore)
    compare.set_seqs(a=a, b=b)
    matching_text = list()
    for match in compare.get_matching_blocks():
        start = match.a
        text = a[start: start+match.size]
        if end_characters:
            prev_text = text
            # Trim leading end-characters off the front of the match...
            while len(text) != 0 and text[0] in end_characters:
                text = text[1:]
            # ...then trim the tail back to the last end-character.
            while len(text) != 0 and text[-1] not in end_characters:
                text = text[:-1]
            if len(text) == 0:
                # Trimming removed everything; fall back to the raw match.
                text = prev_text
        if len(text) >= match_min_size:
            matching_text.append(text)
    return matching_text
| 741,076
|
Returns a string containing matching blocks of text in a list of strings followed by non-matching.
Args:
string_list (List[str]): List of strings to match
match_min_size (int): Minimum block size to match on. Defaults to 30.
ignore (str): Any characters to ignore in matching. Defaults to ''.
end_characters (str): End characters to look for. Defaults to '.!\r\n'.
Returns:
str: String containing matching blocks of text followed by non-matching
|
def get_matching_text(string_list, match_min_size=30, ignore='', end_characters='.!\r\n'):
    # type: (List[str], int, str, str) -> str
    """Return a string of the blocks of text common to every string in
    *string_list*, joined together.

    Args:
        string_list (List[str]): List of strings to match.
        match_min_size (int): Minimum block size to match on. Defaults to 30.
        ignore (str): Any characters to ignore in matching. Defaults to ''.
        end_characters (str): End characters to look for. Defaults to '.!\\r\\n'.

    Returns:
        str: String containing the matching blocks of text.
    """
    common = string_list[0]
    # Successively intersect the running common text with each next string.
    for other in string_list[1:]:
        matches = get_matching_text_in_strs(common, other, match_min_size=match_min_size,
                                            ignore=ignore, end_characters=end_characters)
        common = ''.join(matches)
    return common
| 741,077
|
Merges b into a and returns merged result
NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen
Args:
a (DictUpperBound): dictionary to merge into
b (DictUpperBound): dictionary to merge from
merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.
Returns:
DictUpperBound: Merged dictionary
|
def merge_two_dictionaries(a, b, merge_lists=False):
    # type: (DictUpperBound, DictUpperBound, bool) -> DictUpperBound
    """Merge *b* into *a* (recursively for nested dicts) and return the result.

    NOTE: tuples and arbitrary objects are not handled as it is totally
    ambiguous what should happen.

    Args:
        a (DictUpperBound): Dictionary to merge into.
        b (DictUpperBound): Dictionary to merge from.
        merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.

    Returns:
        DictUpperBound: Merged dictionary.

    Raises:
        ValueError: If the values cannot be merged.
    """
    key = None  # remembered for the error message raised on TypeError
    # ## debug output
    # sys.stderr.write('DEBUG: %s to %s\n' %(b,a))
    try:
        if a is None or isinstance(a, (six.string_types, six.text_type, six.integer_types, float)):
            # border case for first run or if a is a primitive
            a = b
        elif isinstance(a, list):
            # lists can be appended or replaced
            if isinstance(b, list):
                if merge_lists:
                    # merge lists
                    a.extend(b)
                else:
                    # replace list
                    a = b
            else:
                # append to list
                a.append(b)
        elif isinstance(a, (dict, UserDict)):
            # dicts must be merged
            if isinstance(b, (dict, UserDict)):
                for key in b:
                    if key in a:
                        a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists)
                    else:
                        a[key] = b[key]
            else:
                raise ValueError('Cannot merge non-dict "%s" into dict "%s"' % (b, a))
        else:
            raise ValueError('NOT IMPLEMENTED "%s" into "%s"' % (b, a))
    except TypeError as e:
        raise ValueError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))
    return a
| 741,104
|
Merges all dictionaries in dicts into a single dictionary and returns result
Args:
dicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list
merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.
Returns:
DictUpperBound: Merged dictionary
|
def merge_dictionaries(dicts, merge_lists=False):
    # type: (List[DictUpperBound], bool) -> DictUpperBound
    """Merge all dictionaries in *dicts* into the first one and return it.

    Args:
        dicts (List[DictUpperBound]): Dictionaries to merge into the first one in the list.
        merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False.

    Returns:
        DictUpperBound: Merged dictionary (the first element of *dicts*).
    """
    target = dicts[0]
    for source in dicts[1:]:
        merge_two_dictionaries(target, source, merge_lists=merge_lists)
    return target
| 741,105
|
Compares two dictionaries
Args:
d1 (DictUpperBound): First dictionary to compare
d2 (DictUpperBound): Second dictionary to compare
no_key (str): What value to use if a key is not found. Defaults to '<KEYNOTFOUND>'.
Returns:
Dict: Comparison dictionary
|
def dict_diff(d1, d2, no_key='<KEYNOTFOUND>'):
    # type: (DictUpperBound, DictUpperBound, str) -> Dict
    """Compare two dictionaries.

    Args:
        d1 (DictUpperBound): First dictionary to compare.
        d2 (DictUpperBound): Second dictionary to compare.
        no_key (str): Value to use when a key is not found. Defaults to '<KEYNOTFOUND>'.

    Returns:
        Dict: Mapping of differing keys to (d1 value, d2 value) pairs,
        with *no_key* standing in for missing keys.
    """
    keys1 = set(d1.keys())
    keys2 = set(d2.keys())
    shared = keys1 & keys2
    result = {k: (d1[k], d2[k]) for k in shared if d1[k] != d2[k]}
    for k in keys1 - shared:
        result[k] = (d1[k], no_key)
    for k in keys2 - shared:
        result[k] = (no_key, d2[k])
    return result
| 741,106
|
Add value to a list in a dictionary by key
Args:
dictionary (DictUpperBound): Dictionary to which to add values
key (Any): Key within dictionary
value (Any): Value to add to list in dictionary
Returns:
None
|
def dict_of_lists_add(dictionary, key, value):
    # type: (DictUpperBound, Any, Any) -> None
    """Append *value* to the list stored under *key*, creating the list if needed.

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values.
        key (Any): Key within dictionary.
        value (Any): Value to add to the list in the dictionary.

    Returns:
        None
    """
    dictionary.setdefault(key, list()).append(value)
| 741,107
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.