| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q265700 | MoveFile | validation | def MoveFile(source_filename, target_filename):
'''
Moves a file.
:param unicode source_filename:
:param unicode target_filename:
:raises NotImplementedForRemotePathError:
If trying to operate with non-local files.
'''
_AssertIsLocal(source_filename)
_AssertIsLocal(target_filename)
import shutil
shutil.move(source_filename, target_filename) | python | {
"resource": ""
} |
q265701 | MoveDirectory | validation | def MoveDirectory(source_dir, target_dir):
'''
Moves a directory.
:param unicode source_dir:
:param unicode target_dir:
:raises NotImplementedError:
If trying to move anything other than:
Local dir -> local dir
FTP dir -> FTP dir (same host)
'''
if not IsDir(source_dir):
from ._exceptions import DirectoryNotFoundError
raise DirectoryNotFoundError(source_dir)
if Exists(target_dir):
from ._exceptions import DirectoryAlreadyExistsError
raise DirectoryAlreadyExistsError(target_dir)
from six.moves.urllib.parse import urlparse
source_url = urlparse(source_dir)
target_url = urlparse(target_dir)
# Local to local
if _UrlIsLocal(source_url) and _UrlIsLocal(target_url):
import shutil
shutil.move(source_dir, target_dir)
# FTP to FTP
elif source_url.scheme == 'ftp' and target_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(target_url.scheme)
else:
raise NotImplementedError('Can only move directories local->local or ftp->ftp') | python | {
"resource": ""
} |
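Both move helpers above dispatch on a private `_UrlIsLocal` check that is not included in this dump. A minimal standalone sketch of that dispatch logic (the one-letter-scheme rule, which treats Windows drive letters as local paths, is an assumption):

```python
from urllib.parse import urlparse

def _url_is_local(url):
    # Assumption: a scheme shorter than 2 chars means a local path --
    # this also treats Windows drive letters like "c:" as local.
    return len(url.scheme) < 2

for path in ('/tmp/source_dir', 'c:\\temp\\dir', 'ftp://host/dir'):
    url = urlparse(path)
    print(path, '->', 'local' if _url_is_local(url) else url.scheme)
```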
q265702 | GetFileContents | validation | def GetFileContents(filename, binary=False, encoding=None, newline=None):
'''
Reads a file and returns its contents. Works for both local and remote files.
:param unicode filename:
:param bool binary:
If True, returns the file as-is, ignoring any EOL conversion.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:returns str|unicode:
The file's contents.
Returns unicode string when `encoding` is not None.
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
source_file = OpenFile(filename, binary=binary, encoding=encoding, newline=newline)
try:
contents = source_file.read()
finally:
source_file.close()
return contents | python | {
"resource": ""
} |
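`OpenFile` is not part of this dump; for local files the read path above behaves like `io.open` with the same `encoding`/`newline` arguments. A small self-contained sketch of that behaviour:

```python
import io
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'sample.txt')
# newline='' on write disables translation so the raw '\r\n' survives.
with io.open(path, 'w', encoding='utf-8', newline='') as f:
    f.write(u'first\r\nsecond\n')

# newline=None enables universal newlines: '\r\n' and '\r' become '\n' on read.
with io.open(path, 'r', encoding='utf-8', newline=None) as f:
    print(repr(f.read()))  # 'first\nsecond\n'
```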
q265703 | GetFileLines | validation | def GetFileLines(filename, newline=None, encoding=None):
'''
Reads a file and returns its contents as a list of lines. Works for both local and remote files.
:param unicode filename:
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:returns list(unicode):
The file's lines
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
return GetFileContents(
filename,
binary=False,
encoding=encoding,
newline=newline,
).split('\n') | python | {
"resource": ""
} |
q265704 | ListFiles | validation | def ListFiles(directory):
'''
Lists the files in the given directory
:type directory: unicode
:param directory:
A directory or URL
:rtype: list(unicode)
:returns:
List of filenames/directories found in the given directory.
Returns None if the given directory does not exist.
If `directory` is a unicode string, all files returned will also be unicode
:raises NotImplementedProtocol:
If file protocol is not local or FTP
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
# Handle local
if _UrlIsLocal(directory_url):
if not os.path.isdir(directory):
return None
return os.listdir(directory)
# Handle FTP
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme) | python | {
"resource": ""
} |
q265705 | CreateFile | validation | def CreateFile(filename, contents, eol_style=EOL_STYLE_NATIVE, create_dir=True, encoding=None, binary=False):
'''
Create a file with the given contents.
:param unicode filename:
Filename and path to be created.
:param unicode contents:
The file contents as a string.
:type eol_style: EOL_STYLE_XXX constant
:param eol_style:
Replaces the EOL by the appropriate EOL depending on the eol_style value.
Considers that all content is using only "\n" as EOL.
:param bool create_dir:
If True, also creates directories needed in filename's path
:param unicode encoding:
Target file's content encoding. Defaults to sys.getfilesystemencoding()
Ignored if `binary` = True
:param bool binary:
If True, file is created in binary mode. In this case, `contents` must be `bytes` and not
`unicode`
:return unicode:
Returns the name of the file created.
:raises NotImplementedProtocol:
If file protocol is not local or FTP
:raises TypeError:
If trying to mix unicode `contents` without `encoding`, or `encoding` without
unicode `contents`
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
# Lots of checks when writing binary files
if binary:
if isinstance(contents, six.text_type):
raise TypeError('contents must be str (bytes) when binary=True')
else:
if not isinstance(contents, six.text_type):
raise TypeError('contents must be unicode when binary=False')
# Replaces eol on each line by the given eol_style.
contents = _HandleContentsEol(contents, eol_style)
# Encode the string and pretend we are using binary to prevent 'open' from automatically
# changing EOLs
encoding = encoding or sys.getfilesystemencoding()
contents = contents.encode(encoding)
binary = True
# If asked, creates directory containing file
if create_dir:
dirname = os.path.dirname(filename)
if dirname:
CreateDirectory(dirname)
from six.moves.urllib.parse import urlparse
filename_url = urlparse(filename)
# Handle local
if _UrlIsLocal(filename_url):
# Always writing as binary (see handling above)
with open(filename, 'wb') as oss:
oss.write(contents)
# Handle FTP
elif filename_url.scheme == 'ftp':
# Always writing as binary (see handling above)
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(filename_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(filename_url.scheme)
return filename | python | {
"resource": ""
} |
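A condensed sketch of the write path above: normalise EOLs first, encode, then write in binary so `open` cannot re-convert newlines. Assumes `EOL_STYLE_WINDOWS` is `'\r\n'` (consistent with its use in `_HandleContentsEol` further down) and, per the docstring, that `contents` uses only `'\n'`:

```python
import os
import sys
import tempfile

contents = u'line1\nline2\n'
eol_style = '\r\n'  # assumed value of EOL_STYLE_WINDOWS

# Normalise EOLs, encode, then write as binary to avoid a second conversion.
data = contents.replace('\n', eol_style).encode(sys.getfilesystemencoding())
path = os.path.join(tempfile.mkdtemp(), 'out.txt')
with open(path, 'wb') as f:
    f.write(data)

with open(path, 'rb') as f:
    print(f.read())  # b'line1\r\nline2\r\n'
```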
q265706 | ReplaceInFile | validation | def ReplaceInFile(filename, old, new, encoding=None):
'''
Replaces all occurrences of "old" by "new" in the given file.
:param unicode filename:
The name of the file.
:param unicode old:
The string to search for.
:param unicode new:
Replacement string.
:return unicode:
The new contents of the file.
'''
contents = GetFileContents(filename, encoding=encoding)
contents = contents.replace(old, new)
CreateFile(filename, contents, encoding=encoding)
return contents | python | {
"resource": ""
} |
q265707 | CreateDirectory | validation | def CreateDirectory(directory):
'''
Create directory including any missing intermediate directory.
:param unicode directory:
:return unicode|urlparse.ParseResult:
Returns the created directory or url (see urlparse).
:raises NotImplementedProtocol:
If protocol is not local or FTP.
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
directory_url = urlparse(directory)
# Handle local
if _UrlIsLocal(directory_url):
if not os.path.exists(directory):
os.makedirs(directory)
return directory
# Handle FTP
elif directory_url.scheme == 'ftp':
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme)
else:
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(directory_url.scheme) | python | {
"resource": ""
} |
q265708 | DeleteDirectory | validation | def DeleteDirectory(directory, skip_on_error=False):
'''
Deletes a directory.
:param unicode directory:
:param bool skip_on_error:
If True, ignore any errors when trying to delete directory (for example, directory not
found)
:raises NotImplementedForRemotePathError:
If trying to delete a remote directory.
'''
_AssertIsLocal(directory)
import shutil
def OnError(fn, path, excinfo):
'''
Remove the read-only flag and try to remove again.
On Windows, rmtree fails when trying to remove a read-only file. This fixes it!
Another case: read-only directories return True in the os.access test. It seems that read-only
directories have their own flag (see the Properties window in Explorer).
'''
if IsLink(path):
return
if fn is os.remove and os.access(path, os.W_OK):
raise
# Make the file WRITABLE and execute the original delete function (osfunc)
import stat
os.chmod(path, stat.S_IWRITE)
fn(path)
try:
if not os.path.isdir(directory):
if skip_on_error:
return
from ._exceptions import DirectoryNotFoundError
raise DirectoryNotFoundError(directory)
shutil.rmtree(directory, onerror=OnError)
except:
if not skip_on_error:
raise | python | {
"resource": ""
} |
q265709 | ListMappedNetworkDrives | validation | def ListMappedNetworkDrives():
'''
On Windows, returns a list of mapped network drives
:return: tuple(string, string, bool)
For each mapped network drive, returns a 3-value tuple:
- the local drive
- the remote path
- True if the mapping is enabled (warning: not reliable)
'''
if sys.platform != 'win32':
raise NotImplementedError
drives_list = []
netuse = _CallWindowsNetCommand(['use'])
for line in netuse.split(EOL_STYLE_WINDOWS):
match = re.match(r"(\w*)\s+(\w:)\s+(.+)", line.rstrip())
if match:
drives_list.append((match.group(2), match.group(3), match.group(1) == 'OK'))
return drives_list | python | {
"resource": ""
} |
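The `net use` parsing can be checked without Windows by feeding the regex a sample line (the line below is made-up sample output, not a live query):

```python
import re

line = 'OK           Z:        \\\\fileserver\\projects'
match = re.match(r"(\w*)\s+(\w:)\s+(.+)", line.rstrip())
if match:
    # Same grouping as above: status, local drive, remote path.
    print((match.group(2), match.group(3), match.group(1) == 'OK'))
```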
q265710 | CreateLink | validation | def CreateLink(target_path, link_path, override=True):
'''
Create a symbolic link at `link_path` pointing to `target_path`.
:param unicode target_path:
Link target
:param unicode link_path:
Fullpath to link name
:param bool override:
If True and `link_path` already exists as a link, that link is overridden.
'''
_AssertIsLocal(target_path)
_AssertIsLocal(link_path)
if override and IsLink(link_path):
DeleteLink(link_path)
# Create directories leading up to link
dirname = os.path.dirname(link_path)
if dirname:
CreateDirectory(dirname)
if sys.platform != 'win32':
return os.symlink(target_path, link_path) # @UndefinedVariable
else:
#import ntfsutils.junction
#return ntfsutils.junction.create(target_path, link_path)
import jaraco.windows.filesystem
return jaraco.windows.filesystem.symlink(target_path, link_path)
# Legacy Win32 fallback below is unreachable: the jaraco call above already returns.
from ._easyfs_win32 import CreateSymbolicLink
try:
dw_flags = 0
if target_path and os.path.isdir(target_path):
dw_flags = 1
return CreateSymbolicLink(target_path, link_path, dw_flags)
except Exception as e:
reraise(e, 'Creating link "%(link_path)s" pointing to "%(target_path)s"' % locals()) | python | {
"resource": ""
} |
q265711 | ReadLink | validation | def ReadLink(path):
'''
Read the target of the symbolic link at `path`.
:param unicode path:
Path to a symbolic link
:returns unicode:
Target of a symbolic link
'''
_AssertIsLocal(path)
if sys.platform != 'win32':
return os.readlink(path) # @UndefinedVariable
if not IsLink(path):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(path)
import jaraco.windows.filesystem
result = jaraco.windows.filesystem.readlink(path)
if '\\??\\' in result:
result = result.split('\\??\\')[1]
return result | python | {
"resource": ""
} |
q265712 | _AssertIsLocal | validation | def _AssertIsLocal(path):
'''
Checks if a given path is local, raise an exception if not.
This is used in filesystem functions that do not support remote operations yet.
:param unicode path:
:raises NotImplementedForRemotePathError:
If the given path is not local
'''
from six.moves.urllib.parse import urlparse
if not _UrlIsLocal(urlparse(path)):
from ._exceptions import NotImplementedForRemotePathError
raise NotImplementedForRemotePathError | python | {
"resource": ""
} |
q265713 | _HandleContentsEol | validation | def _HandleContentsEol(contents, eol_style):
'''
Replaces eol on each line by the given eol_style.
:param unicode contents:
:type eol_style: EOL_STYLE_XXX constant
:param eol_style:
'''
if eol_style == EOL_STYLE_NONE:
return contents
if eol_style == EOL_STYLE_UNIX:
return contents.replace('\r\n', eol_style).replace('\r', eol_style)
if eol_style == EOL_STYLE_MAC:
return contents.replace('\r\n', eol_style).replace('\n', eol_style)
if eol_style == EOL_STYLE_WINDOWS:
return contents.replace('\r\n', '\n').replace('\r', '\n').replace('\n', EOL_STYLE_WINDOWS)
raise ValueError('Unexpected eol style: %r' % (eol_style,)) | python | {
"resource": ""
} |
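The replacement order above can be sanity-checked standalone: every EOL variant is collapsed to `'\n'` first (in the Windows case) before expanding to the target style:

```python
mixed = 'a\r\nb\rc\n'

# Windows: collapse every EOL to '\n', then expand to '\r\n'.
windows = mixed.replace('\r\n', '\n').replace('\r', '\n').replace('\n', '\r\n')
# Unix: rewrite '\r\n' first so the lone-'\r' pass cannot double-convert.
unix = mixed.replace('\r\n', '\n').replace('\r', '\n')

print(repr(windows))  # 'a\r\nb\r\nc\r\n'
print(repr(unix))     # 'a\nb\nc\n'
```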
q265714 | MatchMasks | validation | def MatchMasks(filename, masks):
'''
Verifies if a filename matches the given patterns.
:param str filename: The filename to match.
:param list(str) masks: The patterns to search in the filename.
:return bool:
True if the filename matches at least one pattern, False otherwise.
'''
import fnmatch
if not isinstance(masks, (list, tuple)):
masks = [masks]
for i_mask in masks:
if fnmatch.fnmatch(filename, i_mask):
return True
return False | python | {
"resource": ""
} |
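`MatchMasks` is a thin wrapper over `fnmatch`; the equivalent check in isolation:

```python
import fnmatch

masks = ['*.py', 'test_*']
for name in ('module.py', 'test_data.json', 'readme.md'):
    matched = any(fnmatch.fnmatch(name, mask) for mask in masks)
    print(name, matched)
```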
q265715 | FindFiles | validation | def FindFiles(dir_, in_filters=None, out_filters=None, recursive=True, include_root_dir=True, standard_paths=False):
'''
Searches for files in a given directory that match with the given patterns.
:param str dir_: the root directory in which to search for files.
:param list(str) in_filters: a list with patterns to match (default = all). E.g.: ['*.py']
:param list(str) out_filters: a list with patterns to ignore (default = none). E.g.: ['*.py']
:param bool recursive: if True search in subdirectories, otherwise, just in the root.
:param bool include_root_dir: if True, includes the directory being searched in the returned paths
:param bool standard_paths: if True, always uses unix path separators "/"
:return list(str):
A list of strings with the files that matched (with the full path in the filesystem).
'''
# all files
if in_filters is None:
in_filters = ['*']
if out_filters is None:
out_filters = []
result = []
# keep only files that do not match any pattern in out_filters
# walk through all directories based on dir
for dir_root, directories, filenames in os.walk(dir_):
for i_directory in directories[:]:
if MatchMasks(i_directory, out_filters):
directories.remove(i_directory)
for filename in directories + filenames:
if MatchMasks(filename, in_filters) and not MatchMasks(filename, out_filters):
result.append(os.path.join(dir_root, filename))
if not recursive:
break
if not include_root_dir:
# Remove root dir from all paths
dir_prefix = len(dir_) + 1
result = [file[dir_prefix:] for file in result]
if standard_paths:
result = list(map(StandardizePath, result))
return result | python | {
"resource": ""
} |
q265716 | ExpandUser | validation | def ExpandUser(path):
'''
os.path.expanduser wrapper, necessary because it cannot handle unicode strings properly.
This is not necessary in Python 3.
:param path:
.. seealso:: os.path.expanduser
'''
if six.PY2:
encoding = sys.getfilesystemencoding()
path = path.encode(encoding)
result = os.path.expanduser(path)
if six.PY2:
result = result.decode(encoding)
return result | python | {
"resource": ""
} |
q265717 | DumpDirHashToStringIO | validation | def DumpDirHashToStringIO(directory, stringio, base='', exclude=None, include=None):
'''
Helper to iterate over the files in a directory putting those in the passed StringIO in ini
format.
:param unicode directory:
The directory for which the hash should be done.
:param StringIO stringio:
The string to which the dump should be put.
:param unicode base:
If provided should be added (along with a '/') before the name=hash of file.
:param unicode exclude:
Pattern to match files to exclude from the hashing. E.g.: *.gz
:param unicode include:
Pattern to match files to include in the hashing. E.g.: *.zip
'''
import fnmatch
import os
files = [(os.path.join(directory, i), i) for i in os.listdir(directory)]
files = [i for i in files if os.path.isfile(i[0])]
for fullname, filename in files:
if include is not None:
if not fnmatch.fnmatch(fullname, include):
continue
if exclude is not None:
if fnmatch.fnmatch(fullname, exclude):
continue
md5 = Md5Hex(fullname)
if base:
stringio.write('%s/%s=%s\n' % (base, filename, md5))
else:
stringio.write('%s=%s\n' % (filename, md5)) | python | {
"resource": ""
} |
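`Md5Hex` is not included in this dump; `hashlib` stands in for it in this self-contained sketch of the same name=hash ini output:

```python
import hashlib
import io
import os
import tempfile

directory = tempfile.mkdtemp()
with open(os.path.join(directory, 'a.txt'), 'wb') as f:
    f.write(b'hello')

stringio = io.StringIO()
for filename in sorted(os.listdir(directory)):
    fullname = os.path.join(directory, filename)
    if os.path.isfile(fullname):
        with open(fullname, 'rb') as f:
            md5 = hashlib.md5(f.read()).hexdigest()
        stringio.write('%s=%s\n' % (filename, md5))

print(stringio.getvalue())  # a.txt=5d41402abc4b2a76b9719d911017c592
```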
q265718 | IterHashes | validation | def IterHashes(iterator_size, hash_length=7):
'''
Iterator for random hexadecimal hashes
:param iterator_size:
Number of hashes to return before this iterator stops.
Goes on forever if `iterator_size` is negative.
:param int hash_length:
Size of each hash returned.
:return generator(unicode):
'''
if not isinstance(iterator_size, int):
raise TypeError('iterator_size must be integer.')
count = 0
while count != iterator_size:
count += 1
yield GetRandomHash(hash_length) | python | {
"resource": ""
} |
q265719 | PushPopItem | validation | def PushPopItem(obj, key, value):
'''
A context manager to replace and restore a value using a getter and setter.
:param object obj: The object to replace/restore.
:param object key: The key to replace/restore in the object.
:param object value: The value to replace.
Example::
with PushPopItem(sys.modules, 'alpha', None):
with pytest.raises(ImportError):
import alpha
'''
if key in obj:
old_value = obj[key]
obj[key] = value
yield value
obj[key] = old_value
else:
obj[key] = value
yield value
del obj[key] | python | {
"resource": ""
} |
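The two `yield`s imply `PushPopItem` is decorated with `@contextlib.contextmanager`; the decorator is not visible in this dump, so this standalone sketch assumes it:

```python
import contextlib

@contextlib.contextmanager
def push_pop_item(obj, key, value):
    # Same replace/restore logic as PushPopItem above.
    if key in obj:
        old_value = obj[key]
        obj[key] = value
        yield value
        obj[key] = old_value
    else:
        obj[key] = value
        yield value
        del obj[key]

d = {'mode': 'prod'}
with push_pop_item(d, 'mode', 'test'):
    print(d['mode'])  # test
print(d['mode'])      # prod
```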
q265720 | db_to_specifier | validation | def db_to_specifier(db_string):
"""
Return the database specifier for a database string.
This accepts a database name or URL, and returns a database specifier in the
format accepted by ``specifier_to_db``. It is recommended that you consult
the documentation for that function for an explanation of the format.
"""
local_match = PLAIN_RE.match(db_string)
remote_match = URL_RE.match(db_string)
# If this looks like a local specifier:
if local_match:
return 'local:' + local_match.groupdict()['database']
# If this looks like a remote specifier:
elif remote_match:
# Just a fancy way of getting 3 variables in 2 lines...
hostname, portnum, database = map(remote_match.groupdict().get,
('hostname', 'portnum', 'database'))
local_url = settings._('COUCHDB_SERVER', 'http://127.0.0.1:5984/')
localhost, localport = urlparse.urlparse(local_url)[1].split(':')
# If it's the local server, then return a local specifier.
if (localhost == hostname) and (localport == portnum):
return 'local:' + database
# Otherwise, prepare and return the remote specifier.
return 'remote:%s:%s:%s' % (hostname, portnum, database)
# Throw a wobbly.
raise ValueError('Invalid database string: %r' % (db_string,)) | python | {
"resource": ""
} |
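`PLAIN_RE` and `URL_RE` are not part of this dump; the stand-ins below are hypothetical but plausible shapes (named groups matching the `groupdict()` keys used above), just enough to exercise the two branches:

```python
import re

# Hypothetical stand-ins for the module's PLAIN_RE and URL_RE.
PLAIN_RE = re.compile(r'^(?P<database>[a-z][a-z0-9_-]*)$')
URL_RE = re.compile(
    r'^https?://(?P<hostname>[^:/]+):(?P<portnum>\d+)/(?P<database>[^/]+)/?$')

print(PLAIN_RE.match('mydb').groupdict())
# {'database': 'mydb'}
print(URL_RE.match('http://couch.example.com:5984/mydb').groupdict())
# {'hostname': 'couch.example.com', 'portnum': '5984', 'database': 'mydb'}
```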
q265721 | get_db_from_db | validation | def get_db_from_db(db_string):
"""Return a CouchDB database instance from a database string."""
server = get_server_from_db(db_string)
local_match = PLAIN_RE.match(db_string)
remote_match = URL_RE.match(db_string)
# If this looks like a local specifier:
if local_match:
return server[local_match.groupdict()['database']]
elif remote_match:
return server[remote_match.groupdict()['database']]
raise ValueError('Invalid database string: %r' % (db_string,)) | python | {
"resource": ""
} |
q265722 | ensure_specifier_exists | validation | def ensure_specifier_exists(db_spec):
"""Make sure a DB specifier exists, creating it if necessary."""
local_match = LOCAL_RE.match(db_spec)
remote_match = REMOTE_RE.match(db_spec)
plain_match = PLAIN_RE.match(db_spec)
if local_match:
db_name = local_match.groupdict().get('database')
server = shortcuts.get_server()
if db_name not in server:
server.create(db_name)
return True
elif remote_match:
hostname, portnum, database = map(remote_match.groupdict().get,
('hostname', 'portnum', 'database'))
server = shortcuts.get_server(
server_url=('http://%s:%s' % (hostname, portnum)))
if database not in server:
server.create(database)
return True
elif plain_match:
db_name = plain_match.groupdict().get('database')
server = shortcuts.get_server()
if db_name not in server:
server.create(db_name)
return True
return False | python | {
"resource": ""
} |
q265723 | coerce | validation | def coerce(value1, value2, default=None):
"""Exclude NoSet objec
.. code-block::
>>> coerce(NoSet, 'value')
'value'
"""
if value1 is not NoSet:
return value1
elif value2 is not NoSet:
return value2
else:
return default | python | {
"resource": ""
} |
q265724 | parse_hub_key | validation | def parse_hub_key(key):
"""Parse a hub key into a dictionary of component parts
:param key: str, a hub key
:returns: dict, hub key split into parts
:raises: ValueError
"""
if key is None:
raise ValueError('Not a valid key')
match = re.match(PATTERN, key)
if not match:
match = re.match(PATTERN_S0, key)
if not match:
raise ValueError('Not a valid key')
return dict(map(normalise_part, zip([p for p in PARTS_S0.keys()], match.groups())))
return dict(zip(PARTS.keys(), match.groups())) | python | {
"resource": ""
} |
q265725 | match_part | validation | def match_part(string, part):
"""Raise an exception if string doesn't match a part's regex
:param string: str
:param part: a key in the PARTS dict
:raises: ValueError, TypeError
"""
if not string or not re.match('^(' + PARTS[part] + ')$', string):
raise ValueError('{} should match {}'.format(part, PARTS[part])) | python | {
"resource": ""
} |
q265726 | Clifier.apply_defaults | validation | def apply_defaults(self, commands):
""" apply default settings to commands
not static, shadow "self" in eval
"""
for command in commands:
if 'action' in command and "()" in command['action']:
command['action'] = eval("self.{}".format(command['action']))
if command['keys'][0].startswith('-'):
if 'required' not in command:
command['required'] = False | python | {
"resource": ""
} |
q265727 | Clifier.create_commands | validation | def create_commands(self, commands, parser):
""" add commands to parser """
self.apply_defaults(commands)
def create_single_command(command):
keys = command['keys']
del command['keys']
kwargs = {}
for item in command:
kwargs[item] = command[item]
parser.add_argument(*keys, **kwargs)
if len(commands) > 1:
for command in commands:
create_single_command(command)
else:
create_single_command(commands[0]) | python | {
"resource": ""
} |
q265728 | Clifier.create_subparsers | validation | def create_subparsers(self, parser):
""" get config for subparser and create commands"""
subparsers = parser.add_subparsers()
for name in self.config['subparsers']:
subparser = subparsers.add_parser(name)
self.create_commands(self.config['subparsers'][name], subparser) | python | {
"resource": ""
} |
q265729 | Clifier.show_version | validation | def show_version(self):
""" custom command line action to show version """
class ShowVersionAction(argparse.Action):
def __init__(inner_self, nargs=0, **kw):
super(ShowVersionAction, inner_self).__init__(nargs=nargs, **kw)
def __call__(inner_self, parser, args, value, option_string=None):
print("{parser_name} version: {version}".format(
parser_name=self.config.get(
"parser", {}).get("prog"),
version=self.prog_version))
return ShowVersionAction | python | {
"resource": ""
} |
q265730 | Clifier.check_path_action | validation | def check_path_action(self):
""" custom command line action to check file exist """
class CheckPathAction(argparse.Action):
def __call__(self, parser, args, value, option_string=None):
if type(value) is list:
value = value[0]
user_value = value
if option_string == 'None':
if not os.path.isdir(value):
_current_user = os.path.expanduser("~")
if not value.startswith(_current_user) \
and not value.startswith(os.getcwd()):
if os.path.isdir(os.path.join(_current_user, value)):
value = os.path.join(_current_user, value)
elif os.path.isdir(os.path.join(os.getcwd(), value)):
value = os.path.join(os.getcwd(), value)
else:
value = None
else:
value = None
elif option_string == '--template-name':
if not os.path.isdir(value):
if not os.path.isdir(os.path.join(args.target, value)):
value = None
if not value:
logger.error("Could not to find path %s. Please provide "
"correct path to %s option",
user_value, option_string)
exit(1)
setattr(args, self.dest, value)
return CheckPathAction | python | {
"resource": ""
} |
q265731 | new_user | validation | def new_user(yaml_path):
'''
Return the consumer and OAuth tokens from the three-legged OAuth process and
save them in a YAML file in the user's home directory.
'''
print 'Retrieve API Key from https://www.shirts.io/accounts/api_console/'
api_key = raw_input('Shirts.io API Key: ')
tokens = {
'api_key': api_key,
}
yaml_file = open(yaml_path, 'w+')
yaml.dump(tokens, yaml_file, indent=2)
yaml_file.close()
return tokens | python | {
"resource": ""
} |
q265732 | _AddPropertiesForExtensions | validation | def _AddPropertiesForExtensions(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.items():
constant_name = extension_name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, extension_field.number) | python | {
"resource": ""
} |
q265733 | _InternalUnpackAny | validation | def _InternalUnpackAny(msg):
"""Unpacks Any message and returns the unpacked message.
This internal method is different from the public Any Unpack method, which takes
the target message as an argument. _InternalUnpackAny does not have the
target message type and needs to find the message type in the descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
"""
type_url = msg.type_url
db = symbol_database.Default()
if not type_url:
return None
# TODO(haberman): For now we just strip the hostname. Better logic will be
# required.
type_name = type_url.split("/")[-1]
descriptor = db.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = db.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message | python | {
"resource": ""
} |
q265734 | sina_xml_to_url_list | test | def sina_xml_to_url_list(xml_data):
"""str->list
Convert XML to URL List.
From Biligrab.
"""
rawurl = []
dom = parseString(xml_data)
for node in dom.getElementsByTagName('durl'):
url = node.getElementsByTagName('url')[0]
rawurl.append(url.childNodes[0].data)
return rawurl | python | {
"resource": ""
} |
q265735 | dailymotion_download | test | def dailymotion_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
"""Downloads Dailymotion videos by URL.
"""
html = get_content(rebuilt_url(url))
info = json.loads(match1(html, r'qualities":({.+?}),"'))
title = match1(html, r'"video_title"\s*:\s*"([^"]+)"') or \
match1(html, r'"title"\s*:\s*"([^"]+)"')
title = unicodize(title)
for quality in ['1080','720','480','380','240','144','auto']:
try:
real_url = info[quality][1]["url"]
if real_url:
break
except KeyError:
pass
mime, ext, size = url_info(real_url)
print_info(site_info, title, mime, size)
if not info_only:
download_urls([real_url], title, ext, size, output_dir=output_dir, merge=merge) | python | {
"resource": ""
} |
q265736 | sina_download | test | def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
"""Downloads Sina videos by URL.
"""
if 'news.sina.com.cn/zxt' in url:
sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
return
vid = match1(url, r'vid=(\d+)')
if vid is None:
video_page = get_content(url)
vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'')
if hd_vid == '0':
vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|')
vid = vids[-1]
if vid is None:
vid = match1(video_page, r'vid:"?(\d+)"?')
if vid:
#title = match1(video_page, r'title\s*:\s*\'([^\']+)\'')
sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
else:
vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"')
if vkey is None:
vid = match1(url, r'#(\d+)')
sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
return
title = match1(video_page, r'title\s*:\s*"([^"]+)"')
sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only) | python | {
"resource": ""
} |
q265737 | sprint | test | def sprint(text, *colors):
"""Format text with color or other effects into ANSI escaped string."""
return "\33[{}m{content}\33[{}m".format(";".join([str(color) for color in colors]), RESET, content=text) if IS_ANSI_TERMINAL and colors else text | python | {
"resource": ""
} |
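`RESET` is presumably the SGR reset code 0 (an assumption; the constant is defined elsewhere in the module). The escape-sequence construction in isolation:

```python
RESET = 0          # assumed SGR reset code
RED, BOLD = 31, 1  # standard ANSI SGR codes

def ansi_wrap(text, *colors):
    # Same formatting as sprint() above, minus the terminal check.
    return "\33[{}m{content}\33[{}m".format(
        ";".join(str(color) for color in colors), RESET, content=text)

print(repr(ansi_wrap('error', RED, BOLD)))  # '\x1b[31;1merror\x1b[0m'
```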
q265738 | print_log | test | def print_log(text, *colors):
"""Print a log message to standard error."""
sys.stderr.write(sprint("{}: {}".format(script_name, text), *colors) + "\n") | python | {
"resource": ""
} |
q265739 | e | test | def e(message, exit_code=None):
"""Print an error log message."""
print_log(message, YELLOW, BOLD)
if exit_code is not None:
sys.exit(exit_code) | python | {
"resource": ""
} |
q265740 | wtf | test | def wtf(message, exit_code=1):
"""What a Terrible Failure!"""
print_log(message, RED, BOLD)
if exit_code is not None:
sys.exit(exit_code) | python | {
"resource": ""
} |
q265741 | detect_os | test | def detect_os():
"""Detect operating system.
"""
# Inspired by:
# https://github.com/scivision/pybashutils/blob/78b7f2b339cb03b1c37df94015098bbe462f8526/pybashutils/windows_linux_detect.py
syst = system().lower()
os = 'unknown'
if 'cygwin' in syst:
os = 'cygwin'
elif 'darwin' in syst:
os = 'mac'
elif 'linux' in syst:
os = 'linux'
# detect WSL https://github.com/Microsoft/BashOnWindows/issues/423
try:
with open('/proc/version', 'r') as f:
if 'microsoft' in f.read().lower():
os = 'wsl'
except: pass
elif 'windows' in syst:
os = 'windows'
elif 'bsd' in syst:
os = 'bsd'
return os | python | {
"resource": ""
} |
q265742 | vimeo_download_by_channel | test | def vimeo_download_by_channel(url, output_dir='.', merge=False, info_only=False, **kwargs):
"""str->None"""
# https://vimeo.com/channels/464686
channel_id = match1(url, r'http://vimeo.com/channels/(\w+)')
vimeo_download_by_channel_id(channel_id, output_dir, merge, info_only, **kwargs) | python | {
"resource": ""
} |
q265743 | ckplayer_get_info_by_xml | test | def ckplayer_get_info_by_xml(ckinfo):
"""str->dict
Information for CKPlayer API content."""
e = ET.XML(ckinfo)
video_dict = {'title': '',
#'duration': 0,
'links': [],
'size': 0,
'flashvars': '',}
dictified = dictify(e)['ckplayer']
if 'info' in dictified:
if '_text' in dictified['info'][0]['title'][0]: #title
video_dict['title'] = dictified['info'][0]['title'][0]['_text'].strip()
#if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip(): #duration
#video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()
if '_text' in dictified['video'][0]['size'][0]: #size exists for 1 piece
video_dict['size'] = sum([int(i['size'][0]['_text']) for i in dictified['video']])
if '_text' in dictified['video'][0]['file'][0]: #link exist
video_dict['links'] = [i['file'][0]['_text'].strip() for i in dictified['video']]
if '_text' in dictified['flashvars'][0]:
video_dict['flashvars'] = dictified['flashvars'][0]['_text'].strip()
return video_dict | python | {
"resource": ""
} |
q265744 | get_video_url_from_video_id | test | def get_video_url_from_video_id(video_id):
"""Splicing URLs according to video ID to get video details"""
# from js
data = [""] * 256
for index, _ in enumerate(data):
t = index
for i in range(8):
t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1)
data[index] = t
def tmp():
rand_num = random.random()
path = "/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}".format(video_id=video_id,
random_num=str(rand_num)[2:])
e = o = r = -1
i, a = 0, len(path)
while i < a:
e = ord(path[i])
i += 1
if e < 128:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)]
else:
if e < 2048:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
else:
if 55296 <= e < 57344:
e = (1023 & e) + 64
i += 1
o = 1023 & t.url(i)  # NOTE: 't' is undefined here; apparent leftover from the JS port
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))]
else:
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))]
r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
return "https://ib.365yg.com{path}&s={param}".format(path=path, param=unsigned_right_shitf(r ^ -1, 0))
while 1:
url = tmp()
if url.split("=")[-1][0] != "-": # 参数s不能为负数
return url | python | {
"resource": ""
} |
q265745 | MGTV.get_mgtv_real_url | test | def get_mgtv_real_url(url):
"""str->list of str
Give you the real URLs."""
content = loads(get_content(url))
m3u_url = content['info']
split = urlsplit(m3u_url)
base_url = "{scheme}://{netloc}{path}/".format(scheme = split[0],
netloc = split[1],
path = dirname(split[2]))
content = get_content(content['info']) #get the REAL M3U url, maybe to be changed later?
segment_list = []
segments_size = 0
for i in content.split():
if not i.startswith('#'): #not the best way, better we use the m3u8 package
segment_list.append(base_url + i)
# use ext-info for fast size calculate
elif i.startswith('#EXT-MGTV-File-SIZE:'):
segments_size += int(i[i.rfind(':')+1:])
return m3u_url, segments_size, segment_list | python | {
"resource": ""
} |
q265746 | legitimize | test | def legitimize(text, os=detect_os()):
"""Converts a string to a valid filename.
"""
# POSIX systems
text = text.translate({
0: None,
ord('/'): '-',
ord('|'): '-',
})
# FIXME: do some filesystem detection
if os == 'windows' or os == 'cygwin' or os == 'wsl':
# Windows (non-POSIX namespace)
text = text.translate({
# Reserved in Windows VFAT and NTFS
ord(':'): '-',
ord('*'): '-',
ord('?'): '-',
ord('\\'): '-',
ord('\"'): '\'',
# Reserved in Windows VFAT
ord('+'): '-',
ord('<'): '-',
ord('>'): '-',
ord('['): '(',
ord(']'): ')',
ord('\t'): ' ',
})
else:
# *nix
if os == 'mac':
# Mac OS HFS+
text = text.translate({
ord(':'): '-',
})
# Remove leading .
if text.startswith("."):
text = text[1:]
text = text[:80] # Trim to 80 characters
return text | python | {
"resource": ""
} |
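The translation-table idiom above (`str.translate` with a dict of code points, where `None` deletes a character) can be tried in isolation:

```python
# Map code points to replacements; None deletes the character.
text = 'a/b|c:d*e?\x00'
posix = text.translate({0: None, ord('/'): '-', ord('|'): '-'})
windows = posix.translate({ord(':'): '-', ord('*'): '-', ord('?'): '-'})

print(posix)    # a-b-c:d*e?
print(windows)  # a-b-c-d-e-
```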
q265747 | cbs_download | test | def cbs_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
"""Downloads CBS videos by URL.
"""
html = get_content(url)
pid = match1(html, r'video\.settings\.pid\s*=\s*\'([^\']+)\'')
title = match1(html, r'video\.settings\.title\s*=\s*\"([^\"]+)\"')
theplatform_download_by_pid(pid, title, output_dir=output_dir, merge=merge, info_only=info_only) | python | {
"resource": ""
} |
q265748 | Iqiyi.download | test | def download(self, **kwargs):
"""Override the original one
Ugly ugly dirty hack"""
if 'json_output' in kwargs and kwargs['json_output']:
json_output.output(self)
elif 'info_only' in kwargs and kwargs['info_only']:
if 'stream_id' in kwargs and kwargs['stream_id']:
# Display the stream
stream_id = kwargs['stream_id']
if 'index' not in kwargs:
self.p(stream_id)
else:
self.p_i(stream_id)
else:
# Display all available streams
if 'index' not in kwargs:
self.p([])
else:
stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag']
self.p_i(stream_id)
else:
if 'stream_id' in kwargs and kwargs['stream_id']:
# Download the stream
stream_id = kwargs['stream_id']
else:
# Download stream with the best quality
stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag']
if 'index' not in kwargs:
self.p(stream_id)
else:
self.p_i(stream_id)
if stream_id in self.streams:
urls = self.streams[stream_id]['src']
ext = self.streams[stream_id]['container']
total_size = self.streams[stream_id]['size']
else:
urls = self.dash_streams[stream_id]['src']
ext = self.dash_streams[stream_id]['container']
total_size = self.dash_streams[stream_id]['size']
if not urls:
log.wtf('[Failed] Cannot extract video source.')
# For legacy main()
#Here's the change!!
download_url_ffmpeg(urls[0], self.title, 'mp4', output_dir=kwargs['output_dir'], merge=kwargs['merge'], stream=False)
if not kwargs['caption']:
print('Skipping captions.')
return
for lang in self.caption_tracks:
filename = '%s.%s.srt' % (get_filename(self.title), lang)
print('Saving %s ... ' % filename, end="", flush=True)
srt = self.caption_tracks[lang]
with open(os.path.join(kwargs['output_dir'], filename),
'w', encoding='utf-8') as x:
x.write(srt)
print('Done.') | python | {
"resource": ""
} |
q265749 | acfun_download_by_vid | test | def acfun_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
"""str, str, str, bool, bool ->None
Download Acfun video by vid.
Call Acfun API, decide which site to use, and pass the job to its
extractor.
"""
#first call the main parsing API
info = json.loads(get_content('http://www.acfun.cn/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
#decide sourceId to know which extractor to use
if 'sourceId' in info: sourceId = info['sourceId']
# danmakuId = info['danmakuId']
#call extractor decided by sourceId
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_vid(sourceId, title, True, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'letv':
letvcloud_download_by_vu(sourceId, '2d8c027396', title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'zhuzhan':
#As in Jul.28.2016, Acfun is using embsig to anti hotlink so we need to pass this
#In Mar. 2017 there is a dedicated ``acfun_proxy'' in youku cloud player
#old code removed
url = 'http://www.acfun.cn/v/ac' + vid
yk_streams = youku_acfun_proxy(info['sourceId'], info['encode'], url)
seq = ['mp4hd3', 'mp4hd2', 'mp4hd', 'flvhd']
for t in seq:
if yk_streams.get(t):
preferred = yk_streams[t]
break
#total_size in the json could be incorrect(F.I. 0)
size = 0
for url in preferred[0]:
_, _, seg_size = url_info(url)
size += seg_size
#fallback to flvhd is not quite possible
if re.search(r'fid=[0-9A-Z\-]*.flv', preferred[0][0]):
ext = 'flv'
else:
ext = 'mp4'
print_info(site_info, title, ext, size)
if not info_only:
download_urls(preferred[0], title, ext, size, output_dir=output_dir, merge=merge)
else:
raise NotImplementedError(sourceType)
if not info_only and not dry_run:
if not kwargs['caption']:
print('Skipping danmaku.')
return
try:
title = get_filename(title)
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(vid)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w', encoding='utf-8') as x:
x.write(cmt)
except:
pass | python | {
"resource": ""
} |
q265750 | matchall | test | def matchall(text, patterns):
"""Scans through a string for substrings matched some patterns.
Args:
text: A string to be scanned.
patterns: a list of regex pattern.
Returns:
A list of all matches; empty if none matched.
"""
ret = []
for pattern in patterns:
match = re.findall(pattern, text)
ret += match
return ret | python | {
"resource": ""
} |
q265751 | parse_query_param | test | def parse_query_param(url, param):
"""Parses the query string of a URL and returns the value of a parameter.
Args:
url: A URL.
param: A string representing the name of the parameter.
Returns:
The value of the parameter.
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None | python | {
"resource": ""
} |
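The parse/extract chain used by `parse_query_param` in isolation:

```python
from urllib import parse

url = 'https://example.com/watch?v=abc123&t=42'
query = parse.parse_qs(parse.urlparse(url).query)
print(query['v'][0])  # abc123
print(query['t'][0])  # 42
```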
q265752 | get_content | test | def get_content(url, headers={}, decoded=True):
"""Gets the content of a URL via sending a HTTP GET request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
logging.debug('get_content: %s' % url)
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
response = urlopen_with_retry(req)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type', ''), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset, 'ignore')
else:
data = data.decode('utf-8', 'ignore')
return data | python | {
"resource": ""
} |
q265753 | post_content | test | def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):
"""Post the content of a URL via sending a HTTP POST request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
if kwargs.get('post_data_raw'):
logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw']))
else:
logging.debug('post_content: %s\npost_data: %s' % (url, post_data))
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
if kwargs.get('post_data_raw'):
post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')
else:
post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')
response = urlopen_with_retry(req, data=post_data_enc)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type'), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data | python | {
"resource": ""
} |
q265754 | parse_host | test | def parse_host(host):
"""Parses host name and port number from a string.
"""
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port) | python | {
"resource": ""
} |
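A quick exercise of the three branches above (bare port, scheme-less host:port, full URL):

```python
from urllib import parse
import re

def parse_host_demo(host):
    # Bare digits are treated as a port on all interfaces.
    if re.match(r'^(\d+)$', host) is not None:
        return ('0.0.0.0', int(host))
    # Prefix '//' so urlparse sees a netloc when no scheme is given.
    if re.match(r'^(\w+)://', host) is None:
        host = '//' + host
    o = parse.urlparse(host)
    return (o.hostname or '0.0.0.0', o.port or 0)

print(parse_host_demo('8080'))                   # ('0.0.0.0', 8080)
print(parse_host_demo('localhost:9000'))         # ('localhost', 9000)
print(parse_host_demo('http://example.com:80'))  # ('example.com', 80)
```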
q265755 | showroom_get_roomid_by_room_url_key | test | def showroom_get_roomid_by_room_url_key(room_url_key):
"""str->str"""
fake_headers_mobile = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
}
webpage_url = 'https://www.showroom-live.com/' + room_url_key
html = get_content(webpage_url, headers = fake_headers_mobile)
roomid = match1(html, r'room\?room_id\=(\d+)')
assert roomid
return roomid | python | {
"resource": ""
} |
q265756 | _wanmen_get_title_by_json_topic_part | test | def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex):
"""JSON, int, int, int->str
Get a proper title with courseid+topicID+partID."""
return '_'.join([json_content[0]['name'],
json_content[0]['Topics'][tIndex]['name'],
json_content[0]['Topics'][tIndex]['Parts'][pIndex]['name']]) | python | {
"resource": ""
} |
q265757 | wanmen_download_by_course | test | def wanmen_download_by_course(json_api_content, output_dir='.', merge=True, info_only=False, **kwargs):
"""int->None
Download a WHOLE course.
Reuse the API call to save time."""
for tIndex in range(len(json_api_content[0]['Topics'])):
for pIndex in range(len(json_api_content[0]['Topics'][tIndex]['Parts'])):
wanmen_download_by_course_topic_part(json_api_content,
tIndex,
pIndex,
output_dir=output_dir,
merge=merge,
info_only=info_only,
**kwargs) | python | {
"resource": ""
} |
q265758 | wanmen_download_by_course_topic_part | test | def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir='.', merge=True, info_only=False, **kwargs):
"""int, int, int->None
Download ONE PART of the course."""
html = json_api_content
title = _wanmen_get_title_by_json_topic_part(html,
tIndex,
pIndex)
bokeccID = _wanmen_get_boke_id_by_json_topic_part(html,
tIndex,
pIndex)
bokecc_download_by_id(vid = bokeccID, title = title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) | python | {
"resource": ""
} |
q265759 | BaseExecutor.has_task | test | def has_task(self, task_instance):
"""
Checks if a task is either queued or running in this executor
:param task_instance: TaskInstance
:return: True if the task is known to this executor
"""
if task_instance.key in self.queued_tasks or task_instance.key in self.running:
return True | python | {
"resource": ""
} |
q265760 | BaseExecutor.get_event_buffer | test | def get_event_buffer(self, dag_ids=None):
"""
Returns and flushes the event buffer. In case dag_ids is specified
it will only return and flush events for the given dag_ids. Otherwise
it returns and flushes all
:param dag_ids: the dag_ids to return events for, if None returns all
:return: a dict of events
"""
cleared_events = dict()
if dag_ids is None:
cleared_events = self.event_buffer
self.event_buffer = dict()
else:
for key in list(self.event_buffer.keys()):
dag_id, _, _, _ = key
if dag_id in dag_ids:
cleared_events[key] = self.event_buffer.pop(key)
return cleared_events | python | {
"resource": ""
} |
q265761 | SnowflakeHook.get_conn | test | def get_conn(self):
"""
Returns a snowflake.connection object
"""
conn_config = self._get_conn_params()
conn = snowflake.connector.connect(**conn_config)
return conn | python | {
"resource": ""
} |
q265762 | SnowflakeHook._get_aws_credentials | test | def _get_aws_credentials(self):
"""
Returns aws_access_key_id and aws_secret_access_key
from the connection's extra field.
Intended to be used by external import and export statements.
"""
if self.snowflake_conn_id:
connection_object = self.get_connection(self.snowflake_conn_id)
if 'aws_secret_access_key' in connection_object.extra_dejson:
aws_access_key_id = connection_object.extra_dejson.get(
'aws_access_key_id')
aws_secret_access_key = connection_object.extra_dejson.get(
'aws_secret_access_key')
return aws_access_key_id, aws_secret_access_key | python | {
"resource": ""
} |
q265763 | GrpcHook._get_field | test | def _get_field(self, field_name, default=None):
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The grpc hook type adds custom UI elements
to the hook page, which allow admins to specify scopes, credential pem files, etc.
They get formatted as shown below.
"""
full_field_name = 'extra__grpc__{}'.format(field_name)
if full_field_name in self.extras:
return self.extras[full_field_name]
else:
return default | python | {
"resource": ""
} |
q265764 | PostgresHook.copy_expert | test | def copy_expert(self, sql, filename, open=open):
"""
Executes SQL using psycopg2 copy_expert method.
Necessary to execute COPY command without access to a superuser.
Note: if this method is called with a "COPY FROM" statement and
the specified input file does not exist, it creates an empty
file and no data is loaded, but the operation succeeds.
So if users want to be aware when the input file does not exist,
they have to check its existence by themselves.
"""
if not os.path.isfile(filename):
with open(filename, 'w'):
pass
with open(filename, 'r+') as f:
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
cur.copy_expert(sql, f)
f.truncate(f.tell())
conn.commit() | python | {
"resource": ""
} |
q265765 | PostgresHook.bulk_dump | test | def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
"""
self.copy_expert("COPY {table} TO STDOUT".format(table=table), tmp_file) | python | {
"resource": ""
} |
q265766 | FileToGoogleCloudStorageOperator.execute | test | def execute(self, context):
"""
Uploads the file to Google Cloud Storage
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket_name=self.bucket,
object_name=self.dst,
mime_type=self.mime_type,
filename=self.src,
gzip=self.gzip,
) | python | {
"resource": ""
} |
q265767 | max_partition | test | def max_partition(
table, schema="default", field=None, filter_map=None,
metastore_conn_id='metastore_default'):
"""
Gets the max partition for a table.
:param schema: The hive schema the table lives in
:type schema: str
:param table: The hive table you are interested in, supports the dot
notation as in "my_database.my_table", if a dot is found,
the schema param is disregarded
:type table: str
:param metastore_conn_id: The hive connection you are interested in.
If your default is set you don't need to use this parameter.
:type metastore_conn_id: str
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:type filter_map: map
:param field: the field to get the max value from. If there's only
one partition field, this will be inferred
:type field: str
>>> max_partition('airflow.static_babynames_partitioned')
'2015-01-01'
"""
from airflow.hooks.hive_hooks import HiveMetastoreHook
if '.' in table:
schema, table = table.split('.')
hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
return hh.max_partition(
schema=schema, table_name=table, field=field, filter_map=filter_map) | python | {
"resource": ""
} |
q265768 | MySqlHook.get_conn | test | def get_conn(self):
"""
Returns a mysql connection object
"""
conn = self.get_connection(self.mysql_conn_id)
conn_config = {
"user": conn.login,
"passwd": conn.password or '',
"host": conn.host or 'localhost',
"db": self.schema or conn.schema or ''
}
if not conn.port:
conn_config["port"] = 3306
else:
conn_config["port"] = int(conn.port)
if conn.extra_dejson.get('charset', False):
conn_config["charset"] = conn.extra_dejson["charset"]
if (conn_config["charset"]).lower() == 'utf8' or\
(conn_config["charset"]).lower() == 'utf-8':
conn_config["use_unicode"] = True
if conn.extra_dejson.get('cursor', False):
if (conn.extra_dejson["cursor"]).lower() == 'sscursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSCursor
elif (conn.extra_dejson["cursor"]).lower() == 'dictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.DictCursor
elif (conn.extra_dejson["cursor"]).lower() == 'ssdictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSDictCursor
local_infile = conn.extra_dejson.get('local_infile', False)
if conn.extra_dejson.get('ssl', False):
# SSL parameter for MySQL has to be a dictionary and in case
# of extra/dejson we can get string if extra is passed via
# URL parameters
dejson_ssl = conn.extra_dejson['ssl']
if isinstance(dejson_ssl, six.string_types):
dejson_ssl = json.loads(dejson_ssl)
conn_config['ssl'] = dejson_ssl
if conn.extra_dejson.get('unix_socket'):
conn_config['unix_socket'] = conn.extra_dejson['unix_socket']
if local_infile:
conn_config["local_infile"] = 1
conn = MySQLdb.connect(**conn_config)
return conn | python | {
"resource": ""
} |
q265769 | task_state | test | def task_state(args):
"""
Returns the state of a TaskInstance at the command line.
>>> airflow task_state tutorial sleep 2015-01-01
success
"""
dag = get_dag(args)
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
print(ti.current_state()) | python | {
"resource": ""
} |
q265770 | restart_workers | test | def restart_workers(gunicorn_master_proc, num_workers_expected, master_timeout):
"""
Runs forever, monitoring the child processes of @gunicorn_master_proc and
restarting workers occasionally.
Each iteration of the loop traverses one edge of this state transition
diagram, where each state (node) represents
[ num_ready_workers_running / num_workers_running ]. We expect most time to
be spent in [n / n]. `bs` is the setting webserver.worker_refresh_batch_size.
The horizontal transition at ? happens after the new worker parses all the
dags (so it could take a while!)
V ────────────────────────────────────────────────────────────────────────┐
[n / n] ──TTIN──> [ [n, n+bs) / n + bs ] ────?───> [n + bs / n + bs] ──TTOU─┘
^ ^───────────────┘
│
│ ┌────────────────v
└──────┴────── [ [0, n) / n ] <─── start
We change the number of workers by sending TTIN and TTOU to the gunicorn
master process, which increases and decreases the number of child workers
respectively. Gunicorn guarantees that on TTOU workers are terminated
gracefully and that the oldest worker is terminated.
"""
def wait_until_true(fn, timeout=0):
"""
Sleeps until fn is true
"""
t = time.time()
while not fn():
if 0 < timeout <= time.time() - t:
raise AirflowWebServerTimeout(
"No response from gunicorn master within {0} seconds"
.format(timeout))
time.sleep(0.1)
def start_refresh(gunicorn_master_proc):
batch_size = conf.getint('webserver', 'worker_refresh_batch_size')
log.debug('%s doing a refresh of %s workers', state, batch_size)
sys.stdout.flush()
sys.stderr.flush()
excess = 0
for _ in range(batch_size):
gunicorn_master_proc.send_signal(signal.SIGTTIN)
excess += 1
wait_until_true(lambda: num_workers_expected + excess ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
try:
wait_until_true(lambda: num_workers_expected ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
while True:
num_workers_running = get_num_workers_running(gunicorn_master_proc)
num_ready_workers_running = \
get_num_ready_workers_running(gunicorn_master_proc)
state = '[{0} / {1}]'.format(num_ready_workers_running, num_workers_running)
# Whenever some workers are not ready, wait until all workers are ready
if num_ready_workers_running < num_workers_running:
log.debug('%s some workers are starting up, waiting...', state)
sys.stdout.flush()
time.sleep(1)
# Kill a worker gracefully by asking gunicorn to reduce number of workers
elif num_workers_running > num_workers_expected:
excess = num_workers_running - num_workers_expected
log.debug('%s killing %s workers', state, excess)
for _ in range(excess):
gunicorn_master_proc.send_signal(signal.SIGTTOU)
excess -= 1
wait_until_true(lambda: num_workers_expected + excess ==
get_num_workers_running(gunicorn_master_proc),
master_timeout)
# Start a new worker by asking gunicorn to increase number of workers
elif num_workers_running == num_workers_expected:
refresh_interval = conf.getint('webserver', 'worker_refresh_interval')
log.debug(
'%s sleeping for %ss starting doing a refresh...',
state, refresh_interval
)
time.sleep(refresh_interval)
start_refresh(gunicorn_master_proc)
else:
# num_ready_workers_running == num_workers_running < num_workers_expected
log.error((
"%s some workers seem to have died and gunicorn"
"did not restart them as expected"
), state)
time.sleep(10)
if len(
psutil.Process(gunicorn_master_proc.pid).children()
) < num_workers_expected:
start_refresh(gunicorn_master_proc)
except (AirflowWebServerTimeout, OSError) as err:
log.error(err)
log.error("Shutting down webserver")
try:
gunicorn_master_proc.terminate()
gunicorn_master_proc.wait()
finally:
sys.exit(1) | python | {
"resource": ""
} |
q265771 | CloudTranslateHook.get_conn | test | def get_conn(self):
"""
Retrieves connection to Cloud Translate
:return: Google Cloud Translate client object.
:rtype: Client
"""
if not self._client:
self._client = Client(credentials=self._get_credentials())
return self._client | python | {
"resource": ""
} |
q265772 | CloudTranslateHook.translate | test | def translate(
self, values, target_language, format_=None, source_language=None, model=None
):
"""Translate a string or list of strings.
See https://cloud.google.com/translate/docs/translating-text
:type values: str or list
:param values: String or list of strings to translate.
:type target_language: str
:param target_language: The language to translate results into. This
is required by the API and defaults to
the target language of the current instance.
:type format_: str
:param format_: (Optional) One of ``text`` or ``html``, to specify
if the input text is plain text or HTML.
:type source_language: str or None
:param source_language: (Optional) The language of the text to
be translated.
:type model: str or None
:param model: (Optional) The model used to translate the text, such
as ``'base'`` or ``'nmt'``.
:rtype: str or list
:returns: A list of dictionaries, one per queried value. Each
dictionary may contain the keys below (not all are
present in every case):
* ``detectedSourceLanguage``: The detected language (as an
ISO 639-1 language code) of the text.
* ``translatedText``: The translation of the text into the
target language.
* ``input``: The corresponding input value.
* ``model``: The model used to translate the text.
If only a single value is passed, then only a single
dictionary will be returned.
:raises: :class:`~exceptions.ValueError` if the number of
values and translations differ.
"""
client = self.get_conn()
return client.translate(
values=values,
target_language=target_language,
format_=format_,
source_language=source_language,
model=model,
) | python | {
"resource": ""
} |
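A minimal usage sketch for the method above. The connection id, sample strings, and import path are assumptions for illustration (the path matches Airflow's contrib layout of this era, but verify it against your version):

from airflow.contrib.hooks.gcp_translate_hook import CloudTranslateHook

hook = CloudTranslateHook(gcp_conn_id='google_cloud_default')  # assumed conn id
results = hook.translate(
    values=['The quick brown fox'],  # a list in -> a list of dicts out
    target_language='de',
    format_='text',
    source_language=None,  # let the API detect the source language
    model='nmt',
)
print(results[0]['translatedText'])
print(results[0].get('detectedSourceLanguage'))  # e.g. 'en'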
q265773 | CloudSqlHook.get_instance | test | def get_instance(self, instance, project_id=None):
"""
Retrieves a resource containing information about a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: A Cloud SQL instance resource.
:rtype: dict
"""
return self.get_conn().instances().get(
project=project_id,
instance=instance
).execute(num_retries=self.num_retries) | python | {
"resource": ""
} |
q265774 | CloudSqlHook.create_instance | test | def create_instance(self, body, project_id=None):
"""
Creates a new Cloud SQL instance.
:param body: Body required by the Cloud SQL insert API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body.
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().instances().insert(
project=project_id,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name) | python | {
"resource": ""
} |
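For reference, a sketch of a minimal insert body accepted by the API linked above. Every value is a placeholder, and the import path is an assumption based on Airflow's contrib layout:

from airflow.contrib.hooks.gcp_sql_hook import CloudSqlHook  # assumed path

body = {
    'name': 'my-instance',               # instance id (no project prefix)
    'settings': {
        'tier': 'db-n1-standard-1',      # machine type
        'ipConfiguration': {'ipv4Enabled': True},
    },
}
hook = CloudSqlHook(api_version='v1beta4')
hook.create_instance(body=body, project_id='example-project')  # blocks until done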
q265775 | CloudSqlHook.patch_instance | test | def patch_instance(self, body, instance, project_id=None):
"""
Updates settings of a Cloud SQL instance.
Caution: This is not a partial update, so you must include values for
all the settings that you want to retain.
:param body: Body required by the Cloud SQL patch API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/patch#request-body.
:type body: dict
:param instance: Cloud SQL instance ID. This does not include the project ID.
:type instance: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().instances().patch(
project=project_id,
instance=instance,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name) | python | {
"resource": ""
} |
q265776 | CloudSqlHook.delete_instance | test | def delete_instance(self, instance, project_id=None):
"""
Deletes a Cloud SQL instance.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param instance: Cloud SQL instance ID. This does not include the project ID.
:type instance: str
:return: None
"""
response = self.get_conn().instances().delete(
project=project_id,
instance=instance,
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name) | python | {
"resource": ""
} |
q265777 | CloudSqlHook.get_database | test | def get_database(self, instance, database, project_id=None):
"""
Retrieves a database resource from a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param database: Name of the database in the instance.
:type database: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: A Cloud SQL database resource, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases#resource.
:rtype: dict
"""
return self.get_conn().databases().get(
project=project_id,
instance=instance,
database=database
).execute(num_retries=self.num_retries) | python | {
"resource": ""
} |
q265778 | CloudSqlHook.create_database | test | def create_database(self, instance, body, project_id=None):
"""
Creates a new database inside a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().databases().insert(
project=project_id,
instance=instance,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name) | python | {
"resource": ""
} |
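A sketch of a matching request body for the database insert call above; instance and database names are placeholders, and the charset fields apply to MySQL instances only:

from airflow.contrib.hooks.gcp_sql_hook import CloudSqlHook  # assumed path

hook = CloudSqlHook(api_version='v1beta4')
hook.create_database(
    instance='my-instance',
    body={
        'instance': 'my-instance',
        'name': 'my_database',
        'charset': 'utf8',               # MySQL-specific, optional
        'collation': 'utf8_general_ci',  # MySQL-specific, optional
    },
    project_id='example-project',
)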
q265779 | CloudSqlHook.patch_database | test | def patch_database(self, instance, database, body, project_id=None):
"""
Updates a database resource inside a Cloud SQL instance.
This method supports patch semantics.
See https://cloud.google.com/sql/docs/mysql/admin-api/how-tos/performance#patch.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param database: Name of the database to be updated in the instance.
:type database: str
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/databases/insert#request-body.
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().databases().patch(
project=project_id,
instance=instance,
database=database,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name) | python | {
"resource": ""
} |
q265780 | CloudSqlHook.delete_database | test | def delete_database(self, instance, database, project_id=None):
"""
Deletes a database from a Cloud SQL instance.
:param instance: Database instance ID. This does not include the project ID.
:type instance: str
:param database: Name of the database to be deleted in the instance.
:type database: str
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().databases().delete(
project=project_id,
instance=instance,
database=database
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name) | python | {
"resource": ""
} |
q265781 | CloudSqlHook.export_instance | test | def export_instance(self, instance, body, project_id=None):
"""
Exports data from a Cloud SQL instance to a Cloud Storage bucket as a SQL dump
or CSV file.
:param instance: Database instance ID of the Cloud SQL instance. This does not include the
project ID.
:type instance: str
:param body: The request body, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/export#request-body
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
try:
response = self.get_conn().instances().export(
project=project_id,
instance=instance,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
except HttpError as ex:
raise AirflowException(
'Exporting instance {} failed: {}'.format(instance, ex.content)
) | python | {
"resource": ""
} |
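An illustrative export body: dump one database to a Cloud Storage object as a gzipped SQL file. Bucket, instance, and database names are placeholders:

from airflow.contrib.hooks.gcp_sql_hook import CloudSqlHook  # assumed path

export_body = {
    'exportContext': {
        'fileType': 'sql',                               # or 'csv'
        'uri': 'gs://example-bucket/exports/dump.sql.gz',
        'databases': ['my_database'],
    }
}
hook = CloudSqlHook(api_version='v1beta4')
hook.export_instance(instance='my-instance', body=export_body,
                     project_id='example-project')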
q265782 | CloudSqlProxyRunner.start_proxy | test | def start_proxy(self):
"""
Starts Cloud SQL Proxy.
You have to remember to stop the proxy if you started it!
"""
self._download_sql_proxy_if_needed()
if self.sql_proxy_process:
raise AirflowException("The sql proxy is already running: {}".format(
self.sql_proxy_process))
else:
command_to_run = [self.sql_proxy_path]
command_to_run.extend(self.command_line_parameters)
try:
self.log.info("Creating directory %s",
self.cloud_sql_proxy_socket_directory)
os.makedirs(self.cloud_sql_proxy_socket_directory)
except OSError:
# Needed for Python 2 compatibility (os.makedirs has no exist_ok there)
pass
command_to_run.extend(self._get_credential_parameters())
self.log.info("Running the command: `%s`", " ".join(command_to_run))
self.sql_proxy_process = Popen(command_to_run,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
while True:
line = self.sql_proxy_process.stderr.readline().decode('utf-8')
return_code = self.sql_proxy_process.poll()
if line == '' and return_code is not None:
self.sql_proxy_process = None
raise AirflowException(
"The cloud_sql_proxy finished early with return code {}!".format(
return_code))
if line != '':
self.log.info(line)
if "googleapi: Error" in line or "invalid instance name:" in line:
self.stop_proxy()
raise AirflowException(
"Error when starting the cloud_sql_proxy {}!".format(
line))
if "Ready for new connections" in line:
return | python | {
"resource": ""
} |
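The start/stop contract spelled out in the docstrings is easiest to honour with try/finally. A sketch with illustrative constructor arguments (the import path is an assumption):

from airflow.contrib.hooks.gcp_sql_hook import CloudSqlProxyRunner  # assumed path

runner = CloudSqlProxyRunner(
    path_prefix='/tmp/example_proxy',                                 # placeholder
    instance_specification='example-project:europe-west1:my-instance',
)
runner.start_proxy()   # blocks until 'Ready for new connections'
try:
    pass  # connect to the database through the proxy socket/port here
finally:
    runner.stop_proxy()  # always stop what you started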
q265783 | CloudSqlProxyRunner.stop_proxy | test | def stop_proxy(self):
"""
Stops running proxy.
You should stop the proxy after you stop using it.
"""
if not self.sql_proxy_process:
raise AirflowException("The sql proxy is not started yet")
else:
self.log.info("Stopping the cloud_sql_proxy pid: %s",
self.sql_proxy_process.pid)
self.sql_proxy_process.kill()
self.sql_proxy_process = None
# Cleanup!
self.log.info("Removing the socket directory: %s",
self.cloud_sql_proxy_socket_directory)
shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True)
if self.sql_proxy_was_downloaded:
self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path)
# Silently ignore if the file has already been removed (concurrency)
try:
os.remove(self.sql_proxy_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
self.log.info("Skipped removing proxy - it was not downloaded: %s",
self.sql_proxy_path)
if os.path.isfile(self.credentials_path):
self.log.info("Removing generated credentials file %s",
self.credentials_path)
# Here the file cannot be deleted by a concurrent task (each task has its own copy)
os.remove(self.credentials_path) | python | {
"resource": ""
} |
q265784 | CloudSqlProxyRunner.get_proxy_version | test | def get_proxy_version(self):
"""
Returns version of the Cloud SQL Proxy.
"""
self._download_sql_proxy_if_needed()
command_to_run = [self.sql_proxy_path]
command_to_run.extend(['--version'])
command_to_run.extend(self._get_credential_parameters())
result = subprocess.check_output(command_to_run).decode('utf-8')
pattern = re.compile("^.*[Vv]ersion ([^;]*);.*$")  # [Vv]: '|' would be a literal inside a class
m = pattern.match(result)
if m:
return m.group(1)
else:
return None | python | {
"resource": ""
} |
q265785 | CloudSqlDatabaseHook.create_connection | test | def create_connection(self, session=None):
"""
Create connection in the Connection table, according to whether it uses
proxy, TCP, UNIX sockets, SSL. Connection ID will be randomly generated.
:param session: Session of the SQL Alchemy ORM (automatically generated with
decorator).
"""
connection = Connection(conn_id=self.db_conn_id)
uri = self._generate_connection_uri()
self.log.info("Creating connection %s", self.db_conn_id)
connection.parse_from_uri(uri)
session.add(connection)
session.commit() | python | {
"resource": ""
} |
q265786 | CloudSqlDatabaseHook.retrieve_connection | test | def retrieve_connection(self, session=None):
"""
Retrieves the dynamically created connection from the Connection table.
:param session: Session of the SQL Alchemy ORM (automatically generated with
decorator).
"""
self.log.info("Retrieving connection %s", self.db_conn_id)
connections = session.query(Connection).filter(
Connection.conn_id == self.db_conn_id)
if connections.count():
return connections[0]
return None | python | {
"resource": ""
} |
q265787 | CloudSqlDatabaseHook.delete_connection | test | def delete_connection(self, session=None):
"""
Delete the dynamically created connection from the Connection table.
:param session: Session of the SQL Alchemy ORM (automatically generated with
decorator).
"""
self.log.info("Deleting connection %s", self.db_conn_id)
connections = session.query(Connection).filter(
Connection.conn_id == self.db_conn_id)
if connections.count():
connection = connections[0]
session.delete(connection)
session.commit()
else:
self.log.info("Connection was already deleted!") | python | {
"resource": ""
} |
q265788 | CloudSqlDatabaseHook.get_sqlproxy_runner | test | def get_sqlproxy_runner(self):
"""
Retrieve Cloud SQL Proxy runner. It is used to manage the proxy
lifecycle per task.
:return: The Cloud SQL Proxy runner.
:rtype: CloudSqlProxyRunner
"""
if not self.use_proxy:
raise AirflowException("Proxy runner can only be retrieved in case of use_proxy = True")
return CloudSqlProxyRunner(
path_prefix=self.sql_proxy_unique_path,
instance_specification=self._get_sqlproxy_instance_specification(),
project_id=self.project_id,
sql_proxy_version=self.sql_proxy_version,
sql_proxy_binary_path=self.sql_proxy_binary_path
) | python | {
"resource": ""
} |
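Taken together, the connection and proxy methods of this class suggest a per-task lifecycle along these lines. The control flow below is an inferred sketch, not code lifted from the operator:

db_hook.create_connection()          # db_hook: an already-built CloudSqlDatabaseHook
try:
    if db_hook.use_proxy:
        runner = db_hook.get_sqlproxy_runner()
        runner.start_proxy()
    try:
        db_hook.get_database_hook().run('SELECT 1')  # any SQL statement
    finally:
        if db_hook.use_proxy:
            runner.stop_proxy()
        db_hook.cleanup_database_hook()
finally:
    db_hook.delete_connection()      # drop the dynamically created connection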
q265789 | CloudSqlDatabaseHook.get_database_hook | test | def get_database_hook(self):
"""
Retrieve database hook. This is the actual Postgres or MySQL database hook
that uses proxy or connects directly to the Google Cloud SQL database.
"""
if self.database_type == 'postgres':
self.db_hook = PostgresHook(postgres_conn_id=self.db_conn_id,
schema=self.database)
else:
self.db_hook = MySqlHook(mysql_conn_id=self.db_conn_id,
schema=self.database)
return self.db_hook | python | {
"resource": ""
} |
q265790 | CloudSqlDatabaseHook.cleanup_database_hook | test | def cleanup_database_hook(self):
"""
Clean up database hook after it was used.
"""
if self.database_type == 'postgres':
if hasattr(self.db_hook, 'conn') and self.db_hook.conn \
        and self.db_hook.conn.notices:
for output in self.db_hook.conn.notices:
self.log.info(output) | python | {
"resource": ""
} |
q265791 | CloudSqlDatabaseHook.reserve_free_tcp_port | test | def reserve_free_tcp_port(self):
"""
Reserves a free TCP port to be used by the Cloud SQL Proxy. The port
stays reserved for as long as the bound socket is kept open.
"""
self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.reserved_tcp_socket.bind(('127.0.0.1', 0))
self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1] | python | {
"resource": ""
} |
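The reservation trick above is plain BSD-socket behaviour: binding to port 0 makes the kernel pick a free ephemeral port, and keeping the socket open holds the reservation. A standalone demonstration:

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('127.0.0.1', 0))                   # port 0 = "pick any free port"
print('reserved port:', s.getsockname()[1])
# The reservation lasts while the socket is open; close it just before
# handing the port to the proxy (note the small race window after close).
s.close()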
q265792 | _normalize_mlengine_job_id | test | def _normalize_mlengine_job_id(job_id):
"""
Replaces invalid MLEngine job_id characters with '_'.
This also prepends 'z_' when the job_id starts with a digit or a
template, since MLEngine job ids must begin with a letter.
Args:
job_id: A job_id str that may have invalid characters.
Returns:
A valid job_id representation.
"""
# Add a prefix when a job_id starts with a digit or a template
match = re.search(r'\d|\{{2}', job_id)
if match and match.start() == 0:
job = 'z_{}'.format(job_id)
else:
job = job_id
# Clean up 'bad' characters except templates
tracker = 0
cleansed_job_id = ''
for m in re.finditer(r'\{{2}.+?\}{2}', job):
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_',
job[tracker:m.start()])
cleansed_job_id += job[m.start():m.end()]
tracker = m.end()
# Clean up last substring or the full string if no templates
cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])
return cleansed_job_id | python | {
"resource": ""
} |
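Expected behaviour, inferred from the implementation above:

assert _normalize_mlengine_job_id('my.job-name') == 'my_job_name'
assert _normalize_mlengine_job_id('1starts_with_digit') == 'z_1starts_with_digit'
# Jinja templates survive untouched; only the text around them is cleaned:
assert _normalize_mlengine_job_id('job.{{ ds }}') == 'job_{{ ds }}'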
q265793 | FTPSensor._get_error_code | test | def _get_error_code(self, e):
"""Extract error code from ftp exception"""
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except (AttributeError, ValueError):  # no match (None) or a non-numeric code
return e | python | {
"resource": ""
} |
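error_code_pattern is defined elsewhere in the sensor; with a plausible equivalent (an assumption, not the sensor's actual pattern), the extraction looks like this. ftplib reports permanent errors as strings that start with a three-digit reply code:

import re

error_code_pattern = re.compile(r'\d+')   # assumed; grabs the leading reply code

def get_error_code(exc):
    try:
        return int(error_code_pattern.match(str(exc)).group(0))
    except (AttributeError, ValueError):   # no match, or a non-numeric prefix
        return exc

print(get_error_code(Exception('550 /tmp/x: No such file or directory')))  # 550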
q265794 | clear_dag_runs | test | def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr)
# Commit so the deletions take effect (the sibling helpers below do the same).
session.commit() | python | {
"resource": ""
} |
q265795 | clear_dag_task_instances | test | def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit() | python | {
"resource": ""
} |
q265796 | set_dags_paused_state | test | def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit() | python | {
"resource": ""
} |
q265797 | SchedulerMetricsJob.print_stats | test | def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = [x for x in tis if x.state == State.SUCCESS]
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit() | python | {
"resource": ""
} |
q265798 | SchedulerMetricsJob.heartbeat | test | def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
# The tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(timezone.utcnow() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(timezone.utcnow() - self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit() | python | {
"resource": ""
} |
q265799 | AwsLambdaHook.invoke_lambda | test | def invoke_lambda(self, payload):
"""
Invokes the configured Lambda function with the given payload
and returns the raw boto3 response.
"""
awslambda_conn = self.get_conn()
response = awslambda_conn.invoke(
FunctionName=self.function_name,
InvocationType=self.invocation_type,
LogType=self.log_type,
Payload=payload,
Qualifier=self.qualifier
)
return response | python | {
"resource": ""
} |
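A usage sketch; the function name, region, and payload are placeholders, the import path is an assumption, and the response shape is the standard boto3 invoke response:

import json
from airflow.contrib.hooks.aws_lambda_hook import AwsLambdaHook  # assumed path

hook = AwsLambdaHook(
    function_name='my-function',
    region_name='eu-west-1',
    invocation_type='RequestResponse',   # synchronous: wait for the result
)
response = hook.invoke_lambda(payload=json.dumps({'key': 'value'}))
print(response['StatusCode'])                # 200 on a successful sync call
print(response['Payload'].read().decode())   # the function's return value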