after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def create_paramfile(trans, uploaded_datasets):
"""
Create the upload tool's JSON "param" file.
"""
def _chown(path):
try:
# get username from email/username
pwent = trans.user.system_user_pwent(trans.app.config.real_system_username)
cmd = shlex.split(trans.app.config.external_chown_script)
cmd.extend([path, pwent[0], str(pwent[3])])
log.debug("Changing ownership of %s with: %s" % (path, " ".join(cmd)))
p = subprocess.Popen(
cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
assert p.returncode == 0, stderr
except Exception as e:
log.warning(
"Changing ownership of uploaded file %s failed: %s" % (path, str(e))
)
# TODO: json_file should go in the working directory
json_file = tempfile.mkstemp()
json_file_path = json_file[1]
json_file = os.fdopen(json_file[0], "w")
for uploaded_dataset in uploaded_datasets:
data = uploaded_dataset.data
if uploaded_dataset.type == "composite":
# we need to init metadata before the job is dispatched
data.init_meta()
for meta_name, meta_value in uploaded_dataset.metadata.items():
setattr(data.metadata, meta_name, meta_value)
trans.sa_session.add(data)
trans.sa_session.flush()
json = dict(
file_type=uploaded_dataset.file_type,
dataset_id=data.dataset.id,
dbkey=uploaded_dataset.dbkey,
type=uploaded_dataset.type,
metadata=uploaded_dataset.metadata,
primary_file=uploaded_dataset.primary_file,
composite_file_paths=uploaded_dataset.composite_files,
composite_files=dict(
(k, v.__dict__)
for k, v in data.datatype.get_composite_files(data).items()
),
)
else:
try:
is_binary = uploaded_dataset.datatype.is_binary
except Exception:
is_binary = None
try:
link_data_only = uploaded_dataset.link_data_only
except Exception:
link_data_only = "copy_files"
try:
uuid_str = uploaded_dataset.uuid
except Exception:
uuid_str = None
try:
purge_source = uploaded_dataset.purge_source
except Exception:
purge_source = True
try:
user_ftp_dir = os.path.abspath(trans.user_ftp_dir)
except Exception:
user_ftp_dir = None
if user_ftp_dir and uploaded_dataset.path.startswith(user_ftp_dir):
uploaded_dataset.type = "ftp_import"
json = dict(
file_type=uploaded_dataset.file_type,
ext=uploaded_dataset.ext,
name=uploaded_dataset.name,
dataset_id=data.dataset.id,
dbkey=uploaded_dataset.dbkey,
type=uploaded_dataset.type,
is_binary=is_binary,
link_data_only=link_data_only,
uuid=uuid_str,
to_posix_lines=getattr(uploaded_dataset, "to_posix_lines", True),
auto_decompress=getattr(uploaded_dataset, "auto_decompress", True),
purge_source=purge_source,
space_to_tab=uploaded_dataset.space_to_tab,
in_place=trans.app.config.external_chown_script is None,
check_content=trans.app.config.check_upload_content,
path=uploaded_dataset.path,
)
# TODO: This will have to change when we start bundling inputs.
# Also, in_place above causes the file to be left behind since the
# user cannot remove it unless the parent directory is writable.
if (
link_data_only == "copy_files"
and trans.app.config.external_chown_script
):
_chown(uploaded_dataset.path)
json_file.write(dumps(json) + "\n")
json_file.close()
if trans.app.config.external_chown_script:
_chown(json_file_path)
return json_file_path
|
def create_paramfile(trans, uploaded_datasets):
"""
Create the upload tool's JSON "param" file.
"""
def _chown(path):
try:
# get username from email/username
pwent = trans.user.system_user_pwent(trans.app.config.real_system_username)
cmd = shlex.split(trans.app.config.external_chown_script)
cmd.extend([path, pwent[0], str(pwent[3])])
log.debug("Changing ownership of %s with: %s" % (path, " ".join(cmd)))
p = subprocess.Popen(
cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
assert p.returncode == 0, stderr
except Exception as e:
log.warning(
"Changing ownership of uploaded file %s failed: %s" % (path, str(e))
)
# TODO: json_file should go in the working directory
json_file = tempfile.mkstemp()
json_file_path = json_file[1]
json_file = os.fdopen(json_file[0], "w")
for uploaded_dataset in uploaded_datasets:
data = uploaded_dataset.data
if uploaded_dataset.type == "composite":
# we need to init metadata before the job is dispatched
data.init_meta()
for meta_name, meta_value in uploaded_dataset.metadata.items():
setattr(data.metadata, meta_name, meta_value)
trans.sa_session.add(data)
trans.sa_session.flush()
json = dict(
file_type=uploaded_dataset.file_type,
dataset_id=data.dataset.id,
dbkey=uploaded_dataset.dbkey,
type=uploaded_dataset.type,
metadata=uploaded_dataset.metadata,
primary_file=uploaded_dataset.primary_file,
composite_file_paths=uploaded_dataset.composite_files,
composite_files=dict(
(k, v.__dict__)
for k, v in data.datatype.get_composite_files(data).items()
),
)
else:
try:
is_binary = uploaded_dataset.datatype.is_binary
except:
is_binary = None
try:
link_data_only = uploaded_dataset.link_data_only
except:
link_data_only = "copy_files"
try:
uuid_str = uploaded_dataset.uuid
except:
uuid_str = None
try:
purge_source = uploaded_dataset.purge_source
except:
purge_source = True
json = dict(
file_type=uploaded_dataset.file_type,
ext=uploaded_dataset.ext,
name=uploaded_dataset.name,
dataset_id=data.dataset.id,
dbkey=uploaded_dataset.dbkey,
type=uploaded_dataset.type,
is_binary=is_binary,
link_data_only=link_data_only,
uuid=uuid_str,
to_posix_lines=getattr(uploaded_dataset, "to_posix_lines", True),
auto_decompress=getattr(uploaded_dataset, "auto_decompress", True),
purge_source=purge_source,
space_to_tab=uploaded_dataset.space_to_tab,
in_place=trans.app.config.external_chown_script is None,
check_content=trans.app.config.check_upload_content,
path=uploaded_dataset.path,
)
# TODO: This will have to change when we start bundling inputs.
# Also, in_place above causes the file to be left behind since the
# user cannot remove it unless the parent directory is writable.
if (
link_data_only == "copy_files"
and trans.app.config.external_chown_script
):
_chown(uploaded_dataset.path)
json_file.write(dumps(json) + "\n")
json_file.close()
if trans.app.config.external_chown_script:
_chown(json_file_path)
return json_file_path
|
https://github.com/galaxyproject/galaxy/issues/4300
|
Traceback (most recent call last):
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 425, in <module>
__main__()
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 413, in __main__
add_file( dataset, registry, json_file, output_path )
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 325, in add_file
shutil.move( dataset.path, output_path )
File "/global/apps/bioinf/galaxy/bin/Python-2.7.13/lib/python2.7/shutil.py", line 303, in move
os.unlink(src)
OSError: [Errno 13] Permission denied: '/gpfs1/data/galaxy_server/galaxy-dev/database/tmp/strio_url_paste_aqTRvr'
|
OSError
|
def add_file(dataset, registry, json_file, output_path):
data_type = None
line_count = None
converted_path = None
stdout = None
link_data_only = dataset.get("link_data_only", "copy_files")
run_as_real_user = in_place = dataset.get("in_place", True)
purge_source = dataset.get("purge_source", True)
# in_place is True if there is no external chmod in place,
# however there are other instances where modifications should not occur in_place:
# when a file is added from a directory on the local file system (ftp import folder or any other path).
if dataset.type in ("server_dir", "path_paste", "ftp_import"):
in_place = False
check_content = dataset.get("check_content", True)
auto_decompress = dataset.get("auto_decompress", True)
try:
ext = dataset.file_type
except AttributeError:
file_err(
"Unable to process uploaded file, missing file_type parameter.",
dataset,
json_file,
)
return
if dataset.type == "url":
try:
page = urlopen(dataset.path) # page will be .close()ed by sniff methods
temp_name, dataset.is_multi_byte = sniff.stream_to_file(
page,
prefix="url_paste",
source_encoding=util.get_charset_from_http_headers(page.headers),
)
except Exception as e:
file_err(
"Unable to fetch %s\n%s" % (dataset.path, str(e)), dataset, json_file
)
return
dataset.path = temp_name
# See if we have an empty file
if not os.path.exists(dataset.path):
file_err(
"Uploaded temporary file (%s) does not exist." % dataset.path,
dataset,
json_file,
)
return
if not os.path.getsize(dataset.path) > 0:
file_err("The uploaded file is empty", dataset, json_file)
return
if not dataset.type == "url":
# Already set is_multi_byte above if type == 'url'
try:
dataset.is_multi_byte = multi_byte.is_multi_byte(
codecs.open(dataset.path, "r", "utf-8").read(100)
)
except UnicodeDecodeError as e:
dataset.is_multi_byte = False
# Is dataset an image?
i_ext = get_image_ext(dataset.path)
if i_ext:
ext = i_ext
data_type = ext
# Is dataset content multi-byte?
elif dataset.is_multi_byte:
data_type = "multi-byte char"
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_multi_byte=True)
# Is dataset content supported sniffable binary?
else:
# FIXME: This ignores the declared sniff order in datatype_conf.xml
# resulting in improper behavior
type_info = Binary.is_sniffable_binary(dataset.path)
if type_info:
data_type = type_info[0]
ext = type_info[1]
if not data_type:
root_datatype = registry.get_datatype_by_extension(dataset.file_type)
if getattr(root_datatype, "compressed", False):
data_type = "compressed archive"
ext = dataset.file_type
else:
# See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
is_gzipped, is_valid = check_gzip(dataset.path, check_content=check_content)
if is_gzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_gzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_gunzip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
gzipped_file = gzip.GzipFile(dataset.path, "rb")
while 1:
try:
chunk = gzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing gzipped data", dataset, json_file
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
gzipped_file.close()
# Replace the gzipped file with the decompressed file if it's safe to do so
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".gz")
data_type = "gzip"
if not data_type:
# See if we have a bz2 file, much like gzip
is_bzipped, is_valid = check_bz2(dataset.path, check_content)
if is_bzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_bzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_bunzip2_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
bzipped_file = bz2.BZ2File(dataset.path, "rb")
while 1:
try:
chunk = bzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing bz2 compressed data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
bzipped_file.close()
# Replace the bzipped file with the decompressed file if it's safe to do so
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".bz2")
data_type = "bz2"
if not data_type:
# See if we have a zip archive
is_zipped = check_zip(dataset.path)
if is_zipped and auto_decompress:
if link_data_only == "copy_files":
CHUNK_SIZE = 2**20 # 1Mb
uncompressed = None
uncompressed_name = None
unzipped = False
z = zipfile.ZipFile(dataset.path)
for name in z.namelist():
if name.endswith("/"):
continue
if unzipped:
stdout = "ZIP file contained more than one file, only the first file was added to Galaxy."
break
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_zip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
if sys.version_info[:2] >= (2, 6):
zipped_file = z.open(name)
while 1:
try:
chunk = zipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
zipped_file.close()
uncompressed_name = name
unzipped = True
else:
# python < 2.5 doesn't have a way to read members in chunks(!)
try:
outfile = open(uncompressed, "wb")
outfile.write(z.read(name))
outfile.close()
uncompressed_name = name
unzipped = True
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
z.close()
# Replace the zipped file with the decompressed file if it's safe to do so
if uncompressed is not None:
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = uncompressed_name
data_type = "zip"
if not data_type:
# TODO refactor this logic. check_binary isn't guaranteed to be
# correct since it only looks at whether the first 100 chars are
# printable or not. If someone specifies a known unsniffable
# binary datatype and check_binary fails, the file gets mangled.
if check_binary(dataset.path) or Binary.is_ext_unsniffable(
dataset.file_type
):
# We have a binary dataset, but it is not Bam, Sff or Pdf
data_type = "binary"
# binary_ok = False
parts = dataset.name.split(".")
if len(parts) > 1:
ext = parts[-1].strip().lower()
if check_content and not Binary.is_ext_unsniffable(ext):
file_err(
"The uploaded binary file contains inappropriate content",
dataset,
json_file,
)
return
elif (
Binary.is_ext_unsniffable(ext) and dataset.file_type != ext
):
err_msg = (
"You must manually set the 'File Format' to '%s' when uploading %s files."
% (ext.capitalize(), ext)
)
file_err(err_msg, dataset, json_file)
return
if not data_type:
# We must have a text file
if check_content and check_html(dataset.path):
file_err(
"The uploaded file contains inappropriate HTML content",
dataset,
json_file,
)
return
if data_type != "binary":
if link_data_only == "copy_files" and data_type not in (
"gzip",
"bz2",
"zip",
):
# Convert universal line endings to Posix line endings if to_posix_lines is True
# and the data is not binary or gzip-, bz2- or zip-compressed.
if dataset.to_posix_lines:
tmpdir = output_adjacent_tmpdir(output_path)
tmp_prefix = "data_id_%s_convert_" % dataset.dataset_id
if dataset.space_to_tab:
line_count, converted_path = (
sniff.convert_newlines_sep2tabs(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
)
else:
line_count, converted_path = sniff.convert_newlines(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
if dataset.file_type == "auto":
ext = sniff.guess_ext(dataset.path, registry.sniff_order)
else:
ext = dataset.file_type
data_type = ext
# Save job info for the framework
if ext == "auto" and data_type == "binary":
ext = "data"
if ext == "auto" and dataset.ext:
ext = dataset.ext
if ext == "auto":
ext = "data"
datatype = registry.get_datatype_by_extension(ext)
if (
dataset.type in ("server_dir", "path_paste")
and link_data_only == "link_to_files"
):
# Never alter a file that will not be copied to Galaxy's local file store.
if datatype.dataset_content_needs_grooming(dataset.path):
err_msg = (
"The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be "
+ "<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed."
)
file_err(err_msg, dataset, json_file)
return
if link_data_only == "copy_files" and converted_path:
# Move the dataset to its "real" path
try:
shutil.move(converted_path, output_path)
except OSError as e:
# We may not have permission to remove converted_path
if e.errno != errno.EACCES:
raise
elif link_data_only == "copy_files":
if purge_source and not run_as_real_user:
# if the upload tool runs as a real user the real user
# can't move dataset.path as this path is owned by galaxy.
shutil.move(dataset.path, output_path)
else:
shutil.copy(dataset.path, output_path)
# Write the job info
stdout = stdout or "uploaded %s file" % data_type
info = dict(
type="dataset",
dataset_id=dataset.dataset_id,
ext=ext,
stdout=stdout,
name=dataset.name,
line_count=line_count,
)
if dataset.get("uuid", None) is not None:
info["uuid"] = dataset.get("uuid")
json_file.write(dumps(info) + "\n")
if (
link_data_only == "copy_files"
and datatype
and datatype.dataset_content_needs_grooming(output_path)
):
# Groom the dataset content if necessary
datatype.groom_dataset_content(output_path)
|
def add_file(dataset, registry, json_file, output_path):
data_type = None
line_count = None
converted_path = None
stdout = None
link_data_only = dataset.get("link_data_only", "copy_files")
in_place = dataset.get("in_place", True)
purge_source = dataset.get("purge_source", True)
check_content = dataset.get("check_content", True)
auto_decompress = dataset.get("auto_decompress", True)
try:
ext = dataset.file_type
except AttributeError:
file_err(
"Unable to process uploaded file, missing file_type parameter.",
dataset,
json_file,
)
return
if dataset.type == "url":
try:
page = urlopen(dataset.path) # page will be .close()ed by sniff methods
temp_name, dataset.is_multi_byte = sniff.stream_to_file(
page,
prefix="url_paste",
source_encoding=util.get_charset_from_http_headers(page.headers),
)
except Exception as e:
file_err(
"Unable to fetch %s\n%s" % (dataset.path, str(e)), dataset, json_file
)
return
dataset.path = temp_name
# See if we have an empty file
if not os.path.exists(dataset.path):
file_err(
"Uploaded temporary file (%s) does not exist." % dataset.path,
dataset,
json_file,
)
return
if not os.path.getsize(dataset.path) > 0:
file_err("The uploaded file is empty", dataset, json_file)
return
if not dataset.type == "url":
# Already set is_multi_byte above if type == 'url'
try:
dataset.is_multi_byte = multi_byte.is_multi_byte(
codecs.open(dataset.path, "r", "utf-8").read(100)
)
except UnicodeDecodeError as e:
dataset.is_multi_byte = False
# Is dataset an image?
i_ext = get_image_ext(dataset.path)
if i_ext:
ext = i_ext
data_type = ext
# Is dataset content multi-byte?
elif dataset.is_multi_byte:
data_type = "multi-byte char"
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_multi_byte=True)
# Is dataset content supported sniffable binary?
else:
# FIXME: This ignores the declared sniff order in datatype_conf.xml
# resulting in improper behavior
type_info = Binary.is_sniffable_binary(dataset.path)
if type_info:
data_type = type_info[0]
ext = type_info[1]
if not data_type:
root_datatype = registry.get_datatype_by_extension(dataset.file_type)
if getattr(root_datatype, "compressed", False):
data_type = "compressed archive"
ext = dataset.file_type
else:
# See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
is_gzipped, is_valid = check_gzip(dataset.path, check_content=check_content)
if is_gzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_gzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_gunzip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
gzipped_file = gzip.GzipFile(dataset.path, "rb")
while 1:
try:
chunk = gzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing gzipped data", dataset, json_file
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
gzipped_file.close()
# Replace the gzipped file with the decompressed file if it's safe to do so
if dataset.type in ("server_dir", "path_paste") or not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".gz")
data_type = "gzip"
if not data_type:
# See if we have a bz2 file, much like gzip
is_bzipped, is_valid = check_bz2(dataset.path, check_content)
if is_bzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_bzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_bunzip2_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
bzipped_file = bz2.BZ2File(dataset.path, "rb")
while 1:
try:
chunk = bzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing bz2 compressed data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
bzipped_file.close()
# Replace the bzipped file with the decompressed file if it's safe to do so
if dataset.type in ("server_dir", "path_paste") or not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".bz2")
data_type = "bz2"
if not data_type:
# See if we have a zip archive
is_zipped = check_zip(dataset.path)
if is_zipped and auto_decompress:
if link_data_only == "copy_files":
CHUNK_SIZE = 2**20 # 1Mb
uncompressed = None
uncompressed_name = None
unzipped = False
z = zipfile.ZipFile(dataset.path)
for name in z.namelist():
if name.endswith("/"):
continue
if unzipped:
stdout = "ZIP file contained more than one file, only the first file was added to Galaxy."
break
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_zip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
if sys.version_info[:2] >= (2, 6):
zipped_file = z.open(name)
while 1:
try:
chunk = zipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
zipped_file.close()
uncompressed_name = name
unzipped = True
else:
# python < 2.5 doesn't have a way to read members in chunks(!)
try:
outfile = open(uncompressed, "wb")
outfile.write(z.read(name))
outfile.close()
uncompressed_name = name
unzipped = True
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
z.close()
# Replace the zipped file with the decompressed file if it's safe to do so
if uncompressed is not None:
if (
dataset.type in ("server_dir", "path_paste")
or not in_place
):
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = uncompressed_name
data_type = "zip"
if not data_type:
# TODO refactor this logic. check_binary isn't guaranteed to be
# correct since it only looks at whether the first 100 chars are
# printable or not. If someone specifies a known unsniffable
# binary datatype and check_binary fails, the file gets mangled.
if check_binary(dataset.path) or Binary.is_ext_unsniffable(
dataset.file_type
):
# We have a binary dataset, but it is not Bam, Sff or Pdf
data_type = "binary"
# binary_ok = False
parts = dataset.name.split(".")
if len(parts) > 1:
ext = parts[-1].strip().lower()
if check_content and not Binary.is_ext_unsniffable(ext):
file_err(
"The uploaded binary file contains inappropriate content",
dataset,
json_file,
)
return
elif (
Binary.is_ext_unsniffable(ext) and dataset.file_type != ext
):
err_msg = (
"You must manually set the 'File Format' to '%s' when uploading %s files."
% (ext.capitalize(), ext)
)
file_err(err_msg, dataset, json_file)
return
if not data_type:
# We must have a text file
if check_content and check_html(dataset.path):
file_err(
"The uploaded file contains inappropriate HTML content",
dataset,
json_file,
)
return
if data_type != "binary":
if link_data_only == "copy_files":
if dataset.type in (
"server_dir",
"path_paste",
) and data_type not in ["gzip", "bz2", "zip"]:
in_place = False
# Convert universal line endings to Posix line endings, but allow the user to turn it off,
# so that is becomes possible to upload gzip, bz2 or zip files with binary data without
# corrupting the content of those files.
if dataset.to_posix_lines:
tmpdir = output_adjacent_tmpdir(output_path)
tmp_prefix = "data_id_%s_convert_" % dataset.dataset_id
if dataset.space_to_tab:
line_count, converted_path = (
sniff.convert_newlines_sep2tabs(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
)
else:
line_count, converted_path = sniff.convert_newlines(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
if dataset.file_type == "auto":
ext = sniff.guess_ext(dataset.path, registry.sniff_order)
else:
ext = dataset.file_type
data_type = ext
# Save job info for the framework
if ext == "auto" and data_type == "binary":
ext = "data"
if ext == "auto" and dataset.ext:
ext = dataset.ext
if ext == "auto":
ext = "data"
datatype = registry.get_datatype_by_extension(ext)
if (
dataset.type in ("server_dir", "path_paste")
and link_data_only == "link_to_files"
):
# Never alter a file that will not be copied to Galaxy's local file store.
if datatype.dataset_content_needs_grooming(dataset.path):
err_msg = (
"The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be "
+ "<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed."
)
file_err(err_msg, dataset, json_file)
return
if (
link_data_only == "copy_files"
and dataset.type in ("server_dir", "path_paste")
and data_type not in ["gzip", "bz2", "zip"]
):
# Move the dataset to its "real" path
if converted_path is not None:
shutil.copy(converted_path, output_path)
try:
os.remove(converted_path)
except:
pass
else:
# This should not happen, but it's here just in case
shutil.copy(dataset.path, output_path)
elif link_data_only == "copy_files":
if purge_source:
shutil.move(dataset.path, output_path)
else:
shutil.copy(dataset.path, output_path)
# Write the job info
stdout = stdout or "uploaded %s file" % data_type
info = dict(
type="dataset",
dataset_id=dataset.dataset_id,
ext=ext,
stdout=stdout,
name=dataset.name,
line_count=line_count,
)
if dataset.get("uuid", None) is not None:
info["uuid"] = dataset.get("uuid")
json_file.write(dumps(info) + "\n")
if (
link_data_only == "copy_files"
and datatype
and datatype.dataset_content_needs_grooming(output_path)
):
# Groom the dataset content if necessary
datatype.groom_dataset_content(output_path)
|
https://github.com/galaxyproject/galaxy/issues/4300
|
Traceback (most recent call last):
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 425, in <module>
__main__()
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 413, in __main__
add_file( dataset, registry, json_file, output_path )
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 325, in add_file
shutil.move( dataset.path, output_path )
File "/global/apps/bioinf/galaxy/bin/Python-2.7.13/lib/python2.7/shutil.py", line 303, in move
os.unlink(src)
OSError: [Errno 13] Permission denied: '/gpfs1/data/galaxy_server/galaxy-dev/database/tmp/strio_url_paste_aqTRvr'
|
OSError
|
def add_file(dataset, registry, json_file, output_path):
data_type = None
line_count = None
converted_path = None
stdout = None
link_data_only = dataset.get("link_data_only", "copy_files")
run_as_real_user = in_place = dataset.get("in_place", True)
purge_source = dataset.get("purge_source", True)
# in_place is True if there is no external chmod in place,
# however there are other instances where modifications should not occur in_place:
# when a file is added from a directory on the local file system (ftp import folder or any other path).
if dataset.type in ("server_dir", "path_paste", "ftp_import"):
in_place = False
check_content = dataset.get("check_content", True)
auto_decompress = dataset.get("auto_decompress", True)
try:
ext = dataset.file_type
except AttributeError:
file_err(
"Unable to process uploaded file, missing file_type parameter.",
dataset,
json_file,
)
return
if dataset.type == "url":
try:
page = urlopen(dataset.path) # page will be .close()ed by sniff methods
temp_name, dataset.is_multi_byte = sniff.stream_to_file(
page,
prefix="url_paste",
source_encoding=util.get_charset_from_http_headers(page.headers),
)
except Exception as e:
file_err(
"Unable to fetch %s\n%s" % (dataset.path, str(e)), dataset, json_file
)
return
dataset.path = temp_name
# See if we have an empty file
if not os.path.exists(dataset.path):
file_err(
"Uploaded temporary file (%s) does not exist." % dataset.path,
dataset,
json_file,
)
return
if not os.path.getsize(dataset.path) > 0:
file_err("The uploaded file is empty", dataset, json_file)
return
if not dataset.type == "url":
# Already set is_multi_byte above if type == 'url'
try:
dataset.is_multi_byte = multi_byte.is_multi_byte(
codecs.open(dataset.path, "r", "utf-8").read(100)
)
except UnicodeDecodeError as e:
dataset.is_multi_byte = False
# Is dataset an image?
i_ext = get_image_ext(dataset.path)
if i_ext:
ext = i_ext
data_type = ext
# Is dataset content multi-byte?
elif dataset.is_multi_byte:
data_type = "multi-byte char"
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_multi_byte=True)
# Is dataset content supported sniffable binary?
else:
# FIXME: This ignores the declared sniff order in datatype_conf.xml
# resulting in improper behavior
type_info = Binary.is_sniffable_binary(dataset.path)
if type_info:
data_type = type_info[0]
ext = type_info[1]
if not data_type:
root_datatype = registry.get_datatype_by_extension(dataset.file_type)
if getattr(root_datatype, "compressed", False):
data_type = "compressed archive"
ext = dataset.file_type
else:
# See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
is_gzipped, is_valid = check_gzip(dataset.path, check_content=check_content)
if is_gzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_gzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_gunzip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
gzipped_file = gzip.GzipFile(dataset.path, "rb")
while 1:
try:
chunk = gzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing gzipped data", dataset, json_file
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
gzipped_file.close()
# Replace the gzipped file with the decompressed file if it's safe to do so
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".gz")
data_type = "gzip"
if not data_type and bz2 is not None:
# See if we have a bz2 file, much like gzip
is_bzipped, is_valid = check_bz2(dataset.path, check_content)
if is_bzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_bzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_bunzip2_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
bzipped_file = bz2.BZ2File(dataset.path, "rb")
while 1:
try:
chunk = bzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing bz2 compressed data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
bzipped_file.close()
# Replace the bzipped file with the decompressed file if it's safe to do so
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".bz2")
data_type = "bz2"
if not data_type:
# See if we have a zip archive
is_zipped = check_zip(dataset.path)
if is_zipped and auto_decompress:
if link_data_only == "copy_files":
CHUNK_SIZE = 2**20 # 1Mb
uncompressed = None
uncompressed_name = None
unzipped = False
z = zipfile.ZipFile(dataset.path)
for name in z.namelist():
if name.endswith("/"):
continue
if unzipped:
stdout = "ZIP file contained more than one file, only the first file was added to Galaxy."
break
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_zip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
if sys.version_info[:2] >= (2, 6):
zipped_file = z.open(name)
while 1:
try:
chunk = zipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
zipped_file.close()
uncompressed_name = name
unzipped = True
else:
# python < 2.5 doesn't have a way to read members in chunks(!)
try:
outfile = open(uncompressed, "wb")
outfile.write(z.read(name))
outfile.close()
uncompressed_name = name
unzipped = True
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
z.close()
# Replace the zipped file with the decompressed file if it's safe to do so
if uncompressed is not None:
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = uncompressed_name
data_type = "zip"
if not data_type:
# TODO refactor this logic. check_binary isn't guaranteed to be
# correct since it only looks at whether the first 100 chars are
# printable or not. If someone specifies a known unsniffable
# binary datatype and check_binary fails, the file gets mangled.
if check_binary(dataset.path) or Binary.is_ext_unsniffable(
dataset.file_type
):
# We have a binary dataset, but it is not Bam, Sff or Pdf
data_type = "binary"
# binary_ok = False
parts = dataset.name.split(".")
if len(parts) > 1:
ext = parts[-1].strip().lower()
if check_content and not Binary.is_ext_unsniffable(ext):
file_err(
"The uploaded binary file contains inappropriate content",
dataset,
json_file,
)
return
elif (
Binary.is_ext_unsniffable(ext) and dataset.file_type != ext
):
err_msg = (
"You must manually set the 'File Format' to '%s' when uploading %s files."
% (ext.capitalize(), ext)
)
file_err(err_msg, dataset, json_file)
return
if not data_type:
# We must have a text file
if check_content and check_html(dataset.path):
file_err(
"The uploaded file contains inappropriate HTML content",
dataset,
json_file,
)
return
if data_type != "binary":
if link_data_only == "copy_files" and data_type not in (
"gzip",
"bz2",
"zip",
):
# Convert universal line endings to Posix line endings if to_posix_lines is True
# and the data is not binary or gzip-, bz2- or zip-compressed.
if dataset.to_posix_lines:
tmpdir = output_adjacent_tmpdir(output_path)
tmp_prefix = "data_id_%s_convert_" % dataset.dataset_id
if dataset.space_to_tab:
line_count, converted_path = (
sniff.convert_newlines_sep2tabs(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
)
else:
line_count, converted_path = sniff.convert_newlines(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
if dataset.file_type == "auto":
ext = sniff.guess_ext(dataset.path, registry.sniff_order)
else:
ext = dataset.file_type
data_type = ext
# Save job info for the framework
if ext == "auto" and data_type == "binary":
ext = "data"
if ext == "auto" and dataset.ext:
ext = dataset.ext
if ext == "auto":
ext = "data"
datatype = registry.get_datatype_by_extension(ext)
if (
dataset.type in ("server_dir", "path_paste")
and link_data_only == "link_to_files"
):
# Never alter a file that will not be copied to Galaxy's local file store.
if datatype.dataset_content_needs_grooming(dataset.path):
err_msg = (
"The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be "
+ "<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed."
)
file_err(err_msg, dataset, json_file)
return
if link_data_only == "copy_files" and converted_path:
# Move the dataset to its "real" path
try:
shutil.move(converted_path, output_path)
except OSError as e:
# We may not have permission to remove converted_path
if e.errno != errno.EACCES:
raise
elif link_data_only == "copy_files":
if purge_source and not run_as_real_user:
# if the upload tool runs as a real user the real user
# can't move dataset.path as this path is owned by galaxy.
shutil.move(dataset.path, output_path)
else:
shutil.copy(dataset.path, output_path)
# Write the job info
stdout = stdout or "uploaded %s file" % data_type
info = dict(
type="dataset",
dataset_id=dataset.dataset_id,
ext=ext,
stdout=stdout,
name=dataset.name,
line_count=line_count,
)
if dataset.get("uuid", None) is not None:
info["uuid"] = dataset.get("uuid")
json_file.write(dumps(info) + "\n")
if (
link_data_only == "copy_files"
and datatype
and datatype.dataset_content_needs_grooming(output_path)
):
# Groom the dataset content if necessary
datatype.groom_dataset_content(output_path)
|
def add_file(dataset, registry, json_file, output_path):
data_type = None
line_count = None
converted_path = None
stdout = None
link_data_only = dataset.get("link_data_only", "copy_files")
in_place = dataset.get("in_place", True)
purge_source = dataset.get("purge_source", True)
# in_place is True if there is no external chmod in place,
# however there are other instances where modifications should not occur in_place:
# in-place unpacking or editing of line-ending when linking in data or when
# importing data from the FTP folder while purge_source is set to false
if not purge_source and dataset.get("type") == "ftp_import":
# If we do not purge the source we should not modify it in place.
in_place = False
if dataset.type in ("server_dir", "path_paste"):
in_place = False
check_content = dataset.get("check_content", True)
auto_decompress = dataset.get("auto_decompress", True)
try:
ext = dataset.file_type
except AttributeError:
file_err(
"Unable to process uploaded file, missing file_type parameter.",
dataset,
json_file,
)
return
if dataset.type == "url":
try:
page = urlopen(dataset.path) # page will be .close()ed by sniff methods
temp_name, dataset.is_multi_byte = sniff.stream_to_file(
page,
prefix="url_paste",
source_encoding=util.get_charset_from_http_headers(page.headers),
)
except Exception as e:
file_err(
"Unable to fetch %s\n%s" % (dataset.path, str(e)), dataset, json_file
)
return
dataset.path = temp_name
# See if we have an empty file
if not os.path.exists(dataset.path):
file_err(
"Uploaded temporary file (%s) does not exist." % dataset.path,
dataset,
json_file,
)
return
if not os.path.getsize(dataset.path) > 0:
file_err("The uploaded file is empty", dataset, json_file)
return
if not dataset.type == "url":
# Already set is_multi_byte above if type == 'url'
try:
dataset.is_multi_byte = multi_byte.is_multi_byte(
codecs.open(dataset.path, "r", "utf-8").read(100)
)
except UnicodeDecodeError as e:
dataset.is_multi_byte = False
# Is dataset an image?
i_ext = get_image_ext(dataset.path)
if i_ext:
ext = i_ext
data_type = ext
# Is dataset content multi-byte?
elif dataset.is_multi_byte:
data_type = "multi-byte char"
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_multi_byte=True)
# Is dataset content supported sniffable binary?
else:
# FIXME: This ignores the declared sniff order in datatype_conf.xml
# resulting in improper behavior
type_info = Binary.is_sniffable_binary(dataset.path)
if type_info:
data_type = type_info[0]
ext = type_info[1]
if not data_type:
root_datatype = registry.get_datatype_by_extension(dataset.file_type)
if getattr(root_datatype, "compressed", False):
data_type = "compressed archive"
ext = dataset.file_type
else:
# See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
is_gzipped, is_valid = check_gzip(dataset.path, check_content=check_content)
if is_gzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_gzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_gunzip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
gzipped_file = gzip.GzipFile(dataset.path, "rb")
while 1:
try:
chunk = gzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing gzipped data", dataset, json_file
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
gzipped_file.close()
# Replace the gzipped file with the decompressed file if it's safe to do so
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".gz")
data_type = "gzip"
if not data_type and bz2 is not None:
# See if we have a bz2 file, much like gzip
is_bzipped, is_valid = check_bz2(dataset.path, check_content)
if is_bzipped and not is_valid:
file_err(
"The gzipped uploaded file contains inappropriate content",
dataset,
json_file,
)
return
elif is_bzipped and is_valid and auto_decompress:
if link_data_only == "copy_files":
# We need to uncompress the temp_name file
CHUNK_SIZE = 2**20 # 1Mb
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_bunzip2_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
bzipped_file = bz2.BZ2File(dataset.path, "rb")
while 1:
try:
chunk = bzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing bz2 compressed data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
bzipped_file.close()
# Replace the bzipped file with the decompressed file if it's safe to do so
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip(".bz2")
data_type = "bz2"
if not data_type:
# See if we have a zip archive
is_zipped = check_zip(dataset.path)
if is_zipped and auto_decompress:
if link_data_only == "copy_files":
CHUNK_SIZE = 2**20 # 1Mb
uncompressed = None
uncompressed_name = None
unzipped = False
z = zipfile.ZipFile(dataset.path)
for name in z.namelist():
if name.endswith("/"):
continue
if unzipped:
stdout = "ZIP file contained more than one file, only the first file was added to Galaxy."
break
fd, uncompressed = tempfile.mkstemp(
prefix="data_id_%s_upload_zip_" % dataset.dataset_id,
dir=os.path.dirname(output_path),
text=False,
)
if sys.version_info[:2] >= (2, 6):
zipped_file = z.open(name)
while 1:
try:
chunk = zipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
zipped_file.close()
uncompressed_name = name
unzipped = True
else:
# python < 2.5 doesn't have a way to read members in chunks(!)
try:
outfile = open(uncompressed, "wb")
outfile.write(z.read(name))
outfile.close()
uncompressed_name = name
unzipped = True
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err(
"Problem decompressing zipped data",
dataset,
json_file,
)
return
z.close()
# Replace the zipped file with the decompressed file if it's safe to do so
if uncompressed is not None:
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = uncompressed_name
data_type = "zip"
if not data_type:
# TODO refactor this logic. check_binary isn't guaranteed to be
# correct since it only looks at whether the first 100 chars are
# printable or not. If someone specifies a known unsniffable
# binary datatype and check_binary fails, the file gets mangled.
if check_binary(dataset.path) or Binary.is_ext_unsniffable(
dataset.file_type
):
# We have a binary dataset, but it is not Bam, Sff or Pdf
data_type = "binary"
# binary_ok = False
parts = dataset.name.split(".")
if len(parts) > 1:
ext = parts[-1].strip().lower()
if check_content and not Binary.is_ext_unsniffable(ext):
file_err(
"The uploaded binary file contains inappropriate content",
dataset,
json_file,
)
return
elif (
Binary.is_ext_unsniffable(ext) and dataset.file_type != ext
):
err_msg = (
"You must manually set the 'File Format' to '%s' when uploading %s files."
% (ext.capitalize(), ext)
)
file_err(err_msg, dataset, json_file)
return
if not data_type:
# We must have a text file
if check_content and check_html(dataset.path):
file_err(
"The uploaded file contains inappropriate HTML content",
dataset,
json_file,
)
return
if data_type != "binary":
if link_data_only == "copy_files" and data_type not in (
"gzip",
"bz2",
"zip",
):
# Convert universal line endings to Posix line endings if to_posix_lines is True
# and the data is not binary or gzip-, bz2- or zip-compressed.
if dataset.to_posix_lines:
tmpdir = output_adjacent_tmpdir(output_path)
tmp_prefix = "data_id_%s_convert_" % dataset.dataset_id
if dataset.space_to_tab:
line_count, converted_path = (
sniff.convert_newlines_sep2tabs(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
)
else:
line_count, converted_path = sniff.convert_newlines(
dataset.path,
in_place=in_place,
tmp_dir=tmpdir,
tmp_prefix=tmp_prefix,
)
if dataset.file_type == "auto":
ext = sniff.guess_ext(dataset.path, registry.sniff_order)
else:
ext = dataset.file_type
data_type = ext
# Save job info for the framework
if ext == "auto" and data_type == "binary":
ext = "data"
if ext == "auto" and dataset.ext:
ext = dataset.ext
if ext == "auto":
ext = "data"
datatype = registry.get_datatype_by_extension(ext)
if (
dataset.type in ("server_dir", "path_paste")
and link_data_only == "link_to_files"
):
# Never alter a file that will not be copied to Galaxy's local file store.
if datatype.dataset_content_needs_grooming(dataset.path):
err_msg = (
"The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be "
+ "<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed."
)
file_err(err_msg, dataset, json_file)
return
if link_data_only == "copy_files" and converted_path:
# Move the dataset to its "real" path
shutil.copy(converted_path, output_path)
try:
os.remove(converted_path)
except Exception:
pass
elif link_data_only == "copy_files":
if purge_source:
shutil.move(dataset.path, output_path)
else:
shutil.copy(dataset.path, output_path)
# Write the job info
stdout = stdout or "uploaded %s file" % data_type
info = dict(
type="dataset",
dataset_id=dataset.dataset_id,
ext=ext,
stdout=stdout,
name=dataset.name,
line_count=line_count,
)
if dataset.get("uuid", None) is not None:
info["uuid"] = dataset.get("uuid")
json_file.write(dumps(info) + "\n")
if (
link_data_only == "copy_files"
and datatype
and datatype.dataset_content_needs_grooming(output_path)
):
# Groom the dataset content if necessary
datatype.groom_dataset_content(output_path)
|
https://github.com/galaxyproject/galaxy/issues/4300
|
Traceback (most recent call last):
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 425, in <module>
__main__()
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 413, in __main__
add_file( dataset, registry, json_file, output_path )
File "/gpfs1/data/galaxy_server/galaxy-dev/tools/data_source/upload.py", line 325, in add_file
shutil.move( dataset.path, output_path )
File "/global/apps/bioinf/galaxy/bin/Python-2.7.13/lib/python2.7/shutil.py", line 303, in move
os.unlink(src)
OSError: [Errno 13] Permission denied: '/gpfs1/data/galaxy_server/galaxy-dev/database/tmp/strio_url_paste_aqTRvr'
|
OSError
|
def build_command(
runner,
job_wrapper,
container=None,
modify_command_for_container=True,
include_metadata=False,
include_work_dir_outputs=True,
create_tool_working_directory=True,
remote_command_params={},
metadata_directory=None,
):
"""
Compose the sequence of commands necessary to execute a job. This will
currently include:
- environment settings corresponding to any requirement tags
- preparing input files
- command line taken from job wrapper
- commands to set metadata (if include_metadata is True)
"""
shell = job_wrapper.shell
base_command_line = job_wrapper.get_command_line()
# job_id = job_wrapper.job_id
# log.debug( 'Tool evaluation for job (%s) produced command-line: %s' % ( job_id, base_command_line ) )
if not base_command_line:
raise Exception("Attempting to run a tool with empty command definition.")
commands_builder = CommandsBuilder(base_command_line)
# All job runners currently handle this case which should never occur
if not commands_builder.commands:
return None
# Version, dependency resolution, and task splitting are prepended to the
# command - so they need to appear in the following order to ensure that
# the underlying application used by version command is available in the
# environment after dependency resolution, but the task splitting command
# is still executed in Galaxy's Python environment.
__handle_version_command(commands_builder, job_wrapper)
# One could imagine also allowing dependencies inside of the container but
# that is too sophisticated for a first crack at this - build your
# containers ready to go!
if not container or container.resolve_dependencies:
__handle_dependency_resolution(
commands_builder, job_wrapper, remote_command_params
)
__handle_task_splitting(commands_builder, job_wrapper)
if (
container and modify_command_for_container
) or job_wrapper.commands_in_new_shell:
if container and modify_command_for_container:
# Many Docker containers do not have /bin/bash.
external_command_shell = container.shell
else:
external_command_shell = shell
externalized_commands = __externalize_commands(
job_wrapper, external_command_shell, commands_builder, remote_command_params
)
if container and modify_command_for_container:
# Stop now and build command before handling metadata and copying
# working directory files back. These should always happen outside
# of docker container - no security implications when generating
# metadata and means no need for Galaxy to be available to container
# and not copying workdir outputs back means on can be more restrictive
# of where container can write to in some circumstances.
run_in_container_command = container.containerize_command(
externalized_commands
)
commands_builder = CommandsBuilder(run_in_container_command)
else:
commands_builder = CommandsBuilder(externalized_commands)
# Don't need to create a separate tool working directory for Pulsar
# jobs - that is handled by Pulsar.
if create_tool_working_directory:
# usually working will already exist, but it will not for task
# split jobs.
# Remove the working directory incase this is for instance a SLURM re-submission.
# xref https://github.com/galaxyproject/galaxy/issues/3289
commands_builder.prepend_command("rm -rf working; mkdir -p working; cd working")
if include_work_dir_outputs:
__handle_work_dir_outputs(
commands_builder, job_wrapper, runner, remote_command_params
)
commands_builder.capture_return_code()
if include_metadata and job_wrapper.requires_setting_metadata:
metadata_directory = metadata_directory or job_wrapper.working_directory
commands_builder.append_command("cd '%s'" % metadata_directory)
__handle_metadata(commands_builder, job_wrapper, runner, remote_command_params)
return commands_builder.build()
|
def build_command(
runner,
job_wrapper,
container=None,
modify_command_for_container=True,
include_metadata=False,
include_work_dir_outputs=True,
create_tool_working_directory=True,
remote_command_params={},
metadata_directory=None,
):
"""
Compose the sequence of commands necessary to execute a job. This will
currently include:
- environment settings corresponding to any requirement tags
- preparing input files
- command line taken from job wrapper
- commands to set metadata (if include_metadata is True)
"""
shell = job_wrapper.shell
base_command_line = job_wrapper.get_command_line()
# job_id = job_wrapper.job_id
# log.debug( 'Tool evaluation for job (%s) produced command-line: %s' % ( job_id, base_command_line ) )
if not base_command_line:
raise Exception("Attempting to run a tool with empty command definition.")
commands_builder = CommandsBuilder(base_command_line)
# All job runners currently handle this case which should never occur
if not commands_builder.commands:
return None
__handle_version_command(commands_builder, job_wrapper)
__handle_task_splitting(commands_builder, job_wrapper)
# One could imagine also allowing dependencies inside of the container but
# that is too sophisticated for a first crack at this - build your
# containers ready to go!
if not container or container.resolve_dependencies:
__handle_dependency_resolution(
commands_builder, job_wrapper, remote_command_params
)
if (
container and modify_command_for_container
) or job_wrapper.commands_in_new_shell:
if container and modify_command_for_container:
# Many Docker containers do not have /bin/bash.
external_command_shell = container.shell
else:
external_command_shell = shell
externalized_commands = __externalize_commands(
job_wrapper, external_command_shell, commands_builder, remote_command_params
)
if container and modify_command_for_container:
# Stop now and build command before handling metadata and copying
# working directory files back. These should always happen outside
# of docker container - no security implications when generating
# metadata and means no need for Galaxy to be available to container
# and not copying workdir outputs back means on can be more restrictive
# of where container can write to in some circumstances.
run_in_container_command = container.containerize_command(
externalized_commands
)
commands_builder = CommandsBuilder(run_in_container_command)
else:
commands_builder = CommandsBuilder(externalized_commands)
# Don't need to create a separate tool working directory for Pulsar
# jobs - that is handled by Pulsar.
if create_tool_working_directory:
# usually working will already exist, but it will not for task
# split jobs.
# Remove the working directory incase this is for instance a SLURM re-submission.
# xref https://github.com/galaxyproject/galaxy/issues/3289
commands_builder.prepend_command("rm -rf working; mkdir -p working; cd working")
if include_work_dir_outputs:
__handle_work_dir_outputs(
commands_builder, job_wrapper, runner, remote_command_params
)
commands_builder.capture_return_code()
if include_metadata and job_wrapper.requires_setting_metadata:
metadata_directory = metadata_directory or job_wrapper.working_directory
commands_builder.append_command("cd '%s'" % metadata_directory)
__handle_metadata(commands_builder, job_wrapper, runner, remote_command_params)
return commands_builder.build()
|
https://github.com/galaxyproject/galaxy/issues/4381
|
======================================================================
FAIL: NCBI BLAST+ blastn ( ncbi_blastn_wrapper ) > Test-1
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/peterjc/galaxy_blast/galaxy-dev/test/functional/test_toolbox.py", line 302, in test_tool
self.do_it( td )
File "/home/travis/build/peterjc/galaxy_blast/galaxy-dev/test/functional/test_toolbox.py", line 78, in do_it
raise e
JobOutputsError: Job in error state.
Job in error state.
-------------------- >> begin captured stdout << ---------------------
History with id 3777da040b354424 in error - summary of datasets in error below.
--------------------------------------
| 3 - megablast rhodopsin_nucs.fasta vs 'three_human_mRNA.fasta'
(HID - NAME)
| Dataset Blurb:
| error
| Dataset Info:
| Fatal error:
| /tmp/tmp8JSldT/job_working_directory/000/68/task_0:
| Traceback (most recent call last):
| File "./scripts/extract_dataset_part.py", line 17, in <module>
| import galaxy.model.mapping # need to load this before we unpickle, in order to se
| Dataset Job Standard Output:
| *Standard output was empty.*
| Dataset Job Standard Error:
| Fatal error:
| /tmp/tmp8JSldT/job_working_directory/000/68/task_0:
| Traceback (most recent call last):
| File "./scripts/extract_dataset_part.py", line 17, in <module>
| import galaxy.model.mapping # need to load this before we unpickle, in order to setup properties assigned by the mappers
| File "/home/travis/build/peterjc/galaxy_blast/galaxy-dev/lib/galaxy/model/__init__.py", line 21, in <module>
| from six import string_types
| ModuleNotFoundError: No module named 'six'
|
--------------------------------------
|
JobOutputsError
|
def build_command(
runner,
job_wrapper,
container=None,
modify_command_for_container=True,
include_metadata=False,
include_work_dir_outputs=True,
create_tool_working_directory=True,
remote_command_params={},
metadata_directory=None,
):
"""
Compose the sequence of commands necessary to execute a job. This will
currently include:
- environment settings corresponding to any requirement tags
- preparing input files
- command line taken from job wrapper
- commands to set metadata (if include_metadata is True)
"""
shell = job_wrapper.shell
base_command_line = job_wrapper.get_command_line()
# job_id = job_wrapper.job_id
# log.debug( 'Tool evaluation for job (%s) produced command-line: %s' % ( job_id, base_command_line ) )
if not base_command_line:
raise Exception("Attempting to run a tool with empty command definition.")
commands_builder = CommandsBuilder(base_command_line)
# All job runners currently handle this case which should never occur
if not commands_builder.commands:
return None
# Version, dependency resolution, and task splitting are prepended to the
# command - so they need to appear in the following order to ensure that
# the underlying application used by version command is available in the
# after dependency resolution but the task splitting command still has
# Galaxy's Python environment.
__handle_version_command(commands_builder, job_wrapper)
# One could imagine also allowing dependencies inside of the container but
# that is too sophisticated for a first crack at this - build your
# containers ready to go!
if not container or container.resolve_dependencies:
__handle_dependency_resolution(
commands_builder, job_wrapper, remote_command_params
)
__handle_task_splitting(commands_builder, job_wrapper)
if (
container and modify_command_for_container
) or job_wrapper.commands_in_new_shell:
if container and modify_command_for_container:
# Many Docker containers do not have /bin/bash.
external_command_shell = container.shell
else:
external_command_shell = shell
externalized_commands = __externalize_commands(
job_wrapper, external_command_shell, commands_builder, remote_command_params
)
if container and modify_command_for_container:
# Stop now and build command before handling metadata and copying
# working directory files back. These should always happen outside
# of docker container - no security implications when generating
# metadata and means no need for Galaxy to be available to container
# and not copying workdir outputs back means on can be more restrictive
# of where container can write to in some circumstances.
run_in_container_command = container.containerize_command(
externalized_commands
)
commands_builder = CommandsBuilder(run_in_container_command)
else:
commands_builder = CommandsBuilder(externalized_commands)
# Don't need to create a separate tool working directory for Pulsar
# jobs - that is handled by Pulsar.
if create_tool_working_directory:
# usually working will already exist, but it will not for task
# split jobs.
# Remove the working directory incase this is for instance a SLURM re-submission.
# xref https://github.com/galaxyproject/galaxy/issues/3289
commands_builder.prepend_command("rm -rf working; mkdir -p working; cd working")
if include_work_dir_outputs:
__handle_work_dir_outputs(
commands_builder, job_wrapper, runner, remote_command_params
)
commands_builder.capture_return_code()
if include_metadata and job_wrapper.requires_setting_metadata:
metadata_directory = metadata_directory or job_wrapper.working_directory
commands_builder.append_command("cd '%s'" % metadata_directory)
__handle_metadata(commands_builder, job_wrapper, runner, remote_command_params)
return commands_builder.build()
|
def build_command(
runner,
job_wrapper,
container=None,
modify_command_for_container=True,
include_metadata=False,
include_work_dir_outputs=True,
create_tool_working_directory=True,
remote_command_params={},
metadata_directory=None,
):
"""
Compose the sequence of commands necessary to execute a job. This will
currently include:
- environment settings corresponding to any requirement tags
- preparing input files
- command line taken from job wrapper
- commands to set metadata (if include_metadata is True)
"""
shell = job_wrapper.shell
base_command_line = job_wrapper.get_command_line()
# job_id = job_wrapper.job_id
# log.debug( 'Tool evaluation for job (%s) produced command-line: %s' % ( job_id, base_command_line ) )
if not base_command_line:
raise Exception("Attempting to run a tool with empty command definition.")
commands_builder = CommandsBuilder(base_command_line)
# All job runners currently handle this case which should never occur
if not commands_builder.commands:
return None
__handle_version_command(commands_builder, job_wrapper)
# One could imagine also allowing dependencies inside of the container but
# that is too sophisticated for a first crack at this - build your
# containers ready to go!
if not container or container.resolve_dependencies:
__handle_dependency_resolution(
commands_builder, job_wrapper, remote_command_params
)
__handle_task_splitting(commands_builder, job_wrapper)
if (
container and modify_command_for_container
) or job_wrapper.commands_in_new_shell:
if container and modify_command_for_container:
# Many Docker containers do not have /bin/bash.
external_command_shell = container.shell
else:
external_command_shell = shell
externalized_commands = __externalize_commands(
job_wrapper, external_command_shell, commands_builder, remote_command_params
)
if container and modify_command_for_container:
# Stop now and build command before handling metadata and copying
# working directory files back. These should always happen outside
# of docker container - no security implications when generating
# metadata and means no need for Galaxy to be available to container
# and not copying workdir outputs back means on can be more restrictive
# of where container can write to in some circumstances.
run_in_container_command = container.containerize_command(
externalized_commands
)
commands_builder = CommandsBuilder(run_in_container_command)
else:
commands_builder = CommandsBuilder(externalized_commands)
# Don't need to create a separate tool working directory for Pulsar
# jobs - that is handled by Pulsar.
if create_tool_working_directory:
# usually working will already exist, but it will not for task
# split jobs.
# Remove the working directory incase this is for instance a SLURM re-submission.
# xref https://github.com/galaxyproject/galaxy/issues/3289
commands_builder.prepend_command("rm -rf working; mkdir -p working; cd working")
if include_work_dir_outputs:
__handle_work_dir_outputs(
commands_builder, job_wrapper, runner, remote_command_params
)
commands_builder.capture_return_code()
if include_metadata and job_wrapper.requires_setting_metadata:
metadata_directory = metadata_directory or job_wrapper.working_directory
commands_builder.append_command("cd '%s'" % metadata_directory)
__handle_metadata(commands_builder, job_wrapper, runner, remote_command_params)
return commands_builder.build()
|
https://github.com/galaxyproject/galaxy/issues/4381
|
======================================================================
FAIL: NCBI BLAST+ blastn ( ncbi_blastn_wrapper ) > Test-1
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/peterjc/galaxy_blast/galaxy-dev/test/functional/test_toolbox.py", line 302, in test_tool
self.do_it( td )
File "/home/travis/build/peterjc/galaxy_blast/galaxy-dev/test/functional/test_toolbox.py", line 78, in do_it
raise e
JobOutputsError: Job in error state.
Job in error state.
-------------------- >> begin captured stdout << ---------------------
History with id 3777da040b354424 in error - summary of datasets in error below.
--------------------------------------
| 3 - megablast rhodopsin_nucs.fasta vs 'three_human_mRNA.fasta'
(HID - NAME)
| Dataset Blurb:
| error
| Dataset Info:
| Fatal error:
| /tmp/tmp8JSldT/job_working_directory/000/68/task_0:
| Traceback (most recent call last):
| File "./scripts/extract_dataset_part.py", line 17, in <module>
| import galaxy.model.mapping # need to load this before we unpickle, in order to se
| Dataset Job Standard Output:
| *Standard output was empty.*
| Dataset Job Standard Error:
| Fatal error:
| /tmp/tmp8JSldT/job_working_directory/000/68/task_0:
| Traceback (most recent call last):
| File "./scripts/extract_dataset_part.py", line 17, in <module>
| import galaxy.model.mapping # need to load this before we unpickle, in order to setup properties assigned by the mappers
| File "/home/travis/build/peterjc/galaxy_blast/galaxy-dev/lib/galaxy/model/__init__.py", line 21, in <module>
| from six import string_types
| ModuleNotFoundError: No module named 'six'
|
--------------------------------------
|
JobOutputsError
|
def get_data_inputs(self):
"""Get configure time data input descriptions."""
# Filter subworkflow steps and get inputs
step_to_input_type = {
"data_input": "dataset",
"data_collection_input": "dataset_collection",
}
inputs = []
if hasattr(self.subworkflow, "input_steps"):
for step in self.subworkflow.input_steps:
name = step.label
if not name:
step_module = module_factory.from_workflow_step(self.trans, step)
name = "%s:%s" % (step.order_index, step_module.get_name())
step_type = step.type
assert step_type in step_to_input_type
input = dict(
input_subworkflow_step_id=step.order_index,
name=name,
label=name,
multiple=False,
extensions="input",
input_type=step_to_input_type[step_type],
)
inputs.append(input)
return inputs
|
def get_data_inputs(self):
"""Get configure time data input descriptions."""
# Filter subworkflow steps and get inputs
step_to_input_type = {
"data_input": "dataset",
"data_collection_input": "dataset_collection",
}
inputs = []
if hasattr(self.subworkflow, "input_steps"):
for step in self.subworkflow.input_steps:
name = step.label
if not name:
step_module = module_factory.from_workflow_step(self.trans, step)
name = step_module.get_name()
step_type = step.type
assert step_type in step_to_input_type
input = dict(
input_subworkflow_step_id=step.order_index,
name=name,
label=name,
multiple=False,
extensions="input",
input_type=step_to_input_type[step_type],
)
inputs.append(input)
return inputs
|
https://github.com/galaxyproject/galaxy/issues/3120
|
Traceback (most recent call last):
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 82, in __invoke
outputs = invoker.invoke()
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 160, in invoke
jobs = self._invoke_step( step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 230, in _invoke_step
jobs = step.module.execute( self.trans, self.progress, self.workflow_invocation, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/modules.py", line 435, in execute
subworkflow_invoker = progress.subworkflow_invoker( trans, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 353, in subworkflow_invoker
subworkflow_progress = self.subworkflow_progress(step)
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 381, in subworkflow_progress
is_data=is_data,
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 305, in replacement_for_connection
raise Exception( message )
Exception: Workflow evaluation problem - failed to find output_name 1:out_file1 in step_outputs {None: <galaxy.model.HistoryDatasetAssociation object at 0x7f323f703890>}
|
Exception
|
def get_data_outputs(self):
outputs = []
if hasattr(self.subworkflow, "workflow_outputs"):
for workflow_output in self.subworkflow.workflow_outputs:
if workflow_output.workflow_step.type in {
"data_input",
"data_collection_input",
}:
# It is just confusing to display the input data as output data in subworkflows
continue
output_step = workflow_output.workflow_step
label = workflow_output.label
if not label:
label = "%s:%s" % (output_step.order_index, workflow_output.output_name)
output = dict(
name=label,
label=label,
extensions=["input"], # TODO
)
outputs.append(output)
return outputs
|
def get_data_outputs(self):
outputs = []
if hasattr(self.subworkflow, "workflow_outputs"):
for workflow_output in self.subworkflow.workflow_outputs:
output_step = workflow_output.workflow_step
label = workflow_output.label
if label is None:
label = "%s:%s" % (output_step.order_index, workflow_output.output_name)
output = dict(
name=label,
label=label,
extensions=["input"], # TODO
)
outputs.append(output)
return outputs
|
https://github.com/galaxyproject/galaxy/issues/3120
|
Traceback (most recent call last):
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 82, in __invoke
outputs = invoker.invoke()
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 160, in invoke
jobs = self._invoke_step( step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 230, in _invoke_step
jobs = step.module.execute( self.trans, self.progress, self.workflow_invocation, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/modules.py", line 435, in execute
subworkflow_invoker = progress.subworkflow_invoker( trans, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 353, in subworkflow_invoker
subworkflow_progress = self.subworkflow_progress(step)
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 381, in subworkflow_progress
is_data=is_data,
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 305, in replacement_for_connection
raise Exception( message )
Exception: Workflow evaluation problem - failed to find output_name 1:out_file1 in step_outputs {None: <galaxy.model.HistoryDatasetAssociation object at 0x7f323f703890>}
|
Exception
|
def execute(self, trans, progress, invocation, step):
"""Execute the given workflow step in the given workflow invocation.
Use the supplied workflow progress object to track outputs, find
inputs, etc...
"""
subworkflow_invoker = progress.subworkflow_invoker(trans, step)
subworkflow_invoker.invoke()
subworkflow = subworkflow_invoker.workflow
subworkflow_progress = subworkflow_invoker.progress
outputs = {}
for workflow_output in subworkflow.workflow_outputs:
workflow_output_label = workflow_output.label or "%s:%s" % (
step.order_index,
workflow_output.output_name,
)
replacement = subworkflow_progress.get_replacement_workflow_output(
workflow_output
)
outputs[workflow_output_label] = replacement
progress.set_step_outputs(step, outputs)
return None
|
def execute(self, trans, progress, invocation, step):
"""Execute the given workflow step in the given workflow invocation.
Use the supplied workflow progress object to track outputs, find
inputs, etc...
"""
subworkflow_invoker = progress.subworkflow_invoker(trans, step)
subworkflow_invoker.invoke()
subworkflow = subworkflow_invoker.workflow
subworkflow_progress = subworkflow_invoker.progress
outputs = {}
for workflow_output in subworkflow.workflow_outputs:
workflow_output_label = workflow_output.label
replacement = subworkflow_progress.get_replacement_workflow_output(
workflow_output
)
outputs[workflow_output_label] = replacement
progress.set_step_outputs(step, outputs)
return None
|
https://github.com/galaxyproject/galaxy/issues/3120
|
Traceback (most recent call last):
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 82, in __invoke
outputs = invoker.invoke()
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 160, in invoke
jobs = self._invoke_step( step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 230, in _invoke_step
jobs = step.module.execute( self.trans, self.progress, self.workflow_invocation, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/modules.py", line 435, in execute
subworkflow_invoker = progress.subworkflow_invoker( trans, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 353, in subworkflow_invoker
subworkflow_progress = self.subworkflow_progress(step)
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 381, in subworkflow_progress
is_data=is_data,
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 305, in replacement_for_connection
raise Exception( message )
Exception: Workflow evaluation problem - failed to find output_name 1:out_file1 in step_outputs {None: <galaxy.model.HistoryDatasetAssociation object at 0x7f323f703890>}
|
Exception
|
def get_filter_set(self, connections=None):
filter_set = []
if connections:
for oc in connections:
for ic in oc.input_step.module.get_data_inputs():
if (
"extensions" in ic
and ic["extensions"] != "input"
and ic["name"] == oc.input_name
):
filter_set += ic["extensions"]
if not filter_set:
filter_set = ["data"]
return ", ".join(filter_set)
|
def get_filter_set(self, connections=None):
filter_set = []
if connections:
for oc in connections:
for ic in oc.input_step.module.get_data_inputs():
if "extensions" in ic and ic["name"] == oc.input_name:
filter_set += ic["extensions"]
if not filter_set:
filter_set = ["data"]
return ", ".join(filter_set)
|
https://github.com/galaxyproject/galaxy/issues/3120
|
Traceback (most recent call last):
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 82, in __invoke
outputs = invoker.invoke()
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 160, in invoke
jobs = self._invoke_step( step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 230, in _invoke_step
jobs = step.module.execute( self.trans, self.progress, self.workflow_invocation, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/modules.py", line 435, in execute
subworkflow_invoker = progress.subworkflow_invoker( trans, step )
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 353, in subworkflow_invoker
subworkflow_progress = self.subworkflow_progress(step)
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 381, in subworkflow_progress
is_data=is_data,
File "/home/eteri/GalaxyProject/galaxy/lib/galaxy/workflow/run.py", line 305, in replacement_for_connection
raise Exception( message )
Exception: Workflow evaluation problem - failed to find output_name 1:out_file1 in step_outputs {None: <galaxy.model.HistoryDatasetAssociation object at 0x7f323f703890>}
|
Exception
|
def populate_state(
request_context,
inputs,
incoming,
state,
errors={},
prefix="",
context=None,
check=True,
):
"""
Populates nested state dict from incoming parameter values.
>>> from xml.etree.ElementTree import XML
>>> from galaxy.util.bunch import Bunch
>>> from galaxy.util.odict import odict
>>> from galaxy.tools.parameters.basic import TextToolParameter, BooleanToolParameter
>>> from galaxy.tools.parameters.grouping import Repeat
>>> trans = Bunch( workflow_building_mode=False )
>>> a = TextToolParameter( None, XML( '<param name="a"/>' ) )
>>> b = Repeat()
>>> b.min = 0
>>> b.max = 1
>>> c = TextToolParameter( None, XML( '<param name="c"/>' ) )
>>> d = Repeat()
>>> d.min = 0
>>> d.max = 1
>>> e = TextToolParameter( None, XML( '<param name="e"/>' ) )
>>> f = Conditional()
>>> g = BooleanToolParameter( None, XML( '<param name="g"/>' ) )
>>> h = TextToolParameter( None, XML( '<param name="h"/>' ) )
>>> i = TextToolParameter( None, XML( '<param name="i"/>' ) )
>>> b.name = 'b'
>>> b.inputs = odict([ ('c', c), ('d', d) ])
>>> d.name = 'd'
>>> d.inputs = odict([ ('e', e), ('f', f) ])
>>> f.test_param = g
>>> f.name = 'f'
>>> f.cases = [ Bunch( value='true', inputs= { 'h': h } ), Bunch( value='false', inputs= { 'i': i } ) ]
>>> inputs = odict([('a',a),('b',b)])
>>> flat = odict([ ('a', 1 ), ( 'b_0|c', 2 ), ( 'b_0|d_0|e', 3 ), ( 'b_0|d_0|f|h', 4 ), ( 'b_0|d_0|f|g', True ) ])
>>> state = odict()
>>> populate_state( trans, inputs, flat, state, check=False )
>>> print state[ 'a' ]
1
>>> print state[ 'b' ][ 0 ][ 'c' ]
2
>>> print state[ 'b' ][ 0 ][ 'd' ][ 0 ][ 'e' ]
3
>>> print state[ 'b' ][ 0 ][ 'd' ][ 0 ][ 'f' ][ 'h' ]
4
"""
context = ExpressionContext(state, context)
for input in inputs.values():
state[input.name] = input.get_initial_value(request_context, context)
key = prefix + input.name
group_state = state[input.name]
group_prefix = "%s|" % (key)
if input.type == "repeat":
rep_index = 0
del group_state[:]
while True:
rep_prefix = "%s_%d" % (key, rep_index)
if (
not any(
incoming_key.startswith(rep_prefix)
for incoming_key in incoming.keys()
)
and rep_index >= input.min
):
break
if rep_index < input.max:
new_state = {"__index__": rep_index}
group_state.append(new_state)
populate_state(
request_context,
input.inputs,
incoming,
new_state,
errors,
prefix=rep_prefix + "|",
context=context,
check=check,
)
rep_index += 1
elif input.type == "conditional":
if input.value_ref and not input.value_ref_in_group:
test_param_key = prefix + input.test_param.name
else:
test_param_key = group_prefix + input.test_param.name
test_param_value = incoming.get(
test_param_key, group_state.get(input.test_param.name)
)
value, error = (
check_param(
request_context, input.test_param, test_param_value, context
)
if check
else [test_param_value, None]
)
if error:
errors[test_param_key] = error
else:
try:
current_case = input.get_current_case(value)
group_state = state[input.name] = {}
populate_state(
request_context,
input.cases[current_case].inputs,
incoming,
group_state,
errors,
prefix=group_prefix,
context=context,
check=check,
)
group_state["__current_case__"] = current_case
except Exception:
errors[test_param_key] = "The selected case is unavailable/invalid."
pass
group_state[input.test_param.name] = value
elif input.type == "section":
populate_state(
request_context,
input.inputs,
incoming,
group_state,
errors,
prefix=group_prefix,
context=context,
check=check,
)
elif input.type == "upload_dataset":
d_type = input.get_datatype(request_context, context=context)
writable_files = d_type.writable_files
while len(group_state) > len(writable_files):
del group_state[-1]
while len(writable_files) > len(group_state):
new_state = {"__index__": len(group_state)}
for upload_item in input.inputs.values():
new_state[upload_item.name] = upload_item.get_initial_value(
request_context, context
)
group_state.append(new_state)
for i, rep_state in enumerate(group_state):
rep_index = rep_state["__index__"]
rep_prefix = "%s_%d|" % (key, rep_index)
populate_state(
request_context,
input.inputs,
incoming,
rep_state,
errors,
prefix=rep_prefix,
context=context,
check=check,
)
else:
param_value = _get_incoming_value(incoming, key, state.get(input.name))
value, error = (
check_param(request_context, input, param_value, context)
if check
else [param_value, None]
)
if error:
errors[key] = error
state[input.name] = value
|
def populate_state(
request_context,
inputs,
incoming,
state,
errors={},
prefix="",
context=None,
check=True,
):
"""
Populates nested state dict from incoming parameter values.
>>> from xml.etree.ElementTree import XML
>>> from galaxy.util.bunch import Bunch
>>> from galaxy.util.odict import odict
>>> from galaxy.tools.parameters.basic import TextToolParameter, BooleanToolParameter
>>> from galaxy.tools.parameters.grouping import Repeat
>>> trans = Bunch( workflow_building_mode=False )
>>> a = TextToolParameter( None, XML( '<param name="a"/>' ) )
>>> b = Repeat()
>>> b.min = 0
>>> b.max = 1
>>> c = TextToolParameter( None, XML( '<param name="c"/>' ) )
>>> d = Repeat()
>>> d.min = 0
>>> d.max = 1
>>> e = TextToolParameter( None, XML( '<param name="e"/>' ) )
>>> f = Conditional()
>>> g = BooleanToolParameter( None, XML( '<param name="g"/>' ) )
>>> h = TextToolParameter( None, XML( '<param name="h"/>' ) )
>>> i = TextToolParameter( None, XML( '<param name="i"/>' ) )
>>> b.name = 'b'
>>> b.inputs = odict([ ('c', c), ('d', d) ])
>>> d.name = 'd'
>>> d.inputs = odict([ ('e', e), ('f', f) ])
>>> f.test_param = g
>>> f.name = 'f'
>>> f.cases = [ Bunch( value='true', inputs= { 'h': h } ), Bunch( value='false', inputs= { 'i': i } ) ]
>>> inputs = odict([('a',a),('b',b)])
>>> flat = odict([ ('a', 1 ), ( 'b_0|c', 2 ), ( 'b_0|d_0|e', 3 ), ( 'b_0|d_0|f|h', 4 ), ( 'b_0|d_0|f|g', True ) ])
>>> state = odict()
>>> populate_state( trans, inputs, flat, state, check=False )
>>> print state[ 'a' ]
1
>>> print state[ 'b' ][ 0 ][ 'c' ]
2
>>> print state[ 'b' ][ 0 ][ 'd' ][ 0 ][ 'e' ]
3
>>> print state[ 'b' ][ 0 ][ 'd' ][ 0 ][ 'f' ][ 'h' ]
4
"""
context = ExpressionContext(state, context)
for input in inputs.values():
state[input.name] = input.get_initial_value(request_context, context)
key = prefix + input.name
group_state = state[input.name]
group_prefix = "%s|" % (key)
if input.type == "repeat":
rep_index = 0
del group_state[:]
while True:
rep_prefix = "%s_%d" % (key, rep_index)
if (
not any(
incoming_key.startswith(rep_prefix)
for incoming_key in incoming.keys()
)
and rep_index >= input.min
):
break
if rep_index < input.max:
new_state = {"__index__": rep_index}
group_state.append(new_state)
populate_state(
request_context,
input.inputs,
incoming,
new_state,
errors,
prefix=rep_prefix + "|",
context=context,
)
rep_index += 1
elif input.type == "conditional":
if input.value_ref and not input.value_ref_in_group:
test_param_key = prefix + input.test_param.name
else:
test_param_key = group_prefix + input.test_param.name
test_param_value = incoming.get(
test_param_key, group_state.get(input.test_param.name)
)
value, error = (
check_param(
request_context, input.test_param, test_param_value, context
)
if check
else [test_param_value, None]
)
if error:
errors[test_param_key] = error
else:
try:
current_case = input.get_current_case(value)
group_state = state[input.name] = {}
populate_state(
request_context,
input.cases[current_case].inputs,
incoming,
group_state,
errors,
prefix=group_prefix,
context=context,
)
group_state["__current_case__"] = current_case
except Exception:
errors[test_param_key] = "The selected case is unavailable/invalid."
pass
group_state[input.test_param.name] = value
elif input.type == "section":
populate_state(
request_context,
input.inputs,
incoming,
group_state,
errors,
prefix=group_prefix,
context=context,
)
elif input.type == "upload_dataset":
d_type = input.get_datatype(request_context, context=context)
writable_files = d_type.writable_files
while len(group_state) > len(writable_files):
del group_state[-1]
while len(writable_files) > len(group_state):
new_state = {"__index__": len(group_state)}
for upload_item in input.inputs.values():
new_state[upload_item.name] = upload_item.get_initial_value(
request_context, context
)
group_state.append(new_state)
for i, rep_state in enumerate(group_state):
rep_index = rep_state["__index__"]
rep_prefix = "%s_%d|" % (key, rep_index)
populate_state(
request_context,
input.inputs,
incoming,
rep_state,
errors,
prefix=rep_prefix,
context=context,
)
else:
param_value = _get_incoming_value(incoming, key, state.get(input.name))
value, error = (
check_param(request_context, input, param_value, context)
if check
else [param_value, None]
)
if error:
errors[key] = error
state[input.name] = value
|
https://github.com/galaxyproject/galaxy/issues/4106
|
galaxy.web.framework.decorators ERROR 2017-05-22 12:39:15,677 Uncaught exception in exposed API method:
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 281, in decorator rval = func( self, trans, *args, **kwargs )
File "lib/galaxy/webapps/galaxy/api/workflows.py", line 367, in build_module populate_state( trans, module.get_inputs(), inputs, module_state, check=False )
File "lib/galaxy/tools/parameters/__init__.py", line 277, in populate_state populate_state( request_context, input.inputs, incoming, new_state, errors, prefix=rep_prefix + '|', context=context )
File "lib/galaxy/tools/parameters/__init__.py", line 316, in populate_state value, error = check_param( request_context, input, param_value, context ) if check else [ param_value, None ]
File "lib/galaxy/tools/parameters/__init__.py", line 132, in check_param value = param.from_json( value, trans, param_values )
File "lib/galaxy/tools/parameters/basic.py", line 1635, in from_json rval = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( value )
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 831, in get return self._get_impl(ident, loading.load_on_ident)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 864, in _get_impl return fallback_fn(self, key)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/loading.py", line 219, in load_on_ident return q.one()
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2718, in one ret = list(self)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2761, in __iter__ return self._execute_and_instances(context)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2776, in _execute_and_instances result = conn.execute(querycontext.statement, self._params)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 914, in execute return meth(self, multiparams, params)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/sql/elements.py", line 323, in _execute_on_connection return connection._execute_clauseelement(self, multiparams, params)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1010, in _execute_clauseelement compiled_sql, distilled_params
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1146, in _execute_context context)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1341, in _handle_dbapi_exception exc_info
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 202, in raise_from_cause reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_context context)
File "/mnt/galaxy/galaxy-dist/.venv/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 450, in do_execute cursor.execute(statement, parameters) DataError: (psycopg2.DataError) invalid input syntax for integer: "__class__" LINE 3: WHERE history_dataset_association.id = '__class__' ^ [SQL: 'SELECT history_dataset_association.id AS history_dataset_association_id, history_dataset_association.history_id AS history_dataset_association_history_id, history_dataset_association.dataset_id AS history_dataset_association_dataset_id, history_dataset_association.create_time AS history_dataset_association_create_time, history_dataset_association.update_time AS history_dataset_association_update_time, history_dataset_association.state AS history_dataset_
|
DataError
|
def reload_data_managers(app, **kwargs):
from galaxy.tools.data_manager.manager import DataManagers
from galaxy.tools.toolbox.lineages.tool_shed import ToolVersionCache
log.debug("Executing data managers reload on '%s'", app.config.server_name)
app._configure_tool_data_tables(from_shed_config=False)
reload_tool_data_tables(app)
reload_count = app.data_managers._reload_count
app.data_managers = DataManagers(app, conf_watchers=app.data_managers.conf_watchers)
app.data_managers._reload_count = reload_count + 1
app.tool_version_cache = ToolVersionCache(app)
|
def reload_data_managers(app, **kwargs):
from galaxy.tools.data_manager.manager import DataManagers
log.debug("Executing data managers reload on '%s'", app.config.server_name)
app._configure_tool_data_tables(from_shed_config=False)
reload_tool_data_tables(app)
reload_count = app.data_managers._reload_count
app.data_managers = DataManagers(app, conf_watchers=app.data_managers.conf_watchers)
app.data_managers._reload_count = reload_count + 1
|
https://github.com/galaxyproject/galaxy/issues/3902
|
galaxy.web.framework.decorators ERROR 2017-04-08 10:22:00,380 Uncaught exception in exposed API method:
Traceback (most recent call last):
File "/var/galaxy/galaxy/lib/galaxy/web/framework/decorators.py", line 282, in decorator
rval = func( self, trans, *args, **kwargs )
File "/var/galaxy/galaxy/lib/galaxy/webapps/galaxy/api/tools.py", line 98, in build
return tool.to_json(trans, kwd.get('inputs', kwd))
File "/var/galaxy/galaxy/lib/galaxy/tools/__init__.py", line 1874, in to_json
tools = self.app.toolbox.get_loaded_tools_by_lineage( self.id )
File "/var/galaxy/galaxy/lib/galaxy/tools/toolbox/base.py", line 492, in get_loaded_tools_by_lineage
tool_lineage = self._lineage_map.get( tool_id )
File "/var/galaxy/galaxy/lib/galaxy/tools/toolbox/lineages/factory.py", line 38, in get
lineage = ToolShedLineage.from_tool_id( self.app, tool_id )
File "/var/galaxy/galaxy/lib/galaxy/tools/toolbox/lineages/tool_shed.py", line 70, in from_tool_id
return ToolShedLineage( app, tool_version )
File "/var/galaxy/galaxy/lib/galaxy/tools/toolbox/lineages/tool_shed.py", line 53, in __init__
self.tool_version_id = tool_version.id
File "/var/galaxy/galaxy/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py", line 237, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/var/galaxy/galaxy/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py", line 578, in get
value = state._load_expired(state, passive)
File "/var/galaxy/galaxy/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/state.py", line 474, in _load_expired
self.manager.deferred_scalar_loader(self, toload)
File "/var/galaxy/galaxy/.venv/local/lib/python2.7/site-packages/sqlalchemy/orm/loading.py", line 610, in load_scalar_attributes
(state_str(state)))
DetachedInstanceError: Instance <ToolVersion at 0x7f20d07ef390> is not bound to a Session; attribute refresh operation cannot proceed
|
DetachedInstanceError
|
def __init__(self, job_wrapper, job_destination):
self.runner_state_handled = False
self.job_wrapper = job_wrapper
self.job_destination = job_destination
self.cleanup_file_attributes = [
"job_file",
"output_file",
"error_file",
"exit_code_file",
]
|
def __init__(self, job_wrapper, job_destination):
self.runner_state_handled = False
self.job_wrapper = job_wrapper
self.job_destination = job_destination
|
https://github.com/galaxyproject/galaxy/issues/3801
|
galaxy.jobs.runners ERROR 2017-03-22 11:53:04,424 (1778) Unhandled exception calling queue_job
Traceback (most recent call last):
File "galaxy/lib/galaxy/jobs/runners/__init__.py", line 104, in run_next
method(arg)
File "galaxy/lib/galaxy/jobs/runners/local.py", line 133, in queue_job
self._fail_job_local(job_wrapper, "Unable to finish job")
File "galaxy/lib/galaxy/jobs/runners/local.py", line 173, in _fail_job_local
self.fail_job(job_state, exception=True)
File "galaxy/lib/galaxy/jobs/runners/__init__.py", line 381, in fail_job
job_state.cleanup()
AttributeError: 'JobState' object has no attribute 'cleanup'
|
AttributeError
|
def __init__(
self,
files_dir=None,
job_wrapper=None,
job_id=None,
job_file=None,
output_file=None,
error_file=None,
exit_code_file=None,
job_name=None,
job_destination=None,
):
super(AsynchronousJobState, self).__init__(job_wrapper, job_destination)
self.old_state = None
self._running = False
self.check_count = 0
self.start_time = None
# job_id is the DRM's job id, not the Galaxy job id
self.job_id = job_id
self.job_file = job_file
self.output_file = output_file
self.error_file = error_file
self.exit_code_file = exit_code_file
self.job_name = job_name
self.set_defaults(files_dir)
|
def __init__(
self,
files_dir=None,
job_wrapper=None,
job_id=None,
job_file=None,
output_file=None,
error_file=None,
exit_code_file=None,
job_name=None,
job_destination=None,
):
super(AsynchronousJobState, self).__init__(job_wrapper, job_destination)
self.old_state = None
self._running = False
self.check_count = 0
self.start_time = None
# job_id is the DRM's job id, not the Galaxy job id
self.job_id = job_id
self.job_file = job_file
self.output_file = output_file
self.error_file = error_file
self.exit_code_file = exit_code_file
self.job_name = job_name
self.set_defaults(files_dir)
self.cleanup_file_attributes = [
"job_file",
"output_file",
"error_file",
"exit_code_file",
]
|
https://github.com/galaxyproject/galaxy/issues/3801
|
galaxy.jobs.runners ERROR 2017-03-22 11:53:04,424 (1778) Unhandled exception calling queue_job
Traceback (most recent call last):
File "galaxy/lib/galaxy/jobs/runners/__init__.py", line 104, in run_next
method(arg)
File "galaxy/lib/galaxy/jobs/runners/local.py", line 133, in queue_job
self._fail_job_local(job_wrapper, "Unable to finish job")
File "galaxy/lib/galaxy/jobs/runners/local.py", line 173, in _fail_job_local
self.fail_job(job_state, exception=True)
File "galaxy/lib/galaxy/jobs/runners/__init__.py", line 381, in fail_job
job_state.cleanup()
AttributeError: 'JobState' object has no attribute 'cleanup'
|
AttributeError
|
def cleanup(self):
for file in [
getattr(self, a) for a in self.cleanup_file_attributes if hasattr(self, a)
]:
try:
os.unlink(file)
except Exception as e:
# TODO: Move this prefix stuff to a method so we don't have dispatch on attributes we may or may
# not have.
if not hasattr(self, "job_id"):
prefix = "(%s)" % self.job_wrapper.get_id_tag()
else:
prefix = "(%s/%s)" % (self.job_wrapper.get_id_tag(), self.job_id)
log.debug("%s Unable to cleanup %s: %s" % (prefix, file, str(e)))
|
def cleanup(self):
for file in [
getattr(self, a) for a in self.cleanup_file_attributes if hasattr(self, a)
]:
try:
os.unlink(file)
except Exception as e:
log.debug(
"(%s/%s) Unable to cleanup %s: %s"
% (self.job_wrapper.get_id_tag(), self.job_id, file, str(e))
)
|
https://github.com/galaxyproject/galaxy/issues/3801
|
galaxy.jobs.runners ERROR 2017-03-22 11:53:04,424 (1778) Unhandled exception calling queue_job
Traceback (most recent call last):
File "galaxy/lib/galaxy/jobs/runners/__init__.py", line 104, in run_next
method(arg)
File "galaxy/lib/galaxy/jobs/runners/local.py", line 133, in queue_job
self._fail_job_local(job_wrapper, "Unable to finish job")
File "galaxy/lib/galaxy/jobs/runners/local.py", line 173, in _fail_job_local
self.fail_job(job_state, exception=True)
File "galaxy/lib/galaxy/jobs/runners/__init__.py", line 381, in fail_job
job_state.cleanup()
AttributeError: 'JobState' object has no attribute 'cleanup'
|
AttributeError
|
def queue_job(self, job_wrapper):
"""Create job script and submit it to the DRM"""
# prepare the job
include_metadata = asbool(
job_wrapper.job_destination.params.get("embed_metadata_in_job", True)
)
if not self.prepare_job(job_wrapper, include_metadata=include_metadata):
return
# get configured job destination
job_destination = job_wrapper.job_destination
# wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
galaxy_id_tag = job_wrapper.get_id_tag()
# get destination params
query_params = submission_params(prefix="", **job_destination.params)
container = None
universe = query_params.get("universe", None)
if universe and universe.strip().lower() == "docker":
container = self._find_container(job_wrapper)
if container:
# HTCondor needs the image as 'docker_image'
query_params.update({"docker_image": container})
galaxy_slots = query_params.get("request_cpus", None)
if galaxy_slots:
galaxy_slots_statement = (
'GALAXY_SLOTS="%s"; export GALAXY_SLOTS_CONFIGURED="1"' % galaxy_slots
)
else:
galaxy_slots_statement = 'GALAXY_SLOTS="1"'
# define job attributes
cjs = CondorJobState(
files_dir=self.app.config.cluster_files_directory, job_wrapper=job_wrapper
)
cluster_directory = self.app.config.cluster_files_directory
cjs.user_log = os.path.join(
cluster_directory, "galaxy_%s.condor.log" % galaxy_id_tag
)
cjs.register_cleanup_file_attribute("user_log")
submit_file = os.path.join(
cluster_directory, "galaxy_%s.condor.desc" % galaxy_id_tag
)
executable = cjs.job_file
build_submit_params = dict(
executable=executable,
output=cjs.output_file,
error=cjs.error_file,
user_log=cjs.user_log,
query_params=query_params,
)
submit_file_contents = build_submit_description(**build_submit_params)
script = self.get_job_file(
job_wrapper,
exit_code_path=cjs.exit_code_file,
slots_statement=galaxy_slots_statement,
)
try:
self.write_executable_script(executable, script)
except:
job_wrapper.fail("failure preparing job script", exception=True)
log.exception("(%s) failure preparing job script" % galaxy_id_tag)
return
cleanup_job = job_wrapper.cleanup_job
try:
open(submit_file, "w").write(submit_file_contents)
except Exception:
if cleanup_job == "always":
cjs.cleanup()
# job_wrapper.fail() calls job_wrapper.cleanup()
job_wrapper.fail("failure preparing submit file", exception=True)
log.exception("(%s) failure preparing submit file" % galaxy_id_tag)
return
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug("Job %s deleted by user before it entered the queue" % galaxy_id_tag)
if cleanup_job in ("always", "onsuccess"):
os.unlink(submit_file)
cjs.cleanup()
job_wrapper.cleanup()
return
log.debug("(%s) submitting file %s" % (galaxy_id_tag, executable))
external_job_id, message = condor_submit(submit_file)
if external_job_id is None:
log.debug(
"condor_submit failed for job %s: %s" % (job_wrapper.get_id_tag(), message)
)
if self.app.config.cleanup_job == "always":
os.unlink(submit_file)
cjs.cleanup()
job_wrapper.fail("condor_submit failed", exception=True)
return
os.unlink(submit_file)
log.info("(%s) queued as %s" % (galaxy_id_tag, external_job_id))
# store runner information for tracking if Galaxy restarts
job_wrapper.set_job_destination(job_destination, external_job_id)
# Store DRM related state information for job
cjs.job_id = external_job_id
cjs.job_destination = job_destination
# Add to our 'queue' of jobs to monitor
self.monitor_queue.put(cjs)
|
def queue_job(self, job_wrapper):
"""Create job script and submit it to the DRM"""
# prepare the job
include_metadata = asbool(
job_wrapper.job_destination.params.get("embed_metadata_in_job", True)
)
if not self.prepare_job(job_wrapper, include_metadata=include_metadata):
return
# get configured job destination
job_destination = job_wrapper.job_destination
# wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
galaxy_id_tag = job_wrapper.get_id_tag()
# get destination params
query_params = submission_params(prefix="", **job_destination.params)
container = None
universe = query_params.get("universe", None)
if universe and universe.strip().lower() == "docker":
container = self.find_container(job_wrapper)
if container:
# HTCondor needs the image as 'docker_image'
query_params.update({"docker_image": container})
galaxy_slots = query_params.get("request_cpus", None)
if galaxy_slots:
galaxy_slots_statement = (
'GALAXY_SLOTS="%s"; export GALAXY_SLOTS_CONFIGURED="1"' % galaxy_slots
)
else:
galaxy_slots_statement = 'GALAXY_SLOTS="1"'
# define job attributes
cjs = CondorJobState(
files_dir=self.app.config.cluster_files_directory, job_wrapper=job_wrapper
)
cluster_directory = self.app.config.cluster_files_directory
cjs.user_log = os.path.join(
cluster_directory, "galaxy_%s.condor.log" % galaxy_id_tag
)
cjs.register_cleanup_file_attribute("user_log")
submit_file = os.path.join(
cluster_directory, "galaxy_%s.condor.desc" % galaxy_id_tag
)
executable = cjs.job_file
build_submit_params = dict(
executable=executable,
output=cjs.output_file,
error=cjs.error_file,
user_log=cjs.user_log,
query_params=query_params,
)
submit_file_contents = build_submit_description(**build_submit_params)
script = self.get_job_file(
job_wrapper,
exit_code_path=cjs.exit_code_file,
slots_statement=galaxy_slots_statement,
)
try:
self.write_executable_script(executable, script)
except:
job_wrapper.fail("failure preparing job script", exception=True)
log.exception("(%s) failure preparing job script" % galaxy_id_tag)
return
cleanup_job = job_wrapper.cleanup_job
try:
open(submit_file, "w").write(submit_file_contents)
except Exception:
if cleanup_job == "always":
cjs.cleanup()
# job_wrapper.fail() calls job_wrapper.cleanup()
job_wrapper.fail("failure preparing submit file", exception=True)
log.exception("(%s) failure preparing submit file" % galaxy_id_tag)
return
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug("Job %s deleted by user before it entered the queue" % galaxy_id_tag)
if cleanup_job in ("always", "onsuccess"):
os.unlink(submit_file)
cjs.cleanup()
job_wrapper.cleanup()
return
log.debug("(%s) submitting file %s" % (galaxy_id_tag, executable))
external_job_id, message = condor_submit(submit_file)
if external_job_id is None:
log.debug(
"condor_submit failed for job %s: %s" % (job_wrapper.get_id_tag(), message)
)
if self.app.config.cleanup_job == "always":
os.unlink(submit_file)
cjs.cleanup()
job_wrapper.fail("condor_submit failed", exception=True)
return
os.unlink(submit_file)
log.info("(%s) queued as %s" % (galaxy_id_tag, external_job_id))
# store runner information for tracking if Galaxy restarts
job_wrapper.set_job_destination(job_destination, external_job_id)
# Store DRM related state information for job
cjs.job_id = external_job_id
cjs.job_destination = job_destination
# Add to our 'queue' of jobs to monitor
self.monitor_queue.put(cjs)
|
https://github.com/galaxyproject/galaxy/issues/3455
|
Traceback (most recent call last):
File "/galaxy-central/lib/galaxy/jobs/runners/__init__.py", line 104, in run_next
method(arg)
File "/galaxy-central/lib/galaxy/jobs/runners/condor.py", line 69, in queue_job
container = self.find_container( job_wrapper )
AttributeError: 'CondorJobRunner' object has no attribute 'find_container'
|
AttributeError
|
def _write_integrated_tool_panel_config_file(self):
"""
Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
use this file to manage the tool panel, we'll not use xml_to_string() since it doesn't write XML quite right.
"""
tracking_directory = self._integrated_tool_panel_tracking_directory
if not tracking_directory:
fd, filename = tempfile.mkstemp()
else:
if not os.path.exists(tracking_directory):
os.makedirs(tracking_directory)
name = "integrated_tool_panel_%.10f.xml" % time.time()
filename = os.path.join(tracking_directory, name)
open_file = open(filename, "w")
fd = open_file.fileno()
os.write(fd, '<?xml version="1.0"?>\n')
os.write(fd, "<toolbox>\n")
os.write(fd, " <!--\n ")
os.write(
fd,
"\n ".join([l for l in INTEGRATED_TOOL_PANEL_DESCRIPTION.split("\n") if l]),
)
os.write(fd, "\n -->\n")
for key, item_type, item in self._integrated_tool_panel.panel_items_iter():
if item:
if item_type == panel_item_types.TOOL:
os.write(fd, ' <tool id="%s" />\n' % item.id)
elif item_type == panel_item_types.WORKFLOW:
os.write(fd, ' <workflow id="%s" />\n' % item.id)
elif item_type == panel_item_types.LABEL:
label_id = item.id or ""
label_text = item.text or ""
label_version = item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
elif item_type == panel_item_types.SECTION:
section_id = item.id or ""
section_name = item.name or ""
section_version = item.version or ""
os.write(
fd,
' <section id="%s" name="%s" version="%s">\n'
% (escape(section_id), escape(section_name), section_version),
)
for (
section_key,
section_item_type,
section_item,
) in item.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
if section_item:
os.write(fd, ' <tool id="%s" />\n' % section_item.id)
elif section_item_type == panel_item_types.WORKFLOW:
if section_item:
os.write(
fd, ' <workflow id="%s" />\n' % section_item.id
)
elif section_item_type == panel_item_types.LABEL:
if section_item:
label_id = section_item.id or ""
label_text = section_item.text or ""
label_version = section_item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
os.write(fd, " </section>\n")
os.write(fd, "</toolbox>\n")
os.close(fd)
destination = os.path.abspath(self._integrated_tool_panel_config)
if tracking_directory:
open(filename + ".stack", "w").write("".join(traceback.format_stack()))
shutil.copy(filename, filename + ".copy")
filename = filename + ".copy"
shutil.move(filename, destination)
os.chmod(self._integrated_tool_panel_config, 0o644)
|
def _write_integrated_tool_panel_config_file(self):
"""
Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
use this file to manage the tool panel, we'll not use xml_to_string() since it doesn't write XML quite right.
"""
tracking_directory = self._integrated_tool_panel_tracking_directory
if not tracking_directory:
fd, filename = tempfile.mkstemp()
else:
if not os.path.exists(tracking_directory):
os.makedirs(tracking_directory)
name = "integrated_tool_panel_%.10f.xml" % time.time()
filename = os.path.join(tracking_directory, name)
open_file = open(filename, "w")
fd = open_file.fileno()
os.write(fd, '<?xml version="1.0"?>\n')
os.write(fd, "<toolbox>\n")
os.write(fd, " <!--\n ")
os.write(
fd,
"\n ".join([l for l in INTEGRATED_TOOL_PANEL_DESCRIPTION.split("\n") if l]),
)
os.write(fd, "\n -->\n")
for key, item_type, item in self._integrated_tool_panel.panel_items_iter():
if item:
if item_type == panel_item_types.TOOL:
os.write(fd, ' <tool id="%s" />\n' % item.id)
elif item_type == panel_item_types.WORKFLOW:
os.write(fd, ' <workflow id="%s" />\n' % item.id)
elif item_type == panel_item_types.LABEL:
label_id = item.id or ""
label_text = item.text or ""
label_version = item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
elif item_type == panel_item_types.SECTION:
section_id = item.id or ""
section_name = item.name or ""
section_version = item.version or ""
os.write(
fd,
' <section id="%s" name="%s" version="%s">\n'
% (section_id, section_name, section_version),
)
for (
section_key,
section_item_type,
section_item,
) in item.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
if section_item:
os.write(fd, ' <tool id="%s" />\n' % section_item.id)
elif section_item_type == panel_item_types.WORKFLOW:
if section_item:
os.write(
fd, ' <workflow id="%s" />\n' % section_item.id
)
elif section_item_type == panel_item_types.LABEL:
if section_item:
label_id = section_item.id or ""
label_text = section_item.text or ""
label_version = section_item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
os.write(fd, " </section>\n")
os.write(fd, "</toolbox>\n")
os.close(fd)
destination = os.path.abspath(self._integrated_tool_panel_config)
if tracking_directory:
open(filename + ".stack", "w").write("".join(traceback.format_stack()))
shutil.copy(filename, filename + ".copy")
filename = filename + ".copy"
shutil.move(filename, destination)
os.chmod(self._integrated_tool_panel_config, 0o644)
|
https://github.com/galaxyproject/galaxy/issues/3084
|
...
galaxy.jobs DEBUG 2016-10-24 14:39:57,371 Loading job configuration from ./config/job_conf.xml
galaxy.jobs DEBUG 2016-10-24 14:39:57,372 Read definition for handler 'main'
galaxy.jobs INFO 2016-10-24 14:39:57,373 Setting <handlers> default to child with id 'main'
galaxy.jobs DEBUG 2016-10-24 14:39:57,374 <destinations> default set to child with id or tag 'all.q'
galaxy.jobs DEBUG 2016-10-24 14:39:57,374 Done loading job configuration
beaker.container DEBUG 2016-10-24 14:39:58,124 data file ./database/citations/data/container_file/4/48/48e563f148dc04d8b31c94878c138019862e580d.cache
Traceback (most recent call last):
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/webapps/galaxy/buildapp.py", line 63, in paste_app_factory
app = galaxy.app.UniverseApplication( global_conf=global_conf, **kwargs )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/app.py", line 93, in __init__
self._configure_toolbox()
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/config.py", line 775, in _configure_toolbox
self.reload_toolbox()
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/config.py", line 761, in reload_toolbox
self.toolbox = tools.ToolBox( tool_configs, self.config.tool_path, self )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/__init__.py", line 111, in __init__
app=app,
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/base.py", line 1051, in __init__
super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/base.py", line 71, in __init__
self._init_integrated_tool_panel( app.config )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/integrated_panel.py", line 36, in _init_integrated_tool_panel
self._load_integrated_tool_panel_keys()
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/base.py", line 355, in _load_integrated_tool_panel_keys
tree = parse_xml( self._integrated_tool_panel_config )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/util/__init__.py", line 187, in parse_xml
root = tree.parse( fname, parser=ElementTree.XMLParser( target=DoctypeSafeCallbackTarget() ) )
File "/mnt/shared/galaxy/apps/python/2.7.11/lib/python2.7/xml/etree/ElementTree.py", line 656, in parse
parser.feed(data)
File "/mnt/shared/galaxy/apps/python/2.7.11/lib/python2.7/xml/etree/ElementTree.py", line 1642, in feed
self._raiseerror(v)
File "/mnt/shared/galaxy/apps/python/2.7.11/lib/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
raise err
ParseError: not well-formed (invalid token): line 164, column 40
Removing PID file paster.pid
|
ParseError
|
def parse_xml(fname):
"""Returns a parsed xml tree"""
# handle deprecation warning for XMLParsing a file with DOCTYPE
class DoctypeSafeCallbackTarget(ElementTree.TreeBuilder):
def doctype(*args):
pass
tree = ElementTree.ElementTree()
try:
root = tree.parse(
fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget())
)
except ParseError:
log.exception("Error parsing file %s", fname)
raise
ElementInclude.include(root)
return tree
|
def parse_xml(fname):
"""Returns a parsed xml tree"""
# handle deprecation warning for XMLParsing a file with DOCTYPE
class DoctypeSafeCallbackTarget(ElementTree.TreeBuilder):
def doctype(*args):
pass
tree = ElementTree.ElementTree()
root = tree.parse(
fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget())
)
ElementInclude.include(root)
return tree
|
https://github.com/galaxyproject/galaxy/issues/3084
|
...
galaxy.jobs DEBUG 2016-10-24 14:39:57,371 Loading job configuration from ./config/job_conf.xml
galaxy.jobs DEBUG 2016-10-24 14:39:57,372 Read definition for handler 'main'
galaxy.jobs INFO 2016-10-24 14:39:57,373 Setting <handlers> default to child with id 'main'
galaxy.jobs DEBUG 2016-10-24 14:39:57,374 <destinations> default set to child with id or tag 'all.q'
galaxy.jobs DEBUG 2016-10-24 14:39:57,374 Done loading job configuration
beaker.container DEBUG 2016-10-24 14:39:58,124 data file ./database/citations/data/container_file/4/48/48e563f148dc04d8b31c94878c138019862e580d.cache
Traceback (most recent call last):
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/webapps/galaxy/buildapp.py", line 63, in paste_app_factory
app = galaxy.app.UniverseApplication( global_conf=global_conf, **kwargs )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/app.py", line 93, in __init__
self._configure_toolbox()
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/config.py", line 775, in _configure_toolbox
self.reload_toolbox()
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/config.py", line 761, in reload_toolbox
self.toolbox = tools.ToolBox( tool_configs, self.config.tool_path, self )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/__init__.py", line 111, in __init__
app=app,
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/base.py", line 1051, in __init__
super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/base.py", line 71, in __init__
self._init_integrated_tool_panel( app.config )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/integrated_panel.py", line 36, in _init_integrated_tool_panel
self._load_integrated_tool_panel_keys()
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/tools/toolbox/base.py", line 355, in _load_integrated_tool_panel_keys
tree = parse_xml( self._integrated_tool_panel_config )
File "/mnt/shared/galaxy/galaxy-dist/lib/galaxy/util/__init__.py", line 187, in parse_xml
root = tree.parse( fname, parser=ElementTree.XMLParser( target=DoctypeSafeCallbackTarget() ) )
File "/mnt/shared/galaxy/apps/python/2.7.11/lib/python2.7/xml/etree/ElementTree.py", line 656, in parse
parser.feed(data)
File "/mnt/shared/galaxy/apps/python/2.7.11/lib/python2.7/xml/etree/ElementTree.py", line 1642, in feed
self._raiseerror(v)
File "/mnt/shared/galaxy/apps/python/2.7.11/lib/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
raise err
ParseError: not well-formed (invalid token): line 164, column 40
Removing PID file paster.pid
|
ParseError
|
def start(self):
if not self._active:
self._active = True
register_postfork_function(self.thread.start)
|
def start(self):
if not self._active:
self._active = True
self.thread.start()
|
https://github.com/galaxyproject/galaxy/issues/2831
|
$ . .venv/bin/activate
(.venv)$ pip install watchdog
(.venv)$ tox -e py27-unit
...
======================================================================
FAIL: tools.test_watcher.test_tool_conf_watcher
----------------------------------------------------------------------
Traceback (most recent call last):
File "/opt/galaxy/.venv/local/lib/python2.7/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/opt/galaxy/test/unit/tools/test_watcher.py", line 44, in test_tool_conf_watcher
wait_for_reload(lambda: callback.called)
File "/opt/galaxy/test/unit/tools/test_watcher.py", line 56, in wait_for_reload
assert reloaded
AssertionError
----------------------------------------------------------------------
Ran 787 tests in 73.355s
FAILED (SKIP=2, failures=1)
|
AssertionError
|
def monitor(self, path):
mod_time = None
if os.path.exists(path):
mod_time = time.ctime(os.path.getmtime(path))
with self._lock:
self.paths[path] = mod_time
self.start()
|
def monitor(self, path):
mod_time = None
if os.path.exists(path):
mod_time = time.ctime(os.path.getmtime(path))
with self._lock:
self.paths[path] = mod_time
|
https://github.com/galaxyproject/galaxy/issues/2831
|
$ . .venv/bin/activate
(.venv)$ pip install watchdog
(.venv)$ tox -e py27-unit
...
======================================================================
FAIL: tools.test_watcher.test_tool_conf_watcher
----------------------------------------------------------------------
Traceback (most recent call last):
File "/opt/galaxy/.venv/local/lib/python2.7/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/opt/galaxy/test/unit/tools/test_watcher.py", line 44, in test_tool_conf_watcher
wait_for_reload(lambda: callback.called)
File "/opt/galaxy/test/unit/tools/test_watcher.py", line 56, in wait_for_reload
assert reloaded
AssertionError
----------------------------------------------------------------------
Ran 787 tests in 73.355s
FAILED (SKIP=2, failures=1)
|
AssertionError
|
def encode_dict_ids(self, a_dict, kind=None, skip_startswith=None):
"""
Encode all ids in dictionary. Ids are identified by (a) an 'id' key or
(b) a key that ends with '_id'
"""
for key, val in a_dict.items():
if (
key == "id"
or key.endswith("_id")
and (skip_startswith is None or not key.startswith(skip_startswith))
):
a_dict[key] = self.encode_id(val, kind=kind)
return a_dict
|
def encode_dict_ids(self, a_dict, kind=None):
"""
Encode all ids in dictionary. Ids are identified by (a) an 'id' key or
(b) a key that ends with '_id'
"""
for key, val in a_dict.items():
if key == "id" or key.endswith("_id"):
a_dict[key] = self.encode_id(val, kind=kind)
return a_dict
|
https://github.com/galaxyproject/galaxy/issues/2423
|
ERROR: Convert BIOM ( biom_convert ) > Test-1
----------------------------------------------------------------------
Traceback (most recent call last):
File "/tmp/tmp_EztYh/galaxy-dev/test/functional/test_toolbox.py", line 299, in test_tool
self.do_it( td )
File "/tmp/tmp_EztYh/galaxy-dev/test/functional/test_toolbox.py", line 60, in do_it
raise e
RunToolException: Error creating a job for these tool inputs - Attempted to encode None id
|
RunToolException
|
def execute(cls, app, sa_session, action, job, replacement_dict):
# TODO Optimize this later. Just making it work for now.
# TODO Support purging as well as deletion if user_purge is enabled.
# Dataset candidates for deletion must be
# 1) Created by the workflow.
# 2) Not have any job_to_input_dataset associations with states other
# than OK or DELETED. If a step errors, we don't want to delete/purge it
# automatically.
# 3) Not marked as a workflow output.
# POTENTIAL ISSUES: When many outputs are being finish()ed
# concurrently, sometimes non-terminal steps won't be cleaned up
# because of the lag in job state updates.
sa_session.flush()
if not job.workflow_invocation_step:
log.debug(
"This job is not part of a workflow invocation, delete intermediates aborted."
)
return
wfi = job.workflow_invocation_step.workflow_invocation
sa_session.refresh(wfi)
if wfi.active:
log.debug(
"Workflow still scheduling so new jobs may appear, skipping deletion of intermediate files."
)
# Still evaluating workflow so we don't yet have all workflow invocation
# steps to start looking at.
return
outputs_defined = wfi.workflow.has_outputs_defined()
if outputs_defined:
wfi_steps = [
wfistep
for wfistep in wfi.steps
if not wfistep.workflow_step.workflow_outputs
and wfistep.workflow_step.type == "tool"
]
jobs_to_check = []
for wfi_step in wfi_steps:
sa_session.refresh(wfi_step)
wfi_step_job = wfi_step.job
if wfi_step_job:
jobs_to_check.append(wfi_step_job)
else:
log.debug(
"No job found yet for wfi_step %s, (step %s)"
% (wfi_step, wfi_step.workflow_step)
)
for j2c in jobs_to_check:
sa_session.refresh(j2c)
creating_jobs = []
for input_dataset in j2c.input_datasets:
sa_session.refresh(input_dataset)
sa_session.refresh(input_dataset.dataset.creating_job)
creating_jobs.append(
(input_dataset, input_dataset.dataset.creating_job)
)
for input_dataset in [
x.dataset
for (x, creating_job) in creating_jobs
if creating_job.workflow_invocation_step
and creating_job.workflow_invocation_step.workflow_invocation == wfi
]:
safe_to_delete = True
for job_to_check in [d_j.job for d_j in input_dataset.dependent_jobs]:
if job_to_check != job and job_to_check.state not in [
job.states.OK,
job.states.DELETED,
]:
log.debug(
"Workflow Intermediates cleanup attempted, but non-terminal state '%s' detected for job %s"
% (job_to_check.state, job_to_check.id)
)
safe_to_delete = False
if safe_to_delete:
# Support purging here too.
input_dataset.mark_deleted()
else:
# No workflow outputs defined, so we can't know what to delete.
# We could make this work differently in the future
pass
|
def execute(cls, app, sa_session, action, job, replacement_dict):
# TODO Optimize this later. Just making it work for now.
# TODO Support purging as well as deletion if user_purge is enabled.
# Dataset candidates for deletion must be
# 1) Created by the workflow.
# 2) Not have any job_to_input_dataset associations with states other
# than OK or DELETED. If a step errors, we don't want to delete/purge it
# automatically.
# 3) Not marked as a workflow output.
# POTENTIAL ISSUES: When many outputs are being finish()ed
# concurrently, sometimes non-terminal steps won't be cleaned up
# because of the lag in job state updates.
sa_session.flush()
if not job.workflow_invocation_step:
log.debug(
"This job is not part of a workflow invocation, delete intermediates aborted."
)
return
wfi = job.workflow_invocation_step.workflow_invocation
sa_session.refresh(wfi)
if wfi.active:
log.debug(
"Workflow still scheduling so new jobs may appear, skipping deletion of intermediate files."
)
# Still evaluating workflow so we don't yet have all workflow invocation
# steps to start looking at.
return
outputs_defined = wfi.workflow.has_outputs_defined()
if outputs_defined:
wfi_steps = [
wfistep
for wfistep in wfi.steps
if not wfistep.workflow_step.workflow_outputs
and wfistep.workflow_step.type == "tool"
]
jobs_to_check = []
for wfi_step in wfi_steps:
sa_session.refresh(wfi_step)
wfi_step_job = wfi_step.job
if wfi_step_job:
jobs_to_check.append(wfi_step_job)
else:
log.debug(
"No job found yet for wfi_step %s, (step %s)"
% (wfi_step, wfi_step.workflow_step)
)
for j2c in jobs_to_check:
creating_jobs = [
(x, x.dataset.creating_job)
for x in j2c.input_datasets
if x.dataset.creating_job
]
for x, creating_job in creating_jobs:
sa_session.refresh(creating_job)
sa_session.refresh(x)
for input_dataset in [
x.dataset
for (x, creating_job) in creating_jobs
if creating_job.workflow_invocation_step
and creating_job.workflow_invocation_step.workflow_invocation == wfi
]:
safe_to_delete = True
for job_to_check in [d_j.job for d_j in input_dataset.dependent_jobs]:
if job_to_check != job and job_to_check.state not in [
job.states.OK,
job.states.DELETED,
]:
log.debug(
"Workflow Intermediates cleanup attempted, but non-terminal state '%s' detected for job %s"
% (job_to_check.state, job_to_check.id)
)
safe_to_delete = False
if safe_to_delete:
# Support purging here too.
input_dataset.mark_deleted()
else:
# No workflow outputs defined, so we can't know what to delete.
# We could make this work differently in the future
pass
|
https://github.com/galaxyproject/galaxy/issues/1531
|
job traceback:
Traceback (most recent call last):
File "/galaxy-repl/instances/main/server/lib/galaxy/jobs/runners/__init__.py", line 590, in finish_job
job_state.job_wrapper.finish( stdout, stderr, exit_code )
File "/galaxy-repl/instances/main/server/lib/galaxy/jobs/__init__.py", line 1275, in finish
ActionBox.execute(self.app, self.sa_session, pja.post_job_action, job)
File "/galaxy-repl/instances/main/server/lib/galaxy/jobs/actions/post.py", line 548, in execute
ActionBox.actions[pja.action_type].execute(app, sa_session, pja, job, replacement_dict)
File "/galaxy-repl/instances/main/server/lib/galaxy/jobs/actions/post.py", line 421, in execute
for input_dataset in [x.dataset for x in j2c.input_datasets if x.dataset.creating_job.workflow_invocation_step and x.dataset.creating_job.workflow_invocation_step.workflow_invocation == wfi]:
AttributeError: 'NoneType' object has no attribute 'creating_job'
|
AttributeError
|
def _workflow_to_dict_editor(self, trans, stored):
""" """
workflow = stored.latest_workflow
# Pack workflow data into a dictionary and return
data = {}
data["name"] = workflow.name
data["steps"] = {}
data["upgrade_messages"] = {}
# For each step, rebuild the form and encode the state
for step in workflow.steps:
# Load from database representation
module = module_factory.from_workflow_step(trans, step)
if not module:
step_annotation = self.get_item_annotation_obj(
trans.sa_session, trans.user, step
)
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
invalid_tool_form_html = """<div class="toolForm tool-node-error">
<div class="toolFormTitle form-row-error">Unrecognized Tool: %s</div>
<div class="toolFormBody"><div class="form-row">
The tool id '%s' for this tool is unrecognized.<br/><br/>
To save this workflow, you will need to delete this step or enable the tool.
</div></div></div>""" % (
step.tool_id,
step.tool_id,
)
step_dict = {
"id": step.order_index,
"type": "invalid",
"tool_id": step.tool_id,
"name": "Unrecognized Tool: %s" % step.tool_id,
"tool_state": None,
"tooltip": None,
"tool_errors": ["Unrecognized Tool Id: %s" % step.tool_id],
"data_inputs": [],
"data_outputs": [],
"form_html": invalid_tool_form_html,
"annotation": annotation_str,
"input_connections": {},
"post_job_actions": {},
"uuid": str(step.uuid),
"label": step.label or None,
"workflow_outputs": [],
}
# Position
step_dict["position"] = step.position
# Add to return value
data["steps"][step.order_index] = step_dict
continue
# Fix any missing parameters
upgrade_message = module.check_and_update_state()
if upgrade_message:
# FIXME: Frontend should be able to handle workflow messages
# as a dictionary not just the values
data["upgrade_messages"][step.order_index] = upgrade_message.values()
# Get user annotation.
step_annotation = self.get_item_annotation_obj(
trans.sa_session, trans.user, step
)
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
form_html = None
if trans.history:
# If in a web session, attach form html. No reason to do
# so for API requests.
form_html = module.get_config_form()
# Pack attributes into plain dictionary
step_dict = {
"id": step.order_index,
"type": module.type,
"tool_id": module.get_tool_id(),
"name": module.get_name(),
"tool_state": module.get_state(),
"tooltip": module.get_tooltip(static_path=url_for("/static")),
"tool_errors": module.get_errors(),
"data_inputs": module.get_data_inputs(),
"data_outputs": module.get_data_outputs(),
"form_html": form_html,
"annotation": annotation_str,
"post_job_actions": {},
"uuid": str(step.uuid) if step.uuid else None,
"label": step.label or None,
"workflow_outputs": [],
}
# Connections
input_connections = step.input_connections
input_connections_type = {}
multiple_input = {} # Boolean value indicating if this can be mutliple
if step.type is None or step.type == "tool":
# Determine full (prefixed) names of valid input datasets
data_input_names = {}
def callback(input, value, prefixed_name, prefixed_label):
if isinstance(input, DataToolParameter) or isinstance(
input, DataCollectionToolParameter
):
data_input_names[prefixed_name] = True
multiple_input[prefixed_name] = input.multiple
if isinstance(input, DataToolParameter):
input_connections_type[input.name] = "dataset"
if isinstance(input, DataCollectionToolParameter):
input_connections_type[input.name] = "dataset_collection"
visit_input_values(module.tool.inputs, module.state.inputs, callback)
# Filter
# FIXME: this removes connection without displaying a message currently!
input_connections = [
conn
for conn in input_connections
if conn.input_name in data_input_names
]
# post_job_actions
pja_dict = {}
for pja in step.post_job_actions:
pja_dict[pja.action_type + pja.output_name] = dict(
action_type=pja.action_type,
output_name=pja.output_name,
action_arguments=pja.action_arguments,
)
step_dict["post_job_actions"] = pja_dict
# workflow outputs
outputs = []
for output in step.workflow_outputs:
outputs.append(output.output_name)
step_dict["workflow_outputs"] = outputs
# Encode input connections as dictionary
input_conn_dict = {}
for conn in input_connections:
input_type = "dataset"
if conn.input_name in input_connections_type:
input_type = input_connections_type[conn.input_name]
conn_dict = dict(
id=conn.output_step.order_index,
output_name=conn.output_name,
input_type=input_type,
)
if conn.input_name in multiple_input:
if conn.input_name in input_conn_dict:
input_conn_dict[conn.input_name].append(conn_dict)
else:
input_conn_dict[conn.input_name] = [conn_dict]
else:
input_conn_dict[conn.input_name] = conn_dict
step_dict["input_connections"] = input_conn_dict
# Position
step_dict["position"] = step.position
# Add to return value
data["steps"][step.order_index] = step_dict
return data
|
def _workflow_to_dict_editor(self, trans, stored):
""" """
workflow = stored.latest_workflow
# Pack workflow data into a dictionary and return
data = {}
data["name"] = workflow.name
data["steps"] = {}
data["upgrade_messages"] = {}
# For each step, rebuild the form and encode the state
for step in workflow.steps:
# Load from database representation
module = module_factory.from_workflow_step(trans, step)
if not module:
step_annotation = self.get_item_annotation_obj(
trans.sa_session, trans.user, step
)
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
invalid_tool_form_html = """<div class="toolForm tool-node-error">
<div class="toolFormTitle form-row-error">Unrecognized Tool: %s</div>
<div class="toolFormBody"><div class="form-row">
The tool id '%s' for this tool is unrecognized.<br/><br/>
To save this workflow, you will need to delete this step or enable the tool.
</div></div></div>""" % (
step.tool_id,
step.tool_id,
)
step_dict = {
"id": step.order_index,
"type": "invalid",
"tool_id": step.tool_id,
"name": "Unrecognized Tool: %s" % step.tool_id,
"tool_state": None,
"tooltip": None,
"tool_errors": ["Unrecognized Tool Id: %s" % step.tool_id],
"data_inputs": [],
"data_outputs": [],
"form_html": invalid_tool_form_html,
"annotation": annotation_str,
"input_connections": {},
"post_job_actions": {},
"uuid": str(step.uuid),
"label": step.label or None,
"workflow_outputs": [],
}
# Position
step_dict["position"] = step.position
# Add to return value
data["steps"][step.order_index] = step_dict
continue
# Fix any missing parameters
upgrade_message = module.check_and_update_state()
if upgrade_message:
# FIXME: Frontend should be able to handle workflow messages
# as a dictionary not just the values
data["upgrade_messages"][step.order_index] = upgrade_message.values()
# Get user annotation.
step_annotation = self.get_item_annotation_obj(
trans.sa_session, trans.user, step
)
annotation_str = ""
if step_annotation:
annotation_str = step_annotation.annotation
# Pack attributes into plain dictionary
step_dict = {
"id": step.order_index,
"type": module.type,
"tool_id": module.get_tool_id(),
"name": module.get_name(),
"tool_state": module.get_state(),
"tooltip": module.get_tooltip(static_path=url_for("/static")),
"tool_errors": module.get_errors(),
"data_inputs": module.get_data_inputs(),
"data_outputs": module.get_data_outputs(),
"form_html": module.get_config_form(),
"annotation": annotation_str,
"post_job_actions": {},
"uuid": str(step.uuid) if step.uuid else None,
"label": step.label or None,
"workflow_outputs": [],
}
# Connections
input_connections = step.input_connections
input_connections_type = {}
multiple_input = {} # Boolean value indicating if this can be mutliple
if step.type is None or step.type == "tool":
# Determine full (prefixed) names of valid input datasets
data_input_names = {}
def callback(input, value, prefixed_name, prefixed_label):
if isinstance(input, DataToolParameter) or isinstance(
input, DataCollectionToolParameter
):
data_input_names[prefixed_name] = True
multiple_input[prefixed_name] = input.multiple
if isinstance(input, DataToolParameter):
input_connections_type[input.name] = "dataset"
if isinstance(input, DataCollectionToolParameter):
input_connections_type[input.name] = "dataset_collection"
visit_input_values(module.tool.inputs, module.state.inputs, callback)
# Filter
# FIXME: this removes connection without displaying a message currently!
input_connections = [
conn
for conn in input_connections
if conn.input_name in data_input_names
]
# post_job_actions
pja_dict = {}
for pja in step.post_job_actions:
pja_dict[pja.action_type + pja.output_name] = dict(
action_type=pja.action_type,
output_name=pja.output_name,
action_arguments=pja.action_arguments,
)
step_dict["post_job_actions"] = pja_dict
# workflow outputs
outputs = []
for output in step.workflow_outputs:
outputs.append(output.output_name)
step_dict["workflow_outputs"] = outputs
# Encode input connections as dictionary
input_conn_dict = {}
for conn in input_connections:
input_type = "dataset"
if conn.input_name in input_connections_type:
input_type = input_connections_type[conn.input_name]
conn_dict = dict(
id=conn.output_step.order_index,
output_name=conn.output_name,
input_type=input_type,
)
if conn.input_name in multiple_input:
if conn.input_name in input_conn_dict:
input_conn_dict[conn.input_name].append(conn_dict)
else:
input_conn_dict[conn.input_name] = [conn_dict]
else:
input_conn_dict[conn.input_name] = conn_dict
step_dict["input_connections"] = input_conn_dict
# Position
step_dict["position"] = step.position
# Add to return value
data["steps"][step.order_index] = step_dict
return data
|
https://github.com/galaxyproject/galaxy/issues/734
|
======================================================================
FAIL: test_export_editor (api.test_workflows.WorkflowsApiTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/opt/galaxy/test/api/test_workflows.py", line 395, in test_export_editor
downloaded_workflow = self._download_workflow( uploaded_workflow_id, style="editor" )
File "/opt/galaxy/test/api/test_workflows.py", line 1155, in _download_workflow
self._assert_status_code_is( download_response, 200 )
File "/opt/galaxy/test/base/api.py", line 82, in _assert_status_code_is
assert_status_code_is( response, expected_status_code )
File "/opt/galaxy/test/base/api_asserts.py", line 15, in assert_status_code_is
raise AssertionError( assertion_message )
AssertionError: Request status code (500) was not expected value 200. Body was {u'err_msg': u'Uncaught exception in exposed API method:', u'err_code': 0}
-------------------- >> begin captured logging << --------------------
requests.packages.urllib3.connectionpool: INFO: Starting new HTTP connection (1): localhost
galaxy.web.framework.webapp: INFO: Session authenticated using Galaxy master api key
requests.packages.urllib3.connectionpool: DEBUG: "GET /api/users?key=TEST123 HTTP/1.1" 200 None
requests.packages.urllib3.connectionpool: INFO: Starting new HTTP connection (1): localhost
galaxy.web.framework.webapp: INFO: Session authenticated using Galaxy master api key
requests.packages.urllib3.connectionpool: DEBUG: "POST /api/users/adb5f5c93f827949/api_key HTTP/1.1" 200 None
requests.packages.urllib3.connectionpool: INFO: Starting new HTTP connection (1): localhost
requests.packages.urllib3.connectionpool: DEBUG: "POST /api/workflows/upload HTTP/1.1" 200 None
requests.packages.urllib3.connectionpool: INFO: Starting new HTTP connection (1): localhost
galaxy.tools: ERROR: tools::to_json - [history_id=None] Failed to retrieve history. History unavailable. Please specify a valid history id..
Traceback (most recent call last):
File "/opt/galaxy/lib/galaxy/tools/__init__.py", line 2347, in to_json
raise Exception('History unavailable. Please specify a valid history id')
Exception: History unavailable. Please specify a valid history id
galaxy.web.framework.decorators: ERROR: Uncaught exception in exposed API method:
Traceback (most recent call last):
File "/opt/galaxy/lib/galaxy/web/framework/decorators.py", line 260, in decorator
rval = func( self, trans, *args, **kwargs)
File "/opt/galaxy/lib/galaxy/webapps/galaxy/api/workflows.py", line 232, in workflow_dict
ret_dict = self.workflow_contents_manager.workflow_to_dict( trans, stored_workflow, style=style )
File "/opt/galaxy/lib/galaxy/managers/workflows.py", line 274, in workflow_to_dict
return self._workflow_to_dict_editor( trans, stored )
File "/opt/galaxy/lib/galaxy/managers/workflows.py", line 349, in _workflow_to_dict_editor
'form_html': module.get_config_form(),
File "/opt/galaxy/lib/galaxy/workflow/modules.py", line 717, in get_config_form
tool=self.tool, values=self.state.inputs, errors=( self.errors or {} ) )
File "/opt/galaxy/lib/galaxy/web/framework/webapp.py", line 809, in fill_template
return self.fill_template_mako( filename, **kwargs )
File "/opt/galaxy/lib/galaxy/web/framework/webapp.py", line 823, in fill_template_mako
return template.render( **data )
File "/opt/galaxy/eggs/Mako-0.4.1-py2.7.egg/mako/template.py", line 296, in render
return runtime._render(self, self.callable_, args, data)
File "/opt/galaxy/eggs/Mako-0.4.1-py2.7.egg/mako/runtime.py", line 660, in _render
**_kwargs_for_callable(callable_, data))
File "/opt/galaxy/eggs/Mako-0.4.1-py2.7.egg/mako/runtime.py", line 692, in _render_context
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
File "/opt/galaxy/eggs/Mako-0.4.1-py2.7.egg/mako/runtime.py", line 718, in _exec_template
callable_(context, *args, **kwargs)
File "/opt/galaxy/database/compiled_templates/workflow/editor_tool_form.mako.py", line 37, in render_body
'history_id' : trans.security.encode_id( trans.history.id ),
AttributeError: 'NoneType' object has no attribute 'id'
requests.packages.urllib3.connectionpool: DEBUG: "GET /api/workflows/917af94b51aeccc8/download?style=editor&key=08b6c93ec530f27b033916ff5a7dbf70 HTTP/1.1" 500 None
--------------------- >> end captured logging << ---------------------
|
AssertionError
|
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
idx2str, str2idx, str2freq, max_size, _, _, _ = create_vocabulary(
column,
preprocessing_parameters["tokenizer"],
num_most_frequent=preprocessing_parameters["most_common"],
lowercase=preprocessing_parameters["lowercase"],
processor=backend.df_engine,
)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(str2idx),
"max_set_size": max_size,
}
|
def get_feature_meta(column, preprocessing_parameters, backend):
idx2str, str2idx, str2freq, max_size, _, _, _ = create_vocabulary(
column,
preprocessing_parameters["tokenizer"],
num_most_frequent=preprocessing_parameters["most_common"],
lowercase=preprocessing_parameters["lowercase"],
processor=backend.df_engine,
)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(str2idx),
"max_set_size": max_size,
}
|
https://github.com/ludwig-ai/ludwig/issues/1040
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-c2e6c8f1ae7c> in <module>()
10 skip_save_progress=True,
11 skip_save_model=False,
---> 12 skip_save_processed_input=True
13 )
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/run.py in hyperopt(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_save_hyperopt_statistics, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
289 random_seed=random_seed,
290 debug=debug,
--> 291 **kwargs
292 )
293
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/execution.py in execute(self, config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
149 skip_collect_overall_stats=False,
150 random_seed=random_seed,
--> 151 debug=debug,
152 )
153 metric_score = self.get_metric_score(eval_stats)
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in experiment(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_load_path, model_resume_path, eval_split, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_collect_predictions, skip_collect_overall_stats, output_directory, random_seed, debug, **kwargs)
1054 output_directory=output_directory,
1055 random_seed=random_seed,
-> 1056 debug=debug,
1057 )
1058
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in train(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_resume_path, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, output_directory, random_seed, debug, **kwargs)
415 random_seed=random_seed,
416 devbug=debug,
--> 417 **kwargs,
418 )
419 (training_set,
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in preprocess(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, random_seed, debug, **kwargs)
1261 preprocessing_params=self.config[PREPROCESSING],
1262 backend=self.backend,
-> 1263 random_seed=random_seed
1264 )
1265
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, preprocessing_params, backend, random_seed)
1395 preprocessing_params=preprocessing_params,
1396 backend=backend,
-> 1397 random_seed=random_seed
1398 )
1399 training_set, test_set, validation_set, training_set_metadata = processed
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, skip_save_processed_input, preprocessing_params, backend, random_seed)
183 preprocessing_params=preprocessing_params,
184 backend=backend,
--> 185 random_seed=random_seed
186 )
187
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in _preprocess_df_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, preprocessing_params, backend, random_seed)
1645 metadata=training_set_metadata,
1646 random_seed=random_seed,
-> 1647 backend=backend
1648 )
1649
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_dataset(dataset_df, features, global_preprocessing_parameters, metadata, backend, random_seed)
1013 features,
1014 global_preprocessing_parameters,
-> 1015 backend
1016 )
1017
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_metadata(dataset_df, features, global_preprocessing_parameters, backend)
1122 column,
1123 preprocessing_parameters,
-> 1124 backend
1125 )
1126
/usr/local/lib/python3.6/dist-packages/ludwig/features/category_feature.py in get_feature_meta(column, preprocessing_parameters, backend)
65 lowercase=preprocessing_parameters['lowercase'],
66 add_padding=False,
---> 67 processor=backend.df_engine
68 )
69 return {
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in create_vocabulary(data, tokenizer_type, add_unknown, add_padding, lowercase, num_most_frequent, vocab_file, unknown_symbol, padding_symbol, pretrained_model_name_or_path, processor)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/pandas/core/series.py in map(self, arg, na_action)
3980 dtype: object
3981 """
-> 3982 new_values = super()._map_values(arg, na_action=na_action)
3983 return self._constructor(new_values, index=self.index).__finalize__(
3984 self, method="map"
/usr/local/lib/python3.6/dist-packages/pandas/core/base.py in _map_values(self, mapper, na_action)
1158
1159 # mapper is a function
-> 1160 new_values = map_f(values, mapper)
1161
1162 return new_values
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in <lambda>(line)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in __call__(self, text)
305 class StrippedStringToListTokenizer(BaseTokenizer):
306 def __call__(self, text):
--> 307 return [text.strip()]
308
309
AttributeError: 'int' object has no attribute 'strip'
|
AttributeError
|
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
idx2str, str2idx, str2freq, _, _, _, _ = create_vocabulary(
column,
"stripped",
num_most_frequent=preprocessing_parameters["most_common"],
lowercase=preprocessing_parameters["lowercase"],
add_padding=False,
processor=backend.df_engine,
)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(str2idx),
}
|
def get_feature_meta(column, preprocessing_parameters, backend):
idx2str, str2idx, str2freq, _, _, _, _ = create_vocabulary(
column,
"stripped",
num_most_frequent=preprocessing_parameters["most_common"],
lowercase=preprocessing_parameters["lowercase"],
add_padding=False,
processor=backend.df_engine,
)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(str2idx),
}
|
https://github.com/ludwig-ai/ludwig/issues/1040
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-c2e6c8f1ae7c> in <module>()
10 skip_save_progress=True,
11 skip_save_model=False,
---> 12 skip_save_processed_input=True
13 )
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/run.py in hyperopt(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_save_hyperopt_statistics, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
289 random_seed=random_seed,
290 debug=debug,
--> 291 **kwargs
292 )
293
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/execution.py in execute(self, config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
149 skip_collect_overall_stats=False,
150 random_seed=random_seed,
--> 151 debug=debug,
152 )
153 metric_score = self.get_metric_score(eval_stats)
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in experiment(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_load_path, model_resume_path, eval_split, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_collect_predictions, skip_collect_overall_stats, output_directory, random_seed, debug, **kwargs)
1054 output_directory=output_directory,
1055 random_seed=random_seed,
-> 1056 debug=debug,
1057 )
1058
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in train(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_resume_path, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, output_directory, random_seed, debug, **kwargs)
415 random_seed=random_seed,
416 devbug=debug,
--> 417 **kwargs,
418 )
419 (training_set,
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in preprocess(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, random_seed, debug, **kwargs)
1261 preprocessing_params=self.config[PREPROCESSING],
1262 backend=self.backend,
-> 1263 random_seed=random_seed
1264 )
1265
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, preprocessing_params, backend, random_seed)
1395 preprocessing_params=preprocessing_params,
1396 backend=backend,
-> 1397 random_seed=random_seed
1398 )
1399 training_set, test_set, validation_set, training_set_metadata = processed
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, skip_save_processed_input, preprocessing_params, backend, random_seed)
183 preprocessing_params=preprocessing_params,
184 backend=backend,
--> 185 random_seed=random_seed
186 )
187
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in _preprocess_df_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, preprocessing_params, backend, random_seed)
1645 metadata=training_set_metadata,
1646 random_seed=random_seed,
-> 1647 backend=backend
1648 )
1649
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_dataset(dataset_df, features, global_preprocessing_parameters, metadata, backend, random_seed)
1013 features,
1014 global_preprocessing_parameters,
-> 1015 backend
1016 )
1017
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_metadata(dataset_df, features, global_preprocessing_parameters, backend)
1122 column,
1123 preprocessing_parameters,
-> 1124 backend
1125 )
1126
/usr/local/lib/python3.6/dist-packages/ludwig/features/category_feature.py in get_feature_meta(column, preprocessing_parameters, backend)
65 lowercase=preprocessing_parameters['lowercase'],
66 add_padding=False,
---> 67 processor=backend.df_engine
68 )
69 return {
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in create_vocabulary(data, tokenizer_type, add_unknown, add_padding, lowercase, num_most_frequent, vocab_file, unknown_symbol, padding_symbol, pretrained_model_name_or_path, processor)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/pandas/core/series.py in map(self, arg, na_action)
3980 dtype: object
3981 """
-> 3982 new_values = super()._map_values(arg, na_action=na_action)
3983 return self._constructor(new_values, index=self.index).__finalize__(
3984 self, method="map"
/usr/local/lib/python3.6/dist-packages/pandas/core/base.py in _map_values(self, mapper, na_action)
1158
1159 # mapper is a function
-> 1160 new_values = map_f(values, mapper)
1161
1162 return new_values
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in <lambda>(line)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in __call__(self, text)
305 class StrippedStringToListTokenizer(BaseTokenizer):
306 def __call__(self, text):
--> 307 return [text.strip()]
308
309
AttributeError: 'int' object has no attribute 'strip'
|
AttributeError
|
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
idx2str, str2idx, str2freq, max_length, _, _, _ = create_vocabulary(
column,
preprocessing_parameters["tokenizer"],
lowercase=preprocessing_parameters["lowercase"],
num_most_frequent=preprocessing_parameters["most_common"],
vocab_file=preprocessing_parameters["vocab_file"],
unknown_symbol=preprocessing_parameters["unknown_symbol"],
padding_symbol=preprocessing_parameters["padding_symbol"],
processor=backend.df_engine,
)
max_length = min(preprocessing_parameters["sequence_length_limit"], max_length)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(idx2str),
"max_sequence_length": max_length,
}
|
def get_feature_meta(column, preprocessing_parameters, backend):
idx2str, str2idx, str2freq, max_length, _, _, _ = create_vocabulary(
column,
preprocessing_parameters["tokenizer"],
lowercase=preprocessing_parameters["lowercase"],
num_most_frequent=preprocessing_parameters["most_common"],
vocab_file=preprocessing_parameters["vocab_file"],
unknown_symbol=preprocessing_parameters["unknown_symbol"],
padding_symbol=preprocessing_parameters["padding_symbol"],
processor=backend.df_engine,
)
max_length = min(preprocessing_parameters["sequence_length_limit"], max_length)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(idx2str),
"max_sequence_length": max_length,
}
|
https://github.com/ludwig-ai/ludwig/issues/1040
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-c2e6c8f1ae7c> in <module>()
10 skip_save_progress=True,
11 skip_save_model=False,
---> 12 skip_save_processed_input=True
13 )
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/run.py in hyperopt(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_save_hyperopt_statistics, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
289 random_seed=random_seed,
290 debug=debug,
--> 291 **kwargs
292 )
293
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/execution.py in execute(self, config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
149 skip_collect_overall_stats=False,
150 random_seed=random_seed,
--> 151 debug=debug,
152 )
153 metric_score = self.get_metric_score(eval_stats)
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in experiment(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_load_path, model_resume_path, eval_split, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_collect_predictions, skip_collect_overall_stats, output_directory, random_seed, debug, **kwargs)
1054 output_directory=output_directory,
1055 random_seed=random_seed,
-> 1056 debug=debug,
1057 )
1058
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in train(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_resume_path, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, output_directory, random_seed, debug, **kwargs)
415 random_seed=random_seed,
416 devbug=debug,
--> 417 **kwargs,
418 )
419 (training_set,
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in preprocess(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, random_seed, debug, **kwargs)
1261 preprocessing_params=self.config[PREPROCESSING],
1262 backend=self.backend,
-> 1263 random_seed=random_seed
1264 )
1265
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, preprocessing_params, backend, random_seed)
1395 preprocessing_params=preprocessing_params,
1396 backend=backend,
-> 1397 random_seed=random_seed
1398 )
1399 training_set, test_set, validation_set, training_set_metadata = processed
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, skip_save_processed_input, preprocessing_params, backend, random_seed)
183 preprocessing_params=preprocessing_params,
184 backend=backend,
--> 185 random_seed=random_seed
186 )
187
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in _preprocess_df_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, preprocessing_params, backend, random_seed)
1645 metadata=training_set_metadata,
1646 random_seed=random_seed,
-> 1647 backend=backend
1648 )
1649
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_dataset(dataset_df, features, global_preprocessing_parameters, metadata, backend, random_seed)
1013 features,
1014 global_preprocessing_parameters,
-> 1015 backend
1016 )
1017
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_metadata(dataset_df, features, global_preprocessing_parameters, backend)
1122 column,
1123 preprocessing_parameters,
-> 1124 backend
1125 )
1126
/usr/local/lib/python3.6/dist-packages/ludwig/features/category_feature.py in get_feature_meta(column, preprocessing_parameters, backend)
65 lowercase=preprocessing_parameters['lowercase'],
66 add_padding=False,
---> 67 processor=backend.df_engine
68 )
69 return {
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in create_vocabulary(data, tokenizer_type, add_unknown, add_padding, lowercase, num_most_frequent, vocab_file, unknown_symbol, padding_symbol, pretrained_model_name_or_path, processor)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/pandas/core/series.py in map(self, arg, na_action)
3980 dtype: object
3981 """
-> 3982 new_values = super()._map_values(arg, na_action=na_action)
3983 return self._constructor(new_values, index=self.index).__finalize__(
3984 self, method="map"
/usr/local/lib/python3.6/dist-packages/pandas/core/base.py in _map_values(self, mapper, na_action)
1158
1159 # mapper is a function
-> 1160 new_values = map_f(values, mapper)
1161
1162 return new_values
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in <lambda>(line)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in __call__(self, text)
305 class StrippedStringToListTokenizer(BaseTokenizer):
306 def __call__(self, text):
--> 307 return [text.strip()]
308
309
AttributeError: 'int' object has no attribute 'strip'
|
AttributeError
|
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
idx2str, str2idx, str2freq, max_size, _, _, _ = create_vocabulary(
column,
preprocessing_parameters["tokenizer"],
num_most_frequent=preprocessing_parameters["most_common"],
lowercase=preprocessing_parameters["lowercase"],
processor=backend.df_engine,
)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(str2idx),
"max_set_size": max_size,
}
|
def get_feature_meta(column, preprocessing_parameters, backend):
idx2str, str2idx, str2freq, max_size, _, _, _ = create_vocabulary(
column,
preprocessing_parameters["tokenizer"],
num_most_frequent=preprocessing_parameters["most_common"],
lowercase=preprocessing_parameters["lowercase"],
processor=backend.df_engine,
)
return {
"idx2str": idx2str,
"str2idx": str2idx,
"str2freq": str2freq,
"vocab_size": len(str2idx),
"max_set_size": max_size,
}
|
https://github.com/ludwig-ai/ludwig/issues/1040
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-c2e6c8f1ae7c> in <module>()
10 skip_save_progress=True,
11 skip_save_model=False,
---> 12 skip_save_processed_input=True
13 )
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/run.py in hyperopt(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_save_hyperopt_statistics, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
289 random_seed=random_seed,
290 debug=debug,
--> 291 **kwargs
292 )
293
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/execution.py in execute(self, config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
149 skip_collect_overall_stats=False,
150 random_seed=random_seed,
--> 151 debug=debug,
152 )
153 metric_score = self.get_metric_score(eval_stats)
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in experiment(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_load_path, model_resume_path, eval_split, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_collect_predictions, skip_collect_overall_stats, output_directory, random_seed, debug, **kwargs)
1054 output_directory=output_directory,
1055 random_seed=random_seed,
-> 1056 debug=debug,
1057 )
1058
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in train(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_resume_path, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, output_directory, random_seed, debug, **kwargs)
415 random_seed=random_seed,
416 devbug=debug,
--> 417 **kwargs,
418 )
419 (training_set,
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in preprocess(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, random_seed, debug, **kwargs)
1261 preprocessing_params=self.config[PREPROCESSING],
1262 backend=self.backend,
-> 1263 random_seed=random_seed
1264 )
1265
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, preprocessing_params, backend, random_seed)
1395 preprocessing_params=preprocessing_params,
1396 backend=backend,
-> 1397 random_seed=random_seed
1398 )
1399 training_set, test_set, validation_set, training_set_metadata = processed
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, skip_save_processed_input, preprocessing_params, backend, random_seed)
183 preprocessing_params=preprocessing_params,
184 backend=backend,
--> 185 random_seed=random_seed
186 )
187
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in _preprocess_df_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, preprocessing_params, backend, random_seed)
1645 metadata=training_set_metadata,
1646 random_seed=random_seed,
-> 1647 backend=backend
1648 )
1649
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_dataset(dataset_df, features, global_preprocessing_parameters, metadata, backend, random_seed)
1013 features,
1014 global_preprocessing_parameters,
-> 1015 backend
1016 )
1017
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_metadata(dataset_df, features, global_preprocessing_parameters, backend)
1122 column,
1123 preprocessing_parameters,
-> 1124 backend
1125 )
1126
/usr/local/lib/python3.6/dist-packages/ludwig/features/category_feature.py in get_feature_meta(column, preprocessing_parameters, backend)
65 lowercase=preprocessing_parameters['lowercase'],
66 add_padding=False,
---> 67 processor=backend.df_engine
68 )
69 return {
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in create_vocabulary(data, tokenizer_type, add_unknown, add_padding, lowercase, num_most_frequent, vocab_file, unknown_symbol, padding_symbol, pretrained_model_name_or_path, processor)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/pandas/core/series.py in map(self, arg, na_action)
3980 dtype: object
3981 """
-> 3982 new_values = super()._map_values(arg, na_action=na_action)
3983 return self._constructor(new_values, index=self.index).__finalize__(
3984 self, method="map"
/usr/local/lib/python3.6/dist-packages/pandas/core/base.py in _map_values(self, mapper, na_action)
1158
1159 # mapper is a function
-> 1160 new_values = map_f(values, mapper)
1161
1162 return new_values
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in <lambda>(line)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in __call__(self, text)
305 class StrippedStringToListTokenizer(BaseTokenizer):
306 def __call__(self, text):
--> 307 return [text.strip()]
308
309
AttributeError: 'int' object has no attribute 'strip'
|
AttributeError
|
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
tf_meta = TextFeatureMixin.feature_meta(column, preprocessing_parameters, backend)
(
char_idx2str,
char_str2idx,
char_str2freq,
char_max_len,
char_pad_idx,
char_pad_symbol,
char_unk_symbol,
word_idx2str,
word_str2idx,
word_str2freq,
word_max_len,
word_pad_idx,
word_pad_symbol,
word_unk_symbol,
) = tf_meta
char_max_len = min(
preprocessing_parameters["char_sequence_length_limit"], char_max_len
)
word_max_len = min(
preprocessing_parameters["word_sequence_length_limit"], word_max_len
)
return {
"char_idx2str": char_idx2str,
"char_str2idx": char_str2idx,
"char_str2freq": char_str2freq,
"char_vocab_size": len(char_idx2str),
"char_max_sequence_length": char_max_len,
"char_pad_idx": char_pad_idx,
"char_pad_symbol": char_pad_symbol,
"char_unk_symbol": char_unk_symbol,
"word_idx2str": word_idx2str,
"word_str2idx": word_str2idx,
"word_str2freq": word_str2freq,
"word_vocab_size": len(word_idx2str),
"word_max_sequence_length": word_max_len,
"word_pad_idx": word_pad_idx,
"word_pad_symbol": word_pad_symbol,
"word_unk_symbol": word_unk_symbol,
}
|
def get_feature_meta(column, preprocessing_parameters, backend):
tf_meta = TextFeatureMixin.feature_meta(column, preprocessing_parameters, backend)
(
char_idx2str,
char_str2idx,
char_str2freq,
char_max_len,
char_pad_idx,
char_pad_symbol,
char_unk_symbol,
word_idx2str,
word_str2idx,
word_str2freq,
word_max_len,
word_pad_idx,
word_pad_symbol,
word_unk_symbol,
) = tf_meta
char_max_len = min(
preprocessing_parameters["char_sequence_length_limit"], char_max_len
)
word_max_len = min(
preprocessing_parameters["word_sequence_length_limit"], word_max_len
)
return {
"char_idx2str": char_idx2str,
"char_str2idx": char_str2idx,
"char_str2freq": char_str2freq,
"char_vocab_size": len(char_idx2str),
"char_max_sequence_length": char_max_len,
"char_pad_idx": char_pad_idx,
"char_pad_symbol": char_pad_symbol,
"char_unk_symbol": char_unk_symbol,
"word_idx2str": word_idx2str,
"word_str2idx": word_str2idx,
"word_str2freq": word_str2freq,
"word_vocab_size": len(word_idx2str),
"word_max_sequence_length": word_max_len,
"word_pad_idx": word_pad_idx,
"word_pad_symbol": word_pad_symbol,
"word_unk_symbol": word_unk_symbol,
}
|
https://github.com/ludwig-ai/ludwig/issues/1040
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-c2e6c8f1ae7c> in <module>()
10 skip_save_progress=True,
11 skip_save_model=False,
---> 12 skip_save_processed_input=True
13 )
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/run.py in hyperopt(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_save_hyperopt_statistics, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
289 random_seed=random_seed,
290 debug=debug,
--> 291 **kwargs
292 )
293
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/execution.py in execute(self, config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
149 skip_collect_overall_stats=False,
150 random_seed=random_seed,
--> 151 debug=debug,
152 )
153 metric_score = self.get_metric_score(eval_stats)
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in experiment(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_load_path, model_resume_path, eval_split, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_collect_predictions, skip_collect_overall_stats, output_directory, random_seed, debug, **kwargs)
1054 output_directory=output_directory,
1055 random_seed=random_seed,
-> 1056 debug=debug,
1057 )
1058
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in train(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_resume_path, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, output_directory, random_seed, debug, **kwargs)
415 random_seed=random_seed,
416 devbug=debug,
--> 417 **kwargs,
418 )
419 (training_set,
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in preprocess(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, random_seed, debug, **kwargs)
1261 preprocessing_params=self.config[PREPROCESSING],
1262 backend=self.backend,
-> 1263 random_seed=random_seed
1264 )
1265
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, preprocessing_params, backend, random_seed)
1395 preprocessing_params=preprocessing_params,
1396 backend=backend,
-> 1397 random_seed=random_seed
1398 )
1399 training_set, test_set, validation_set, training_set_metadata = processed
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, skip_save_processed_input, preprocessing_params, backend, random_seed)
183 preprocessing_params=preprocessing_params,
184 backend=backend,
--> 185 random_seed=random_seed
186 )
187
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in _preprocess_df_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, preprocessing_params, backend, random_seed)
1645 metadata=training_set_metadata,
1646 random_seed=random_seed,
-> 1647 backend=backend
1648 )
1649
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_dataset(dataset_df, features, global_preprocessing_parameters, metadata, backend, random_seed)
1013 features,
1014 global_preprocessing_parameters,
-> 1015 backend
1016 )
1017
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_metadata(dataset_df, features, global_preprocessing_parameters, backend)
1122 column,
1123 preprocessing_parameters,
-> 1124 backend
1125 )
1126
/usr/local/lib/python3.6/dist-packages/ludwig/features/category_feature.py in get_feature_meta(column, preprocessing_parameters, backend)
65 lowercase=preprocessing_parameters['lowercase'],
66 add_padding=False,
---> 67 processor=backend.df_engine
68 )
69 return {
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in create_vocabulary(data, tokenizer_type, add_unknown, add_padding, lowercase, num_most_frequent, vocab_file, unknown_symbol, padding_symbol, pretrained_model_name_or_path, processor)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/pandas/core/series.py in map(self, arg, na_action)
3980 dtype: object
3981 """
-> 3982 new_values = super()._map_values(arg, na_action=na_action)
3983 return self._constructor(new_values, index=self.index).__finalize__(
3984 self, method="map"
/usr/local/lib/python3.6/dist-packages/pandas/core/base.py in _map_values(self, mapper, na_action)
1158
1159 # mapper is a function
-> 1160 new_values = map_f(values, mapper)
1161
1162 return new_values
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in <lambda>(line)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in __call__(self, text)
305 class StrippedStringToListTokenizer(BaseTokenizer):
306 def __call__(self, text):
--> 307 return [text.strip()]
308
309
AttributeError: 'int' object has no attribute 'strip'
|
AttributeError
|
def get_feature_meta(column, preprocessing_parameters, backend):
column = column.astype(str)
tokenizer = get_from_registry(
preprocessing_parameters["tokenizer"], tokenizer_registry
)()
max_length = 0
for timeseries in column:
processed_line = tokenizer(timeseries)
max_length = max(max_length, len(processed_line))
max_length = min(preprocessing_parameters["timeseries_length_limit"], max_length)
return {"max_timeseries_length": max_length}
|
def get_feature_meta(column, preprocessing_parameters, backend):
tokenizer = get_from_registry(
preprocessing_parameters["tokenizer"], tokenizer_registry
)()
max_length = 0
for timeseries in column:
processed_line = tokenizer(timeseries)
max_length = max(max_length, len(processed_line))
max_length = min(preprocessing_parameters["timeseries_length_limit"], max_length)
return {"max_timeseries_length": max_length}
|
https://github.com/ludwig-ai/ludwig/issues/1040
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-13-c2e6c8f1ae7c> in <module>()
10 skip_save_progress=True,
11 skip_save_model=False,
---> 12 skip_save_processed_input=True
13 )
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/run.py in hyperopt(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_save_hyperopt_statistics, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
289 random_seed=random_seed,
290 debug=debug,
--> 291 **kwargs
292 )
293
/usr/local/lib/python3.6/dist-packages/ludwig/hyperopt/execution.py in execute(self, config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, output_directory, gpus, gpu_memory_limit, allow_parallel_threads, use_horovod, random_seed, debug, **kwargs)
149 skip_collect_overall_stats=False,
150 random_seed=random_seed,
--> 151 debug=debug,
152 )
153 metric_score = self.get_metric_score(eval_stats)
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in experiment(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_load_path, model_resume_path, eval_split, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, skip_save_unprocessed_output, skip_save_predictions, skip_save_eval_stats, skip_collect_predictions, skip_collect_overall_stats, output_directory, random_seed, debug, **kwargs)
1054 output_directory=output_directory,
1055 random_seed=random_seed,
-> 1056 debug=debug,
1057 )
1058
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in train(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, experiment_name, model_name, model_resume_path, skip_save_training_description, skip_save_training_statistics, skip_save_model, skip_save_progress, skip_save_log, skip_save_processed_input, output_directory, random_seed, debug, **kwargs)
415 random_seed=random_seed,
416 devbug=debug,
--> 417 **kwargs,
418 )
419 (training_set,
/usr/local/lib/python3.6/dist-packages/ludwig/api.py in preprocess(self, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, random_seed, debug, **kwargs)
1261 preprocessing_params=self.config[PREPROCESSING],
1262 backend=self.backend,
-> 1263 random_seed=random_seed
1264 )
1265
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(config, dataset, training_set, validation_set, test_set, training_set_metadata, data_format, skip_save_processed_input, preprocessing_params, backend, random_seed)
1395 preprocessing_params=preprocessing_params,
1396 backend=backend,
-> 1397 random_seed=random_seed
1398 )
1399 training_set, test_set, validation_set, training_set_metadata = processed
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in preprocess_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, skip_save_processed_input, preprocessing_params, backend, random_seed)
183 preprocessing_params=preprocessing_params,
184 backend=backend,
--> 185 random_seed=random_seed
186 )
187
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in _preprocess_df_for_training(features, dataset, training_set, validation_set, test_set, training_set_metadata, preprocessing_params, backend, random_seed)
1645 metadata=training_set_metadata,
1646 random_seed=random_seed,
-> 1647 backend=backend
1648 )
1649
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_dataset(dataset_df, features, global_preprocessing_parameters, metadata, backend, random_seed)
1013 features,
1014 global_preprocessing_parameters,
-> 1015 backend
1016 )
1017
/usr/local/lib/python3.6/dist-packages/ludwig/data/preprocessing.py in build_metadata(dataset_df, features, global_preprocessing_parameters, backend)
1122 column,
1123 preprocessing_parameters,
-> 1124 backend
1125 )
1126
/usr/local/lib/python3.6/dist-packages/ludwig/features/category_feature.py in get_feature_meta(column, preprocessing_parameters, backend)
65 lowercase=preprocessing_parameters['lowercase'],
66 add_padding=False,
---> 67 processor=backend.df_engine
68 )
69 return {
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in create_vocabulary(data, tokenizer_type, add_unknown, add_padding, lowercase, num_most_frequent, vocab_file, unknown_symbol, padding_symbol, pretrained_model_name_or_path, processor)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/pandas/core/series.py in map(self, arg, na_action)
3980 dtype: object
3981 """
-> 3982 new_values = super()._map_values(arg, na_action=na_action)
3983 return self._constructor(new_values, index=self.index).__finalize__(
3984 self, method="map"
/usr/local/lib/python3.6/dist-packages/pandas/core/base.py in _map_values(self, mapper, na_action)
1158
1159 # mapper is a function
-> 1160 new_values = map_f(values, mapper)
1161
1162 return new_values
pandas/_libs/lib.pyx in pandas._libs.lib.map_infer()
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in <lambda>(line)
139 vocab = load_vocabulary(vocab_file)
140
--> 141 processed_lines = data.map(lambda line: tokenizer(line.lower() if lowercase else line))
142 processed_counts = processed_lines.explode().value_counts(sort=False)
143 processed_counts = processor.compute(processed_counts)
/usr/local/lib/python3.6/dist-packages/ludwig/utils/strings_utils.py in __call__(self, text)
305 class StrippedStringToListTokenizer(BaseTokenizer):
306 def __call__(self, text):
--> 307 return [text.strip()]
308
309
AttributeError: 'int' object has no attribute 'strip'
|
AttributeError
|
def decoder_teacher_forcing(self, encoder_output, target=None, encoder_end_state=None):
# ================ Setup ================
batch_size = tf.shape(encoder_output)[0]
# Prepare target for decoding
target_sequence_length = sequence_length_2D(target)
start_tokens = tf.tile([self.GO_SYMBOL], [batch_size])
end_tokens = tf.tile([self.END_SYMBOL], [batch_size])
if self.is_timeseries:
start_tokens = tf.cast(start_tokens, tf.float32)
end_tokens = tf.cast(end_tokens, tf.float32)
targets_with_go_and_eos = tf.concat(
[
tf.expand_dims(start_tokens, 1),
target, # right now cast to tf.int32, fails if tf.int64
tf.expand_dims(end_tokens, 1),
],
1,
)
target_sequence_length_with_eos = target_sequence_length + 1
# Decoder Embeddings
decoder_emb_inp = self.decoder_embedding(targets_with_go_and_eos)
# Setting up decoder memory from encoder output
if self.attention_mechanism is not None:
encoder_sequence_length = sequence_length_3D(encoder_output)
self.attention_mechanism.setup_memory(
encoder_output, memory_sequence_length=encoder_sequence_length
)
decoder_initial_state = self.build_decoder_initial_state(
batch_size, encoder_state=encoder_end_state, dtype=tf.float32
)
decoder = tfa.seq2seq.BasicDecoder(
self.decoder_rnncell, sampler=self.sampler, output_layer=self.dense_layer
)
# BasicDecoderOutput
outputs, final_state, generated_sequence_lengths = decoder(
decoder_emb_inp,
initial_state=decoder_initial_state,
sequence_length=target_sequence_length_with_eos,
)
logits = outputs.rnn_output
# mask = tf.sequence_mask(
# generated_sequence_lengths,
# maxlen=tf.shape(logits)[1],
# dtype=tf.float32
# )
# logits = logits * mask[:, :, tf.newaxis]
# append a trailing 0, useful for
# those datapoints that reach maximum length
# and don't have a eos at the end
logits = tf.pad(logits, [[0, 0], [0, 1], [0, 0]])
return logits # , outputs, final_state, generated_sequence_lengths
|
def decoder_teacher_forcing(self, encoder_output, target=None, encoder_end_state=None):
# ================ Setup ================
batch_size = encoder_output.shape[0]
# Prepare target for decoding
target_sequence_length = sequence_length_2D(target)
start_tokens = tf.tile([self.GO_SYMBOL], [batch_size])
end_tokens = tf.tile([self.END_SYMBOL], [batch_size])
if self.is_timeseries:
start_tokens = tf.cast(start_tokens, tf.float32)
end_tokens = tf.cast(end_tokens, tf.float32)
targets_with_go_and_eos = tf.concat(
[
tf.expand_dims(start_tokens, 1),
target, # right now cast to tf.int32, fails if tf.int64
tf.expand_dims(end_tokens, 1),
],
1,
)
target_sequence_length_with_eos = target_sequence_length + 1
# Decoder Embeddings
decoder_emb_inp = self.decoder_embedding(targets_with_go_and_eos)
# Setting up decoder memory from encoder output
if self.attention_mechanism is not None:
encoder_sequence_length = sequence_length_3D(encoder_output)
self.attention_mechanism.setup_memory(
encoder_output, memory_sequence_length=encoder_sequence_length
)
decoder_initial_state = self.build_decoder_initial_state(
batch_size, encoder_state=encoder_end_state, dtype=tf.float32
)
decoder = tfa.seq2seq.BasicDecoder(
self.decoder_rnncell, sampler=self.sampler, output_layer=self.dense_layer
)
# BasicDecoderOutput
outputs, final_state, generated_sequence_lengths = decoder(
decoder_emb_inp,
initial_state=decoder_initial_state,
sequence_length=target_sequence_length_with_eos,
)
logits = outputs.rnn_output
# mask = tf.sequence_mask(
# generated_sequence_lengths,
# maxlen=tf.shape(logits)[1],
# dtype=tf.float32
# )
# logits = logits * mask[:, :, tf.newaxis]
# append a trailing 0, useful for
# those datapoints that reach maximum length
# and don't have a eos at the end
logits = tf.pad(logits, [[0, 0], [0, 1], [0, 0]])
return logits # , outputs, final_state, generated_sequence_lengths
|
https://github.com/ludwig-ai/ludwig/issues/960
|
2020-10-19 03:06:47.001119: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
Traceback (most recent call last):
File "train.py", line 78, in <module>
train(args)
File "train.py", line 41, in train
ludwig_model.save_savedmodel(args.model_dir)
File "/usr/local/lib/python3.6/dist-packages/ludwig/api.py", line 1445, in save_savedmodel
self.model.save_savedmodel(save_path)
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 82, in save_savedmodel
keras_model = self.get_connected_model(training=False)
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 77, in get_connected_model
inputs = inputs or self.get_model_inputs(training)
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 63, in get_model_inputs
self.input_features.items()
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 62, in <dictcomp>
for input_feature_name, input_feature in
File "/usr/local/lib/python3.6/dist-packages/ludwig/features/base_feature.py", line 70, in create_input
return tf.keras.Input(shape=self.get_input_shape(),
File "/usr/local/lib/python3.6/dist-packages/ludwig/features/set_feature.py", line 127, in get_input_shape
return len(self.vocab),
AttributeError: 'SetInputFeature' object has no attribute 'vocab'
|
AttributeError
|
def logits(self, inputs, target=None, training=None):
if training and target is not None:
return self.decoder_obj._logits_training(
inputs, target=tf.cast(target, dtype=tf.int32), training=training
)
else:
return inputs
|
def logits(self, inputs, target=None, training=None):
if training:
return self.decoder_obj._logits_training(
inputs, target=tf.cast(target, dtype=tf.int32), training=training
)
else:
return inputs
|
https://github.com/ludwig-ai/ludwig/issues/960
|
2020-10-19 03:06:47.001119: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
Traceback (most recent call last):
File "train.py", line 78, in <module>
train(args)
File "train.py", line 41, in train
ludwig_model.save_savedmodel(args.model_dir)
File "/usr/local/lib/python3.6/dist-packages/ludwig/api.py", line 1445, in save_savedmodel
self.model.save_savedmodel(save_path)
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 82, in save_savedmodel
keras_model = self.get_connected_model(training=False)
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 77, in get_connected_model
inputs = inputs or self.get_model_inputs(training)
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 63, in get_model_inputs
self.input_features.items()
File "/usr/local/lib/python3.6/dist-packages/ludwig/models/ecd.py", line 62, in <dictcomp>
for input_feature_name, input_feature in
File "/usr/local/lib/python3.6/dist-packages/ludwig/features/base_feature.py", line 70, in create_input
return tf.keras.Input(shape=self.get_input_shape(),
File "/usr/local/lib/python3.6/dist-packages/ludwig/features/set_feature.py", line 127, in get_input_shape
return len(self.vocab),
AttributeError: 'SetInputFeature' object has no attribute 'vocab'
|
AttributeError
|
def calibration_plot(
fraction_positives, mean_predicted_values, algorithm_names=None, filename=None
):
assert len(fraction_positives) == len(mean_predicted_values)
sns.set_style("whitegrid")
colors = plt.get_cmap("tab10").colors
num_algorithms = len(fraction_positives)
plt.figure(figsize=(9, 9))
plt.grid(which="both")
plt.grid(which="minor", alpha=0.5)
plt.grid(which="major", alpha=0.75)
plt.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for i in range(num_algorithms):
# ax1.plot(mean_predicted_values[i], fraction_positives[i],
# label=algorithms[i] if algorithm_names is not None and i < len(algorithms) else '')
# sns.tsplot(mean_predicted_values[i], fraction_positives[i], ax=ax1, color=colors[i])
assert len(mean_predicted_values[i]) == len(fraction_positives[i])
order = min(3, len(mean_predicted_values[i]) - 1)
sns.regplot(
mean_predicted_values[i],
fraction_positives[i],
order=order,
x_estimator=np.mean,
color=colors[i],
marker="o",
scatter_kws={"s": 40},
label=algorithm_names[i]
if algorithm_names is not None and i < len(algorithm_names)
else "",
)
ticks = np.linspace(0.0, 1.0, num=11)
plt.xlim([-0.05, 1.05])
plt.xticks(ticks)
plt.xlabel("Predicted probability")
plt.ylabel("Observed probability")
plt.ylim([-0.05, 1.05])
plt.yticks(ticks)
plt.legend(loc="lower right")
plt.title("Calibration (reliability curve)")
plt.tight_layout()
ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
if filename:
plt.savefig(filename)
else:
plt.show()
|
def calibration_plot(
fraction_positives, mean_predicted_values, algorithm_names=None, filename=None
):
assert len(fraction_positives) == len(mean_predicted_values)
sns.set_style("whitegrid")
colors = plt.get_cmap("tab10").colors
num_algorithms = len(fraction_positives)
plt.figure(figsize=(9, 9))
plt.grid(which="both")
plt.grid(which="minor", alpha=0.5)
plt.grid(which="major", alpha=0.75)
plt.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for i in range(num_algorithms):
# ax1.plot(mean_predicted_values[i], fraction_positives[i],
# label=algorithms[i] if algorithm_names is not None and i < len(algorithms) else '')
# sns.tsplot(mean_predicted_values[i], fraction_positives[i], ax=ax1, color=colors[i])
assert len(mean_predicted_values[i]) == len(fraction_positives[i])
order = min(3, len(mean_predicted_values[i] - 1))
sns.regplot(
mean_predicted_values[i],
fraction_positives[i],
order=order,
x_estimator=np.mean,
color=colors[i],
marker="o",
scatter_kws={"s": 40},
label=algorithm_names[i]
if algorithm_names is not None and i < len(algorithm_names)
else "",
)
ticks = np.linspace(0.0, 1.0, num=11)
plt.xlim([-0.05, 1.05])
plt.xticks(ticks)
plt.xlabel("Predicted probability")
plt.ylabel("Observed probability")
plt.ylim([-0.05, 1.05])
plt.yticks(ticks)
plt.legend(loc="lower right")
plt.title("Calibration (reliability curve)")
plt.tight_layout()
ludwig.contrib.contrib_command("visualize_figure", plt.gcf())
if filename:
plt.savefig(filename)
else:
plt.show()
|
https://github.com/ludwig-ai/ludwig/issues/620
|
for command, viz_pattern in zip(commands, vis_patterns):
result = subprocess.run(command)
figure_cnt = glob.glob(viz_pattern)
assert 0 == result.returncode
E AssertionError: assert 0 == 1
E + where 1 = CompletedProcess(args=['python', '-m', 'ludwig.visualize', '--visualization', 'calibration_1_vs_all', '--metrics', 'ac...robabilities.npy', '--model_names', 'Model1', 'Model2', '--top_k', '6', '-od', 'results/experiment_run'], returncode=1).returncode
tests/integration_tests/test_visualization.py:1581: AssertionError
----------------------------- Captured stderr call -----------------------------
Traceback (most recent call last):
File "/opt/python/3.6.7/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/python/3.6.7/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 3265, in <module>
cli(sys.argv[1:])
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 3260, in cli
vis_func(**vars(args))
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 623, in calibration_1_vs_all_cli
calibration_1_vs_all(probabilities_per_model, gt, **kwargs)
File "/home/travis/build/uber/ludwig/ludwig/visualize.py", line 2654, in calibration_1_vs_all
filename=filename
File "/home/travis/build/uber/ludwig/ludwig/utils/visualization_utils.py", line 856, in calibration_plot
algorithm_names) else '')
File "/home/travis/virtualenv/python3.6.7/lib/python3.6/site-packages/seaborn/regression.py", line 810, in regplot
x_jitter, y_jitter, color, label)
File "/home/travis/virtualenv/python3.6.7/lib/python3.6/site-packages/seaborn/regression.py", line 114, in __init__
self.dropna("x", "y", "units", "x_partial", "y_partial")
File "/home/travis/virtualenv/python3.6.7/lib/python3.6/site-packages/seaborn/regression.py", line 66, in dropna
setattr(self, var, val[not_na])
IndexError: too many indices for array
|
IndexError
|
def train_online(
self,
data_df=None,
data_csv=None,
data_dict=None,
batch_size=None,
learning_rate=None,
regularization_lambda=None,
dropout_rate=None,
bucketing_field=None,
gpus=None,
gpu_fraction=1,
logging_level=logging.ERROR,
):
"""This function is used to perform one epoch of training of the model
on the specified dataset.
# Inputs
:param data_df: (DataFrame) dataframe containing data.
:param data_csv: (string) input data CSV file.
:param data_dict: (dict) input data dictionary. It is expected to
contain one key for each field and the values have to be lists of
the same length. Each index in the lists corresponds to one
datapoint. For example a data set consisting of two datapoints
with a text and a class may be provided as the following dict
``{'text_field_name': ['text of the first datapoint', text of the
second datapoint'], 'class_filed_name': ['class_datapoints_1',
'class_datapoints_2']}`.
:param batch_size: (int) the batch size to use for training. By default
it's the one specified in the model definition.
:param learning_rate: (float) the learning rate to use for training. By
default the values is the one specified in the model definition.
:param regularization_lambda: (float) the regularization lambda
parameter to use for training. By default the values is the one
specified in the model definition.
:param dropout_rate: (float) the dropout rate to use for training. By
default the values is the one specified in the model definition.
:param bucketing_field: (string) the bucketing field to use for
bucketing the data. By default the values is one specified in the
model definition.
:param gpus: (string, default: `None`) list of GPUs to use (it uses the
same syntax of CUDA_VISIBLE_DEVICES)
:param gpu_fraction: (float, default `1.0`) fraction of GPU memory to
initialize the process with
:param logging_level: (int, default: `logging.ERROR`) logging level to
use for logging. Use logging constants like `logging.DEBUG`,
`logging.INFO` and `logging.ERROR`. By default only errors will
be printed.
There are three ways to provide data: by dataframes using the `data_df`
parameter, by CSV using the `data_csv` parameter and by dictionary,
using the `data_dict` parameter.
The DataFrame approach uses data previously obtained and put in a
dataframe, the CSV approach loads data from a CSV file, while dict
approach uses data organized by keys representing columns and values
that are lists of the datapoints for each. For example a data set
consisting of two datapoints with a text and a class may be provided as
the following dict ``{'text_field_name}: ['text of the first datapoint',
text of the second datapoint'], 'class_filed_name':
['class_datapoints_1', 'class_datapoints_2']}`.
"""
logging.getLogger().setLevel(logging_level)
if logging_level in {logging.WARNING, logging.ERROR, logging.CRITICAL}:
set_disable_progressbar(True)
if (
self.model is None
or self.model_definition is None
or self.train_set_metadata is None
):
raise ValueError("Model has not been initialized or loaded")
if data_df is None:
data_df = self._read_data(data_csv, data_dict)
data_df.csv = data_csv
if batch_size is None:
batch_size = self.model_definition["training"]["batch_size"]
if learning_rate is None:
learning_rate = self.model_definition["training"]["learning_rate"]
if regularization_lambda is None:
regularization_lambda = self.model_definition["training"][
"regularization_lambda"
]
if dropout_rate is None:
dropout_rate = (self.model_definition["training"]["dropout_rate"],)
if bucketing_field is None:
bucketing_field = self.model_definition["training"]["bucketing_field"]
logging.debug("Preprocessing {} datapoints".format(len(data_df)))
features_to_load = (
self.model_definition["input_features"]
+ self.model_definition["output_features"]
)
preprocessed_data = build_data(
data_df,
features_to_load,
self.train_set_metadata,
self.model_definition["preprocessing"],
)
replace_text_feature_level(
self.model_definition["input_features"]
+ self.model_definition["output_features"],
[preprocessed_data],
)
dataset = Dataset(
preprocessed_data,
self.model_definition["input_features"],
self.model_definition["output_features"],
None,
)
logging.debug("Training batch")
self.model.train_online(
dataset,
batch_size=batch_size,
learning_rate=learning_rate,
regularization_lambda=regularization_lambda,
dropout_rate=dropout_rate,
bucketing_field=bucketing_field,
gpus=gpus,
gpu_fraction=gpu_fraction,
)
|
def train_online(
self,
data_df=None,
data_csv=None,
data_dict=None,
batch_size=None,
learning_rate=None,
regularization_lambda=None,
dropout_rate=None,
bucketing_field=None,
gpus=None,
gpu_fraction=1,
logging_level=logging.ERROR,
):
"""This function is used to perform one epoch of training of the model
on the specified dataset.
# Inputs
:param data_df: (DataFrame) dataframe containing data.
:param data_csv: (string) input data CSV file.
:param data_dict: (dict) input data dictionary. It is expected to
contain one key for each field and the values have to be lists of
the same length. Each index in the lists corresponds to one
datapoint. For example a data set consisting of two datapoints
with a text and a class may be provided as the following dict
``{'text_field_name': ['text of the first datapoint', text of the
second datapoint'], 'class_filed_name': ['class_datapoints_1',
'class_datapoints_2']}`.
:param batch_size: (int) the batch size to use for training. By default
it's the one specified in the model definition.
:param learning_rate: (float) the learning rate to use for training. By
default the values is the one specified in the model definition.
:param regularization_lambda: (float) the regularization lambda
parameter to use for training. By default the values is the one
specified in the model definition.
:param dropout_rate: (float) the dropout rate to use for training. By
default the values is the one specified in the model definition.
:param bucketing_field: (string) the bucketing field to use for
bucketing the data. By default the values is one specified in the
model definition.
:param gpus: (string, default: `None`) list of GPUs to use (it uses the
same syntax of CUDA_VISIBLE_DEVICES)
:param gpu_fraction: (float, default `1.0`) fraction of GPU memory to
initialize the process with
:param logging_level: (int, default: `logging.ERROR`) logging level to
use for logging. Use logging constants like `logging.DEBUG`,
`logging.INFO` and `logging.ERROR`. By default only errors will
be printed.
There are three ways to provide data: by dataframes using the `data_df`
parameter, by CSV using the `data_csv` parameter and by dictionary,
using the `data_dict` parameter.
The DataFrame approach uses data previously obtained and put in a
dataframe, the CSV approach loads data from a CSV file, while dict
approach uses data organized by keys representing columns and values
that are lists of the datapoints for each. For example a data set
consisting of two datapoints with a text and a class may be provided as
the following dict ``{'text_field_name}: ['text of the first datapoint',
text of the second datapoint'], 'class_filed_name':
['class_datapoints_1', 'class_datapoints_2']}`.
"""
logging.getLogger().setLevel(logging_level)
if logging_level in {logging.WARNING, logging.ERROR, logging.CRITICAL}:
set_disable_progressbar(True)
if (
self.model is None
or self.model_definition is None
or self.train_set_metadata is None
):
raise ValueError("Model has not been initialized or loaded")
if data_df is None:
data_df = self._read_data(data_csv, data_dict)
if batch_size is None:
batch_size = self.model_definition["training"]["batch_size"]
if learning_rate is None:
learning_rate = self.model_definition["training"]["learning_rate"]
if regularization_lambda is None:
regularization_lambda = self.model_definition["training"][
"regularization_lambda"
]
if dropout_rate is None:
dropout_rate = (self.model_definition["training"]["dropout_rate"],)
if bucketing_field is None:
bucketing_field = self.model_definition["training"]["bucketing_field"]
logging.debug("Preprocessing {} datapoints".format(len(data_df)))
features_to_load = (
self.model_definition["input_features"]
+ self.model_definition["output_features"]
)
preprocessed_data = build_data(
data_df,
features_to_load,
self.train_set_metadata,
self.model_definition["preprocessing"],
)
replace_text_feature_level(
self.model_definition["input_features"]
+ self.model_definition["output_features"],
[preprocessed_data],
)
dataset = Dataset(
preprocessed_data,
self.model_definition["input_features"],
self.model_definition["output_features"],
None,
)
logging.debug("Training batch")
self.model.train_online(
dataset,
batch_size=batch_size,
learning_rate=learning_rate,
regularization_lambda=regularization_lambda,
dropout_rate=dropout_rate,
bucketing_field=bucketing_field,
gpus=gpus,
gpu_fraction=gpu_fraction,
)
|
https://github.com/ludwig-ai/ludwig/issues/100
|
Traceback (most recent call last):
File "/home/andrey/.venvs/ludwig-learn/bin/ludwig", line 11, in <module>
sys.exit(main())
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 86, in main
CLI()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 64, in __init__
getattr(self, args.command)()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 73, in predict
predict.cli(sys.argv[2:])
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 379, in cli
full_predict(**vars(args))
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 104, in full_predict
debug
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 173, in predict
gpu_fraction=gpu_fraction
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 1182, in predict
only_predictions=only_predictions
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 756, in batch_evaluation
is_training=is_training
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 929, in run
run_metadata_ptr)
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1128, in _run
str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1,) for Tensor 'image_path/image_path:0', which has shape '(?, 100, 100, 3)'
|
ValueError
|
def add_feature_data(feature, dataset_df, data, metadata, preprocessing_parameters):
set_default_value(feature, "in_memory", preprocessing_parameters["in_memory"])
if "height" in preprocessing_parameters or "width" in preprocessing_parameters:
should_resize = True
try:
provided_height = int(preprocessing_parameters[HEIGHT])
provided_width = int(preprocessing_parameters[WIDTH])
except ValueError as e:
raise ValueError(
"Image height and width must be set and have "
"positive integer values: " + str(e)
)
if provided_height <= 0 or provided_width <= 0:
raise ValueError("Image height and width must be positive integers")
else:
should_resize = False
csv_path = None
if hasattr(dataset_df, "csv"):
csv_path = os.path.dirname(os.path.abspath(dataset_df.csv))
num_images = len(dataset_df)
height = 0
width = 0
num_channels = 1
if num_images > 0:
# here if a width and height have not been specified
# we assume that all images have the same wifth and im_height
# thus the width and height of the first one are the same
# of all the other ones
if csv_path is None and not os.path.isabs(dataset_df[feature["name"]][0]):
raise ValueError("Image file paths must be absolute")
first_image = imread(get_abs_path(csv_path, dataset_df[feature["name"]][0]))
height = first_image.shape[0]
width = first_image.shape[1]
if first_image.ndim == 2:
num_channels = 1
else:
num_channels = first_image.shape[2]
if should_resize:
height = provided_height
width = provided_width
metadata[feature["name"]]["preprocessing"]["height"] = height
metadata[feature["name"]]["preprocessing"]["width"] = width
metadata[feature["name"]]["preprocessing"]["num_channels"] = num_channels
if feature["in_memory"]:
data[feature["name"]] = np.empty(
(num_images, height, width, num_channels), dtype=np.int8
)
for i in range(len(dataset_df)):
img = imread(get_abs_path(csv_path, dataset_df[feature["name"]][i]))
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img, (height, width), preprocessing_parameters["resize_method"]
)
data[feature["name"]][i, :, :, :] = img
else:
data_fp = os.path.splitext(dataset_df.csv)[0] + ".hdf5"
mode = "w"
if os.path.isfile(data_fp):
mode = "r+"
with h5py.File(data_fp, mode) as h5_file:
image_dataset = h5_file.create_dataset(
feature["name"] + "_data",
(num_images, height, width, num_channels),
dtype=np.uint8,
)
for i in range(len(dataset_df)):
img = imread(get_abs_path(csv_path, dataset_df[feature["name"]][i]))
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img,
(height, width),
preprocessing_parameters["resize_method"],
)
image_dataset[i, :height, :width, :] = img
data[feature["name"]] = np.arange(num_images)
|
def add_feature_data(feature, dataset_df, data, metadata, preprocessing_parameters):
set_default_value(feature, "in_memory", preprocessing_parameters["in_memory"])
if "height" in preprocessing_parameters or "width" in preprocessing_parameters:
should_resize = True
try:
provided_height = int(preprocessing_parameters[HEIGHT])
provided_width = int(preprocessing_parameters[WIDTH])
except ValueError as e:
raise ValueError(
"Image height and width must be set and have "
"positive integer values: " + str(e)
)
if provided_height <= 0 or provided_width <= 0:
raise ValueError("Image height and width must be positive integers")
else:
should_resize = False
csv_path = os.path.dirname(os.path.abspath(dataset_df.csv))
num_images = len(dataset_df)
height = 0
width = 0
num_channels = 1
if num_images > 0:
# here if a width and height have not been specified
# we assume that all images have the same wifth and im_height
# thus the width and height of the first one are the same
# of all the other ones
first_image = imread(os.path.join(csv_path, dataset_df[feature["name"]][0]))
height = first_image.shape[0]
width = first_image.shape[1]
if first_image.ndim == 2:
num_channels = 1
else:
num_channels = first_image.shape[2]
if should_resize:
height = provided_height
width = provided_width
metadata[feature["name"]]["preprocessing"]["height"] = height
metadata[feature["name"]]["preprocessing"]["width"] = width
metadata[feature["name"]]["preprocessing"]["num_channels"] = num_channels
if feature["in_memory"]:
data[feature["name"]] = np.empty(
(num_images, height, width, num_channels), dtype=np.int8
)
for i in range(len(dataset_df)):
filename = os.path.join(csv_path, dataset_df[feature["name"]][i])
img = imread(filename)
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img, (height, width), preprocessing_parameters["resize_method"]
)
data[feature["name"]][i, :, :, :] = img
else:
data_fp = os.path.splitext(dataset_df.csv)[0] + ".hdf5"
mode = "w"
if os.path.isfile(data_fp):
mode = "r+"
with h5py.File(data_fp, mode) as h5_file:
image_dataset = h5_file.create_dataset(
feature["name"] + "_data",
(num_images, height, width, num_channels),
dtype=np.uint8,
)
for i in range(len(dataset_df)):
filename = os.path.join(csv_path, dataset_df[feature["name"]][i])
img = imread(filename)
if img.ndim == 2:
img = img.reshape((img.shape[0], img.shape[1], 1))
if should_resize:
img = resize_image(
img,
(height, width),
preprocessing_parameters["resize_method"],
)
image_dataset[i, :height, :width, :] = img
data[feature["name"]] = np.arange(num_images)
|
https://github.com/ludwig-ai/ludwig/issues/100
|
Traceback (most recent call last):
File "/home/andrey/.venvs/ludwig-learn/bin/ludwig", line 11, in <module>
sys.exit(main())
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 86, in main
CLI()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 64, in __init__
getattr(self, args.command)()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 73, in predict
predict.cli(sys.argv[2:])
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 379, in cli
full_predict(**vars(args))
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 104, in full_predict
debug
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 173, in predict
gpu_fraction=gpu_fraction
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 1182, in predict
only_predictions=only_predictions
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 756, in batch_evaluation
is_training=is_training
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 929, in run
run_metadata_ptr)
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1128, in _run
str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1,) for Tensor 'image_path/image_path:0', which has shape '(?, 100, 100, 3)'
|
ValueError
|
def __init__(self, feature):
super().__init__(feature)
self.height = 0
self.width = 0
self.num_channels = 0
self.in_memory = True
self.encoder = "stacked_cnn"
encoder_parameters = self.overwrite_defaults(feature)
self.encoder_obj = self.get_image_encoder(encoder_parameters)
|
def __init__(self, feature):
super().__init__(feature)
self.height = 0
self.width = 0
self.num_channels = 0
self.in_memory = True
self.data_hdf5_fp = ""
self.encoder = "stacked_cnn"
encoder_parameters = self.overwrite_defaults(feature)
self.encoder_obj = self.get_image_encoder(encoder_parameters)
|
https://github.com/ludwig-ai/ludwig/issues/100
|
Traceback (most recent call last):
File "/home/andrey/.venvs/ludwig-learn/bin/ludwig", line 11, in <module>
sys.exit(main())
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 86, in main
CLI()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 64, in __init__
getattr(self, args.command)()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 73, in predict
predict.cli(sys.argv[2:])
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 379, in cli
full_predict(**vars(args))
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 104, in full_predict
debug
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 173, in predict
gpu_fraction=gpu_fraction
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 1182, in predict
only_predictions=only_predictions
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 756, in batch_evaluation
is_training=is_training
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 929, in run
run_metadata_ptr)
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1128, in _run
str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1,) for Tensor 'image_path/image_path:0', which has shape '(?, 100, 100, 3)'
|
ValueError
|
def update_model_definition_with_metadata(
input_feature, feature_metadata, *args, **kwargs
):
for dim in ["height", "width", "num_channels"]:
input_feature[dim] = feature_metadata["preprocessing"][dim]
|
def update_model_definition_with_metadata(
input_feature, feature_metadata, *args, **kwargs
):
for dim in ["height", "width", "num_channels"]:
input_feature[dim] = feature_metadata["preprocessing"][dim]
input_feature["data_hdf5_fp"] = kwargs["model_definition"]["data_hdf5_fp"]
|
https://github.com/ludwig-ai/ludwig/issues/100
|
Traceback (most recent call last):
File "/home/andrey/.venvs/ludwig-learn/bin/ludwig", line 11, in <module>
sys.exit(main())
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 86, in main
CLI()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 64, in __init__
getattr(self, args.command)()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 73, in predict
predict.cli(sys.argv[2:])
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 379, in cli
full_predict(**vars(args))
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 104, in full_predict
debug
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 173, in predict
gpu_fraction=gpu_fraction
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 1182, in predict
only_predictions=only_predictions
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 756, in batch_evaluation
is_training=is_training
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 929, in run
run_metadata_ptr)
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1128, in _run
str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1,) for Tensor 'image_path/image_path:0', which has shape '(?, 100, 100, 3)'
|
ValueError
|
def preprocess_for_prediction(
model_path,
split,
dataset_type="generic",
data_csv=None,
data_hdf5=None,
train_set_metadata=None,
only_predictions=False,
):
"""Preprocesses the dataset to parse it into a format that is usable by the
Ludwig core
:param model_path: The input data that is joined with the model
hyperparameter file to create the model definition file
:type model_path: Str
:param dataset_type: Generic
:type: Str
:param split: Splits the data into the train and test sets
:param data_csv: The CSV input data file
:param data_hdf5: The hdf5 data file if there is no csv data file
:param train_set_metadata: Train set metadata for the input features
:param only_predictions: If False does not load output features
:returns: Dataset, Train set metadata
"""
model_definition = load_json(
os.path.join(model_path, MODEL_HYPERPARAMETERS_FILE_NAME)
)
preprocessing_params = merge_dict(
default_preprocessing_parameters, model_definition["preprocessing"]
)
# Check if hdf5 and json already exist
if data_csv is not None:
data_hdf5_fp = os.path.splitext(data_csv)[0] + ".hdf5"
if os.path.isfile(data_hdf5_fp):
logging.info(
"Found hdf5 with the same filename of the csv, using it instead"
)
data_csv = None
data_hdf5 = data_hdf5_fp
# Load data
_, _, build_dataset, _ = get_dataset_fun(dataset_type)
train_set_metadata = load_metadata(train_set_metadata)
features = model_definition["input_features"] + (
[] if only_predictions else model_definition["output_features"]
)
if split == "full":
if data_hdf5 is not None:
dataset = load_data(
data_hdf5,
model_definition["input_features"],
[] if only_predictions else model_definition["output_features"],
split_data=False,
shuffle_training=False,
)
else:
dataset, train_set_metadata = build_dataset(
data_csv,
features,
preprocessing_params,
train_set_metadata=train_set_metadata,
)
else:
if data_hdf5 is not None:
training, test, validation = load_data(
data_hdf5,
model_definition["input_features"],
[] if only_predictions else model_definition["output_features"],
shuffle_training=False,
)
if split == "training":
dataset = training
elif split == "validation":
dataset = validation
else: # if split == 'test':
dataset = test
else:
dataset, train_set_metadata = build_dataset(
data_csv,
features,
preprocessing_params,
train_set_metadata=train_set_metadata,
)
replace_text_feature_level(
model_definition["input_features"]
+ ([] if only_predictions else model_definition["output_features"]),
[dataset],
)
dataset = Dataset(
dataset,
model_definition["input_features"],
[] if only_predictions else model_definition["output_features"],
data_hdf5_fp,
)
return dataset, train_set_metadata
|
def preprocess_for_prediction(
model_path,
split,
dataset_type="generic",
data_csv=None,
data_hdf5=None,
train_set_metadata=None,
only_predictions=False,
):
"""Preprocesses the dataset to parse it into a format that is usable by the
Ludwig core
:param model_path: The input data that is joined with the model
hyperparameter file to create the model definition file
:type model_path: Str
:param dataset_type: Generic
:type: Str
:param split: Splits the data into the train and test sets
:param data_csv: The CSV input data file
:param data_hdf5: The hdf5 data file if there is no csv data file
:param train_set_metadata: Train set metadata for the input features
:param only_predictions: If False does not load output features
:returns: Dataset, Train set metadata
"""
model_definition = load_json(
os.path.join(model_path, MODEL_HYPERPARAMETERS_FILE_NAME)
)
preprocessing_params = merge_dict(
default_preprocessing_parameters, model_definition["preprocessing"]
)
# Check if hdf5 and json already exist
if data_csv is not None:
data_hdf5_fp = os.path.splitext(data_csv)[0] + ".hdf5"
if os.path.isfile(data_hdf5_fp):
logging.info(
"Found hdf5 with the same filename of the csv, using it instead"
)
data_csv = None
data_hdf5 = data_hdf5_fp
# Load data
_, _, build_dataset, _ = get_dataset_fun(dataset_type)
train_set_metadata = load_metadata(train_set_metadata)
features = model_definition["input_features"] + (
[] if only_predictions else model_definition["output_features"]
)
if split == "full":
if data_hdf5 is not None:
dataset = load_data(
data_hdf5,
model_definition["input_features"],
[] if only_predictions else model_definition["output_features"],
split_data=False,
shuffle_training=False,
)
else:
dataset, train_set_metadata = build_dataset(
data_csv,
features,
preprocessing_params,
train_set_metadata=train_set_metadata,
)
else:
if data_hdf5 is not None:
training, test, validation = load_data(
data_hdf5,
model_definition["input_features"],
[] if only_predictions else model_definition["output_features"],
shuffle_training=False,
)
if split == "training":
dataset = training
elif split == "validation":
dataset = validation
else: # if split == 'test':
dataset = test
else:
dataset, train_set_metadata = build_dataset(
data_csv,
features,
preprocessing_params,
train_set_metadata=train_set_metadata,
)
replace_text_feature_level(
model_definition["input_features"]
+ ([] if only_predictions else model_definition["output_features"]),
[dataset],
)
dataset = Dataset(
dataset,
model_definition["input_features"],
[] if only_predictions else model_definition["output_features"],
data_hdf5,
)
return dataset, train_set_metadata
|
https://github.com/ludwig-ai/ludwig/issues/100
|
Traceback (most recent call last):
File "/home/andrey/.venvs/ludwig-learn/bin/ludwig", line 11, in <module>
sys.exit(main())
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 86, in main
CLI()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 64, in __init__
getattr(self, args.command)()
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/cli.py", line 73, in predict
predict.cli(sys.argv[2:])
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 379, in cli
full_predict(**vars(args))
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 104, in full_predict
debug
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/predict.py", line 173, in predict
gpu_fraction=gpu_fraction
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 1182, in predict
only_predictions=only_predictions
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/ludwig/models/model.py", line 756, in batch_evaluation
is_training=is_training
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 929, in run
run_metadata_ptr)
File "/home/andrey/.venvs/ludwig-learn/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1128, in _run
str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1,) for Tensor 'image_path/image_path:0', which has shape '(?, 100, 100, 3)'
|
ValueError
|
def save_csv(data_fp, data):
with open(data_fp, "w", encoding="utf-8") as csv_file:
writer = csv.writer(csv_file)
for row in data:
if not isinstance(row, collections.Iterable) or isinstance(row, str):
row = [row]
writer.writerow(row)
|
def save_csv(data_fp, data):
writer = csv.writer(open(data_fp, "w"))
for row in data:
if not isinstance(row, collections.Iterable) or isinstance(row, str):
row = [row]
writer.writerow(row)
|
https://github.com/ludwig-ai/ludwig/issues/90
|
Traceback (most recent call last):
File "C:\Users\xxx\AppData\Local\Programs\Python\Python36\Scripts\ludwig-script.py", line 11, in <module>
load_entry_point('ludwig==0.1.0', 'console_scripts', 'ludwig')()
File "c:\users\xxx\appdata\local\programs\python\python36\lib\site-packages\ludwig\cli.py", line 86, in main
CLI()
File "c:\users\xxxi\appdata\local\programs\python\python36\lib\site-packages\ludwig\cli.py", line 64, in __init__
getattr(self, args.command)()
File "c:\users\xxx\appdata\local\programs\python\python36\lib\site-packages\ludwig\cli.py", line 73, in predict
predict.cli(sys.argv[2:])
File "c:\users\xxx\appdata\local\programs\python\python36\lib\site-packages\ludwig\predict.py", line 379, in cli
full_predict(**vars(args))
File "c:\users\xxx\appdata\local\programs\python\python36\lib\site-packages\ludwig\predict.py", line 120, in full_predict
save_prediction_outputs(postprocessed_output, experiment_dir_name)
File "c:\users\xxx\appdata\local\programs\python\python36\lib\site-packages\ludwig\predict.py", line 210, in save_prediction_outputs
save_csv(csv_filename.format(output_field, output_type), values)
File "c:\users\xxx\appdata\local\programs\python\python36\lib\site-packages\ludwig\utils\data_utils.py", line 60, in save_csv
writer.writerow(row)
File "c:\users\xxx\appdata\local\programs\python\python36\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode character '\u264b' in position 0: character maps to <undefined>
|
UnicodeEncodeError
|
def replace_text_feature_level(model_definition, datasets):
for feature in (
model_definition["input_features"] + model_definition["output_features"]
):
if feature["type"] == TEXT:
for dataset in datasets:
dataset[feature["name"]] = dataset[
"{}_{}".format(feature["name"], feature["level"])
]
for level in ("word", "char"):
name_level = "{}_{}".format(feature["name"], level)
if name_level in dataset:
del dataset[name_level]
|
def replace_text_feature_level(model_definition, datasets):
for feature in (
model_definition["input_features"] + model_definition["output_features"]
):
if feature["type"] == TEXT:
for dataset in datasets:
dataset[feature["name"]] = dataset[
"{}_{}".format(feature["name"], feature["level"])
]
for level in ("word", "char"):
del dataset["{}_{}".format(feature["name"], level)]
|
https://github.com/ludwig-ai/ludwig/issues/56
|
Traceback (most recent call last):
File "/Users/user/.virtualenvs/ml/bin/ludwig", line 10, in <module>
sys.exit(main())
File "/Users/user/.virtualenvs/ml/lib/python3.6/site-packages/ludwig/cli.py", line 86, in main
CLI()
File "/Users/user/.virtualenvs/ml/lib/python3.6/site-packages/ludwig/cli.py", line 64, in __init__
getattr(self, args.command)()
File "/Users/user/.virtualenvs/ml/lib/python3.6/site-packages/ludwig/cli.py", line 70, in train
train.cli(sys.argv[2:])
File "/Users/user/.virtualenvs/ml/lib/python3.6/site-packages/ludwig/train.py", line 663, in cli
full_train(**vars(args))
File "/Users/user/.virtualenvs/ml/lib/python3.6/site-packages/ludwig/train.py", line 224, in full_train
random_seed=random_seed
File "/Users/user/.virtualenvs/ml/lib/python3.6/site-packages/ludwig/data/preprocessing.py", line 562, in preprocess_for_training
[training_set, validation_set, test_set]
File "/Users/user/.virtualenvs/ml/lib/python3.6/site-packages/ludwig/data/preprocessing.py", line 777, in replace_text_feature_level
level)
KeyError: 'name_word'
|
KeyError
|
def get_elements_by_categories(element_bicats, elements=None, doc=None):
# if source elements is provided
if elements:
return [
x
for x in elements
if get_builtincategory(x.Category.Name) in element_bicats
]
# otherwise collect from model
cat_filters = [DB.ElementCategoryFilter(x) for x in element_bicats if x]
elcats_filter = DB.LogicalOrFilter(framework.List[DB.ElementFilter](cat_filters))
return (
DB.FilteredElementCollector(doc or HOST_APP.doc)
.WherePasses(elcats_filter)
.WhereElementIsNotElementType()
.ToElements()
)
|
def get_elements_by_categories(element_bicats, elements=None, doc=None):
# if source elements is provided
if elements:
return [
x
for x in elements
if get_builtincategory(x.Category.Name) in element_bicats
]
# otherwise collect from model
cat_filters = [DB.ElementCategoryFilter(x) for x in element_bicats]
elcats_filter = DB.LogicalOrFilter(framework.List[DB.ElementFilter](cat_filters))
return (
DB.FilteredElementCollector(doc or HOST_APP.doc)
.WherePasses(elcats_filter)
.WhereElementIsNotElementType()
.ToElements()
)
|
https://github.com/eirannejad/pyRevit/issues/833
|
ERROR [pyrevit.revit.db.transaction] Error in TransactionGroup Context. Rolling back changes. | <type 'exceptions.Exception'>:The input argument "categoryId" of function `anonymous-namespace'::ElementCategoryFilter_constructor or one item in the collection is null at line 230 of file d:\ship\2019_px64\source\revit\revitdbapi\gensrc\APIBuiltInElementFiltersProxy.cpp.
Parameter name: categoryId
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\jriga\AppData\Roaming\pyRevit-Master\extensions\pyRevitTools.extension\pyRevit.tab\Modify.panel\edit2.stack\ReNumber.pushbutton\script.py", line 307, in <module>
File "C:\Users\jriga\AppData\Roaming\pyRevit-Master\extensions\pyRevitTools.extension\pyRevit.tab\Modify.panel\edit2.stack\ReNumber.pushbutton\script.py", line 191, in pick_and_renumber
File "C:\Users\jriga\AppData\Roaming\pyRevit-Master\extensions\pyRevitTools.extension\pyRevit.tab\Modify.panel\edit2.stack\ReNumber.pushbutton\script.py", line 122, in get_elements_dict
File "C:\Users\jriga\AppData\Roaming\pyRevit-Master\pyrevitlib\pyrevit\revit\db\query.py", line 261, in get_elements_by_categories
Exception: The input argument "categoryId" of function `anonymous-namespace'::ElementCategoryFilter_constructor or one item in the collection is null at line 230 of file d:\ship\2019_px64\source\revit\revitdbapi\gensrc\APIBuiltInElementFiltersProxy.cpp.
Parameter name: categoryId
Script Executor Traceback:
Autodesk.Revit.Exceptions.ArgumentNullException: The input argument "categoryId" of function `anonymous-namespace'::ElementCategoryFilter_constructor or one item in the collection is null at line 230 of file d:\ship\2019_px64\source\revit\revitdbapi\gensrc\APIBuiltInElementFiltersProxy.cpp.
Parameter name: categoryId
à Microsoft.Scripting.Interpreter.ThrowInstruction.Run(InterpretedFrame frame)
à Microsoft.Scripting.Interpreter.Interpreter.HandleException(InterpretedFrame frame, Exception exception)
à Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
à Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
à IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
à PyRevitLabs.PyRevit.Runtime.IronPythonEngine.Execute(ScriptRuntime& runtime)
|
Exception
|
def save_options(self, sender, args):
# base
self._config.halftone = self.halftone.IsChecked
self._config.transparency = self.transparency.IsChecked
# projection lines
self._config.proj_line_color = self.proj_line_color.IsChecked
self._config.proj_line_pattern = self.proj_line_pattern.IsChecked
self._config.proj_line_weight = self.proj_line_weight.IsChecked
# projection forground pattern
self._config.proj_fill_color = self.proj_fill_color.IsChecked
self._config.proj_fill_pattern = self.proj_fill_pattern.IsChecked
self._config.proj_fill_pattern_visibility = (
self.proj_fill_pattern_visibility.IsChecked
)
# projection background pattern (Revit >= 2019)
if HOST_APP.is_newer_than(2019, or_equal=True):
self._config.proj_bg_fill_color = self.proj_bg_fill_color.IsChecked
self._config.proj_bg_fill_pattern = self.proj_bg_fill_pattern.IsChecked
self._config.proj_bg_fill_pattern_visibility = (
self.proj_bg_fill_pattern_visibility.IsChecked
)
# cut lines
self._config.cut_line_color = self.cut_line_color.IsChecked
self._config.cut_line_pattern = self.cut_line_pattern.IsChecked
self._config.cut_line_weight = self.cut_line_weight.IsChecked
# cut forground pattern
self._config.cut_fill_color = self.cut_fill_color.IsChecked
self._config.cut_fill_pattern = self.cut_fill_pattern.IsChecked
self._config.cut_fill_pattern_visibility = (
self.cut_fill_pattern_visibility.IsChecked
)
# cut background pattern (Revit >= 2019)
if HOST_APP.is_newer_than(2019, or_equal=True):
self._config.cut_bg_fill_color = self.cut_bg_fill_color.IsChecked
self._config.cut_bg_fill_pattern = self.cut_bg_fill_pattern.IsChecked
self._config.cut_bg_fill_pattern_visibility = (
self.cut_bg_fill_pattern_visibility.IsChecked
)
# dim overrides
self._config.dim_override = self.dim_override.IsChecked
self._config.dim_textposition = self.dim_textposition.IsChecked
self._config.dim_above = self.dim_above.IsChecked
self._config.dim_below = self.dim_below.IsChecked
self._config.dim_prefix = self.dim_prefix.IsChecked
self._config.dim_suffix = self.dim_suffix.IsChecked
script.save_config()
self.Close()
|
def save_options(self, sender, args):
self._config.halftone = self.halftone.IsChecked
self._config.transparency = self.transparency.IsChecked
self._config.proj_line_color = self.proj_line_color.IsChecked
self._config.proj_line_pattern = self.proj_line_pattern.IsChecked
self._config.proj_line_weight = self.proj_line_weight.IsChecked
self._config.proj_fill_color = self.proj_fill_color.IsChecked
self._config.proj_fill_pattern = self.proj_fill_pattern.IsChecked
self._config.proj_fill_pattern_visibility = (
self.proj_fill_pattern_visibility.IsChecked
)
self._config.proj_bg_fill_color = self.proj_bg_fill_color.IsChecked
self._config.proj_bg_fill_pattern = self.proj_bg_fill_pattern.IsChecked
self._config.proj_bg_fill_pattern_visibility = (
self.proj_bg_fill_pattern_visibility.IsChecked
)
self._config.cut_line_color = self.cut_line_color.IsChecked
self._config.cut_line_pattern = self.cut_line_pattern.IsChecked
self._config.cut_line_weight = self.cut_line_weight.IsChecked
self._config.cut_fill_color = self.cut_fill_color.IsChecked
self._config.cut_fill_pattern = self.cut_fill_pattern.IsChecked
self._config.cut_fill_pattern_visibility = (
self.cut_fill_pattern_visibility.IsChecked
)
self._config.cut_bg_fill_color = self.cut_bg_fill_color.IsChecked
self._config.cut_bg_fill_pattern = self.cut_bg_fill_pattern.IsChecked
self._config.cut_bg_fill_pattern_visibility = (
self.cut_bg_fill_pattern_visibility.IsChecked
)
self._config.dim_override = self.dim_override.IsChecked
self._config.dim_textposition = self.dim_textposition.IsChecked
self._config.dim_above = self.dim_above.IsChecked
self._config.dim_below = self.dim_below.IsChecked
self._config.dim_prefix = self.dim_prefix.IsChecked
self._config.dim_suffix = self.dim_suffix.IsChecked
script.save_config()
self.Close()
|
https://github.com/eirannejad/pyRevit/issues/471
|
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\rob.cross\AppData\Roaming\pyRevit-Master\extensions\pyRevitTools.extension\pyRevit.tab\Modify.panel\Match.pushbutton\config.py", line 158, in
File "C:\Users\rob.cross\AppData\Roaming\pyRevit-Master\extensions\pyRevitTools.extension\pyRevit.tab\Modify.panel\Match.pushbutton\config.py", line 127, in save_options
AttributeError: 'MatchPropConfigWindow' object has no attribute 'proj_bg_fill_color'
Script Executor Traceback:
System.MissingMemberException: 'MatchPropConfigWindow' object has no attribute 'proj_bg_fill_color'
at IronPython.Runtime.Binding.MetaUserObject.FastGetBinderHelper.<>c__DisplayClass16_0.b__1(CallSite site, Object self, CodeContext context)
at IronPython.Runtime.Types.GetMemberDelegates.SlotDict(CallSite site, Object self, CodeContext context)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at Microsoft.Scripting.Interpreter.DynamicInstruction`3.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run4[T0,T1,T2,T3,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3)
at IronPython.Compiler.PythonCallTargets.OriginalCallTarget3(PythonFunction function, Object arg0, Object arg1, Object arg2)
at CallSite.Target(Closure , CallSite , Object , Object , RoutedEventArgs )
at System.Dynamic.UpdateDelegates.UpdateAndExecute3[T0,T1,T2,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2)
at _Scripting_(Object[] , Object , RoutedEventArgs )
at System.Windows.EventRoute.InvokeHandlersImpl(Object source, RoutedEventArgs args, Boolean reRaised)
at System.Windows.UIElement.RaiseEventImpl(DependencyObject sender, RoutedEventArgs args)
at System.Windows.Controls.Primitives.ButtonBase.OnClick()
at System.Windows.Controls.Button.OnClick()
at System.Windows.Controls.Primitives.ButtonBase.OnMouseLeftButtonUp(MouseButtonEventArgs e)
at System.Windows.RoutedEventArgs.InvokeHandler(Delegate handler, Object target)
at System.Windows.RoutedEventHandlerInfo.InvokeHandler(Object target, RoutedEventArgs routedEventArgs)
at System.Windows.EventRoute.InvokeHandlersImpl(Object source, RoutedEventArgs args, Boolean reRaised)
at System.Windows.UIElement.ReRaiseEventAs(DependencyObject sender, RoutedEventArgs args, RoutedEvent newEvent)
at System.Windows.UIElement.OnMouseUpThunk(Object sender, MouseButtonEventArgs e)
at System.Windows.RoutedEventArgs.InvokeHandler(Delegate handler, Object target)
at System.Windows.RoutedEventHandlerInfo.InvokeHandler(Object target, RoutedEventArgs routedEventArgs)
at System.Windows.EventRoute.InvokeHandlersImpl(Object source, RoutedEventArgs args, Boolean reRaised)
at System.Windows.UIElement.RaiseEventImpl(DependencyObject sender, RoutedEventArgs args)
at System.Windows.UIElement.RaiseTrustedEvent(RoutedEventArgs args)
at System.Windows.Input.InputManager.ProcessStagingArea()
at System.Windows.Input.InputManager.ProcessInput(InputEventArgs input)
at System.Windows.Input.InputProviderSite.ReportInput(InputReport inputReport)
at System.Windows.Interop.HwndMouseInputProvider.ReportInput(IntPtr hwnd, InputMode mode, Int32 timestamp, RawMouseActions actions, Int32 x, Int32 y, Int32 wheel)
at System.Windows.Interop.HwndMouseInputProvider.FilterMessage(IntPtr hwnd, WindowMessage msg, IntPtr wParam, IntPtr lParam, Boolean& handled)
at System.Windows.Interop.HwndSource.InputFilterMessage(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam, Boolean& handled)
at MS.Win32.HwndWrapper.WndProc(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam, Boolean& handled)
at MS.Win32.HwndSubclass.DispatcherCallbackOperation(Object o)
at System.Windows.Threading.ExceptionWrapper.InternalRealCall(Delegate callback, Object args, Int32 numArgs)
at System.Windows.Threading.ExceptionWrapper.TryCatchWhen(Object source, Delegate callback, Object args, Int32 numArgs, Delegate catchHandler)
at System.Windows.Threading.Dispatcher.LegacyInvokeImpl(DispatcherPriority priority, TimeSpan timeout, Delegate method, Object args, Int32 numArgs)
at MS.Win32.HwndSubclass.SubclassWndProc(IntPtr hwnd, Int32 msg, IntPtr wParam, IntPtr lParam)
at MS.Win32.UnsafeNativeMethods.DispatchMessage(MSG& msg)
at System.Windows.Threading.Dispatcher.PushFrameImpl(DispatcherFrame frame)
at System.Windows.Window.ShowHelper(Object booleanBox)
at System.Windows.Window.ShowDialog()
at Microsoft.Scripting.Interpreter.FuncCallInstruction`2.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run3[T0,T1,T2,TRet](T0 arg0, T1 arg1, T2 arg2)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at Microsoft.Scripting.Interpreter.DynamicInstruction`3.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
at IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
at PyRevitBaseClasses.ScriptExecutor.ExecuteScript(PyRevitCommandRuntime& pyrvtCmd)
|
AttributeError
|
def select_sheets(
title="Select Sheets",
button_name="Select",
width=DEFAULT_INPUTWINDOW_WIDTH,
multiple=True,
filterfunc=None,
doc=None,
):
"""Standard form for selecting sheets.
Sheets are grouped into sheet sets and sheet set can be selected from
a drop down box at the top of window.
Args:
title (str, optional): list window title
button_name (str, optional): list window button caption
width (int, optional): width of list window
multiselect (bool, optional):
allow multi-selection (uses check boxes). defaults to True
filterfunc (function):
filter function to be applied to context items.
doc (DB.Document, optional):
source document for sheets; defaults to active document
Returns:
list[DB.ViewSheet]: list of selected sheets
Example:
>>> from pyrevit import forms
>>> forms.select_sheets()
... [<Autodesk.Revit.DB.ViewSheet object>,
... <Autodesk.Revit.DB.ViewSheet object>]
"""
doc = doc or HOST_APP.doc
all_ops = dict()
all_sheets = (
DB.FilteredElementCollector(doc)
.OfClass(DB.ViewSheet)
.WhereElementIsNotElementType()
.ToElements()
)
if filterfunc:
all_sheets = filter(filterfunc, all_sheets)
all_sheets_ops = sorted(
[SheetOption(x) for x in all_sheets], key=lambda x: x.number
)
all_ops["All Sheets"] = all_sheets_ops
sheetsets = revit.query.get_sheet_sets(doc)
for sheetset in sheetsets:
sheetset_sheets = [x for x in sheetset.Views if isinstance(x, DB.ViewSheet)]
if filterfunc:
sheetset_sheets = filter(filterfunc, sheetset_sheets)
sheetset_ops = sorted(
[SheetOption(x) for x in sheetset_sheets], key=lambda x: x.number
)
all_ops[sheetset.Name] = sheetset_ops
# ask user for multiple sheets
selected_sheets = SelectFromList.show(
all_ops,
title=title,
group_selector_title="Sheet Sets:",
button_name=button_name,
width=width,
multiselect=multiple,
checked_only=True,
)
return selected_sheets
|
def select_sheets(
title="Select Sheets",
button_name="Select",
width=DEFAULT_INPUTWINDOW_WIDTH,
multiple=True,
filterfunc=None,
doc=None,
):
"""Standard form for selecting sheets.
Sheets are grouped into sheet sets and sheet set can be selected from
a drop down box at the top of window.
Args:
title (str, optional): list window title
button_name (str, optional): list window button caption
width (int, optional): width of list window
multiselect (bool, optional):
allow multi-selection (uses check boxes). defaults to True
filterfunc (function):
filter function to be applied to context items.
doc (DB.Document, optional):
source document for sheets; defaults to active document
Returns:
list[DB.ViewSheet]: list of selected sheets
Example:
>>> from pyrevit import forms
>>> forms.select_sheets()
... [<Autodesk.Revit.DB.ViewSheet object>,
... <Autodesk.Revit.DB.ViewSheet object>]
"""
doc = doc or HOST_APP.doc
all_ops = dict()
all_sheets = (
DB.FilteredElementCollector(doc)
.OfClass(DB.ViewSheet)
.WhereElementIsNotElementType()
.ToElements()
)
if filterfunc:
all_sheets = filter(filterfunc, all_sheets)
all_sheets_ops = sorted(
[SheetOption(x) for x in all_sheets], key=lambda x: x.number
)
all_ops["All Sheets"] = all_sheets_ops
sheetsets = revit.query.get_sheet_sets(doc)
for sheetset in sheetsets:
sheetset_sheets = sheetset.Views
if filterfunc:
sheetset_sheets = filter(filterfunc, sheetset_sheets)
sheetset_ops = sorted(
[SheetOption(x) for x in sheetset_sheets], key=lambda x: x.number
)
all_ops[sheetset.Name] = sheetset_ops
# ask user for multiple sheets
selected_sheets = SelectFromList.show(
all_ops,
title=title,
group_selector_title="Sheet Sets:",
button_name=button_name,
width=width,
multiselect=multiple,
checked_only=True,
)
return selected_sheets
|
https://github.com/eirannejad/pyRevit/issues/388
|
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\Black\AppData\Roaming\pyRevit-Master\extensions\pyRevitTools.extension\pyRevit.tab\Drawing Set.panel\Revision.pulldown\Set Revision On Sheets.pushbutton\script.py", line 16, in
File "C:\Users\Black\AppData\Roaming\pyRevit-Master\pyrevitlib\pyrevit\forms\__init__.py", line 1346, in select_sheets
File "C:\Users\Black\AppData\Roaming\pyRevit-Master\pyrevitlib\pyrevit\forms\__init__.py", line 1347, in
File "C:\Users\Black\AppData\Roaming\pyRevit-Master\pyrevitlib\pyrevit\forms\__init__.py", line 1235, in number
AttributeError: 'View3D' object has no attribute 'SheetNumber'
Script Executor Traceback:
System.MissingMemberException: 'View3D' object has no attribute 'SheetNumber'
at IronPython.Runtime.Binding.PythonGetMemberBinder.FastErrorGet`1.GetError(CallSite site, TSelfType target, CodeContext context)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at IronPython.Runtime.Binding.PythonGetMemberBinder.FastPropertyGet`1.GetProperty(CallSite site, TSelfType target, CodeContext context)
at number$864(Closure , PythonFunction , Object )
at IronPython.Runtime.PythonProperty.__get__(CodeContext context, Object instance, Object owner)
at IronPython.Runtime.PythonProperty.TryGetValue(CodeContext context, Object instance, PythonType owner, Object& value)
at IronPython.Runtime.Types.GetMemberDelegates.SlotOnly(CallSite site, Object self, CodeContext context)
at $866(Closure , PythonFunction , Object )
at IronPython.Runtime.PythonContext.Call(CodeContext context, Object func, Object arg0)
at IronPython.Runtime.List.DoSort(CodeContext context, IComparer cmp, Object key, Boolean reverse, Int32 index, Int32 count)
at IronPython.Runtime.List.sort(CodeContext context, Object cmp, Object key, Boolean reverse)
at IronPython.Modules.Builtin.sorted(CodeContext context, Object iterable, Object cmp, Object key, Boolean reverse)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`6.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run5[T0,T1,T2,T3,T4,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
at Microsoft.Scripting.Interpreter.DynamicInstruction`5.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run7[T0,T1,T2,T3,T4,T5,T6,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`9.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run4[T0,T1,T2,T3,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3)
at System.Dynamic.UpdateDelegates.UpdateAndExecute3[T0,T1,T2,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2)
at Microsoft.Scripting.Interpreter.DynamicInstruction`4.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
at IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
at PyRevitBaseClasses.ScriptExecutor.ExecuteScript(PyRevitCommandRuntime& pyrvtCmd)
|
AttributeError
|
def dependent(func):
func.is_dependent = True
func.is_wipe_action = True
return func
|
def dependent(func):
func.is_dependent = True
return func
|
https://github.com/eirannejad/pyRevit/issues/305
|
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\pyRevit.tab\Project.panel\Wipe.pulldown\Wipe Model Components.pushbutton\script.py", line 51, in
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 804, in get_worksetcleaners
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 780, in copy_func
NotImplementedError: The method or operation is not implemented.
Script Executor Traceback:
System.NotImplementedException: The method or operation is not implemented.
at Microsoft.Scripting.Interpreter.NewInstruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at System.Dynamic.UpdateDelegates.UpdateAndExecute7[T0,T1,T2,T3,T4,T5,T6,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`10.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at IronPython.Compiler.Ast.CallExpression.Invoke5Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run3[T0,T1,T2,TRet](T0 arg0, T1 arg1, T2 arg2)
at System.Dynamic.UpdateDelegates.UpdateAndExecute4[T0,T1,T2,T3,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`7.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run5[T0,T1,T2,T3,T4,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
at IronPython.Compiler.Ast.CallExpression.Invoke2Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run1[T0,TRet](T0 arg0)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at Microsoft.Scripting.Interpreter.DynamicInstruction`3.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
at IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
at Microsoft.Scripting.Hosting.ScriptSource.Execute(ScriptScope scope)
at PyRevitBaseClasses.ScriptExecutor.ExecuteScript(PyRevitCommandRuntime& pyrvtCmd)
|
NotImplementedError
|
def notdependent(func):
func.is_dependent = False
func.is_wipe_action = True
return func
|
def notdependent(func):
func.is_dependent = False
return func
|
https://github.com/eirannejad/pyRevit/issues/305
|
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\pyRevit.tab\Project.panel\Wipe.pulldown\Wipe Model Components.pushbutton\script.py", line 51, in
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 804, in get_worksetcleaners
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 780, in copy_func
NotImplementedError: The method or operation is not implemented.
Script Executor Traceback:
System.NotImplementedException: The method or operation is not implemented.
at Microsoft.Scripting.Interpreter.NewInstruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at System.Dynamic.UpdateDelegates.UpdateAndExecute7[T0,T1,T2,T3,T4,T5,T6,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`10.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at IronPython.Compiler.Ast.CallExpression.Invoke5Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run3[T0,T1,T2,TRet](T0 arg0, T1 arg1, T2 arg2)
at System.Dynamic.UpdateDelegates.UpdateAndExecute4[T0,T1,T2,T3,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`7.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run5[T0,T1,T2,T3,T4,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
at IronPython.Compiler.Ast.CallExpression.Invoke2Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run1[T0,TRet](T0 arg0)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at Microsoft.Scripting.Interpreter.DynamicInstruction`3.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
at IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
at Microsoft.Scripting.Hosting.ScriptSource.Execute(ScriptScope scope)
at PyRevitBaseClasses.ScriptExecutor.ExecuteScript(PyRevitCommandRuntime& pyrvtCmd)
|
NotImplementedError
|
def copy_func(f, workset_name):
new_funcname = "{}_{}".format(f.func_name, workset_name)
new_func = types.FunctionType(
f.func_code, f.func_globals, new_funcname, tuple([workset_name]), f.func_closure
)
# set the docstring
new_func.__doc__ = WORKSET_FUNC_DOCSTRING_TEMPLATE.format(workset_name)
new_func.is_dependent = False
return new_func
|
def copy_func(f, workset_name):
new_funcname = "{}_{}".format(f.func_name, workset_name)
new_func = types.FunctionType(
f.func_code, f.func_globals, new_funcname, tuple([workset_name]), f.func_closure
)
# set the docstring
new_func.__doc__ = 'Remove All Elements on Workset "{}"'.format(workset_name)
new_func.is_dependent = False
return new_func
|
https://github.com/eirannejad/pyRevit/issues/305
|
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\pyRevit.tab\Project.panel\Wipe.pulldown\Wipe Model Components.pushbutton\script.py", line 51, in
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 804, in get_worksetcleaners
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 780, in copy_func
NotImplementedError: The method or operation is not implemented.
Script Executor Traceback:
System.NotImplementedException: The method or operation is not implemented.
at Microsoft.Scripting.Interpreter.NewInstruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at System.Dynamic.UpdateDelegates.UpdateAndExecute7[T0,T1,T2,T3,T4,T5,T6,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`10.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at IronPython.Compiler.Ast.CallExpression.Invoke5Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run3[T0,T1,T2,TRet](T0 arg0, T1 arg1, T2 arg2)
at System.Dynamic.UpdateDelegates.UpdateAndExecute4[T0,T1,T2,T3,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`7.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run5[T0,T1,T2,T3,T4,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
at IronPython.Compiler.Ast.CallExpression.Invoke2Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run1[T0,TRet](T0 arg0)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at Microsoft.Scripting.Interpreter.DynamicInstruction`3.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
at IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
at Microsoft.Scripting.Hosting.ScriptSource.Execute(ScriptScope scope)
at PyRevitBaseClasses.ScriptExecutor.ExecuteScript(PyRevitCommandRuntime& pyrvtCmd)
|
NotImplementedError
|
def get_worksetcleaners():
workset_funcs = []
# if model is workshared, get a list of current worksets
if revit.doc.IsWorkshared:
cl = DB.FilteredWorksetCollector(revit.doc)
worksetlist = cl.OfKind(DB.WorksetKind.UserWorkset)
# duplicate the workset element remover function for each workset
for workset in worksetlist:
# copying functions is not implemented in IronPython 2.7.3
# this method initially used copy_func to create a func for
# each workset but now passes on the template func
# with appropriate arguments
docstr = WORKSET_FUNC_DOCSTRING_TEMPLATE.format(workset.Name)
workset_funcs.append(
WorksetFuncData(
func=template_workset_remover,
docstring=docstr,
args=(workset.Name,),
)
)
return workset_funcs
|
def get_worksetcleaners():
workset_funcs = []
# copying functions is not implemented in IronPython 2.7.3
if compat.IRONPY273:
return workset_funcs
# if model is workshared, get a list of current worksets
if revit.doc.IsWorkshared:
cl = DB.FilteredWorksetCollector(revit.doc)
worksetlist = cl.OfKind(DB.WorksetKind.UserWorkset)
# duplicate the workset element remover function for each workset
for workset in worksetlist:
workset_funcs.append(copy_func(template_workset_remover, workset.Name))
return workset_funcs
|
https://github.com/eirannejad/pyRevit/issues/305
|
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\pyRevit.tab\Project.panel\Wipe.pulldown\Wipe Model Components.pushbutton\script.py", line 51, in
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 804, in get_worksetcleaners
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 780, in copy_func
NotImplementedError: The method or operation is not implemented.
Script Executor Traceback:
System.NotImplementedException: The method or operation is not implemented.
at Microsoft.Scripting.Interpreter.NewInstruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at System.Dynamic.UpdateDelegates.UpdateAndExecute7[T0,T1,T2,T3,T4,T5,T6,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`10.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at IronPython.Compiler.Ast.CallExpression.Invoke5Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run3[T0,T1,T2,TRet](T0 arg0, T1 arg1, T2 arg2)
at System.Dynamic.UpdateDelegates.UpdateAndExecute4[T0,T1,T2,T3,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`7.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run5[T0,T1,T2,T3,T4,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
at IronPython.Compiler.Ast.CallExpression.Invoke2Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run1[T0,TRet](T0 arg0)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at Microsoft.Scripting.Interpreter.DynamicInstruction`3.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
at IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
at Microsoft.Scripting.Hosting.ScriptSource.Execute(ScriptScope scope)
at PyRevitBaseClasses.ScriptExecutor.ExecuteScript(PyRevitCommandRuntime& pyrvtCmd)
|
NotImplementedError
|
def __init__(self, name, default_state=False, wipe_action=None, wipe_args=None):
self.name = name
self.state = default_state
self.wipe_action = wipe_action
self.wipe_args = wipe_args
self.is_dependent = getattr(self.wipe_action, "is_dependent", False)
|
def __init__(self, name, default_state=False, wipe_action=None):
self.name = name
self.state = default_state
self.wipe_action = wipe_action
self.is_dependent = self.wipe_action.is_dependent
|
https://github.com/eirannejad/pyRevit/issues/305
|
IronPython Traceback:
Traceback (most recent call last):
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\pyRevit.tab\Project.panel\Wipe.pulldown\Wipe Model Components.pushbutton\script.py", line 51, in
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 804, in get_worksetcleaners
File "C:\Users\user05\AppData\Roaming\pyRevit\pyRevit-v4\pyRevit\extensions\pyRevitTools.extension\lib\pyrevittoolslib\wipeactions.py", line 780, in copy_func
NotImplementedError: The method or operation is not implemented.
Script Executor Traceback:
System.NotImplementedException: The method or operation is not implemented.
at Microsoft.Scripting.Interpreter.NewInstruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at System.Dynamic.UpdateDelegates.UpdateAndExecute7[T0,T1,T2,T3,T4,T5,T6,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`10.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run8[T0,T1,T2,T3,T4,T5,T6,T7,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
at IronPython.Compiler.Ast.CallExpression.Invoke5Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run3[T0,T1,T2,TRet](T0 arg0, T1 arg1, T2 arg2)
at System.Dynamic.UpdateDelegates.UpdateAndExecute4[T0,T1,T2,T3,TRet](CallSite site, T0 arg0, T1 arg1, T2 arg2, T3 arg3)
at Microsoft.Scripting.Interpreter.FuncCallInstruction`7.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run5[T0,T1,T2,T3,T4,TRet](T0 arg0, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
at IronPython.Compiler.Ast.CallExpression.Invoke2Instruction.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run1[T0,TRet](T0 arg0)
at System.Dynamic.UpdateDelegates.UpdateAndExecute2[T0,T1,TRet](CallSite site, T0 arg0, T1 arg1)
at Microsoft.Scripting.Interpreter.DynamicInstruction`3.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.Interpreter.Run(InterpretedFrame frame)
at Microsoft.Scripting.Interpreter.LightLambda.Run2[T0,T1,TRet](T0 arg0, T1 arg1)
at IronPython.Compiler.PythonScriptCode.RunWorker(CodeContext ctx)
at Microsoft.Scripting.Hosting.ScriptSource.Execute(ScriptScope scope)
at PyRevitBaseClasses.ScriptExecutor.ExecuteScript(PyRevitCommandRuntime& pyrvtCmd)
|
NotImplementedError
|
def get(self, path: str) -> None:
parts = path.split("/")
component_name = parts[0]
component_root = self._registry.get_component_path(component_name)
if component_root is None:
self.write(f"{path} not found")
self.set_status(404)
return
filename = "/".join(parts[1:])
abspath = os.path.join(component_root, filename)
LOGGER.debug("ComponentRequestHandler: GET: %s -> %s", path, abspath)
try:
with open(abspath, "r", encoding="utf-8") as file:
contents = file.read()
except (OSError, UnicodeDecodeError) as e:
self.write(f"{path} read error: {e}")
self.set_status(404)
return
self.write(contents)
self.set_header("Content-Type", self.get_content_type(abspath))
self.set_extra_headers(path)
|
def get(self, path: str) -> None:
parts = path.split("/")
component_name = parts[0]
component_root = self._registry.get_component_path(component_name)
if component_root is None:
self.write(f"{path} not found")
self.set_status(404)
return
filename = "/".join(parts[1:])
abspath = os.path.join(component_root, filename)
LOGGER.debug("ComponentRequestHandler: GET: %s -> %s", path, abspath)
try:
with open(abspath, "r") as file:
contents = file.read()
except OSError as e:
self.write(f"{path} read error: {e}")
self.set_status(404)
return
self.write(contents)
self.set_header("Content-Type", self.get_content_type(abspath))
self.set_extra_headers(path)
|
https://github.com/streamlit/streamlit/issues/2606
|
2021-01-15 15:36:40.902 Uncaught exception GET /component/streamlit_material.core.base.streamlit_material/static/media/roboto-latin-500-normal.020c97dc.woff2 (::1)
HTTPServerRequest(protocol='http', host='localhost:8501', method='GET', uri='/component/streamlit_material.core.base.streamlit_material/static/media/roboto-latin-500-normal.020c97dc.woff2', version='HTTP/1.1', remote_ip='::1')
Traceback (most recent call last):
File "/home/user/.pyenv/versions/3.8.6/envs/streamlit_components/lib/python3.8/site-packages/tornado/web.py", line 1702, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/home/user/.pyenv/versions/3.8.6/envs/streamlit_components/lib/python3.8/site-packages/streamlit/components/v1/components.py", line 324, in get
contents = file.read()
File "/home/user/.pyenv/versions/3.8.6/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x8f in position 18: invalid start byte
|
UnicodeDecodeError
|
def reset(cls):
"""Reset credentials by removing file.
This is used by `streamlit activate reset` in case a user wants
to start over.
"""
c = Credentials.get_current()
c.activation = None
try:
os.remove(c._conf_file)
except OSError as e:
LOGGER.error("Error removing credentials file: %s" % e)
|
def reset(cls):
"""Reset credentials by removing file.
This is used by `streamlit activate reset` in case a user wants
to start over.
"""
Credentials._singleton = None
c = Credentials()
try:
os.remove(c._conf_file)
except OSError as e:
LOGGER.error("Error removing credentials file: %s" % e)
|
https://github.com/streamlit/streamlit/issues/175
|
Exception in thread ScriptRunner.scriptThread:
Traceback (most recent call last):
File "/Users/adrien/.pyenv/versions/3.6.5/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/Users/adrien/.pyenv/versions/3.6.5/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/Users/adrien/.pyenv/versions/3.6.5/envs/streamlit-pre-launch/lib/python3.6/site-packages/streamlit/ScriptRunner.py", line 147, in _process_request_queue
self._run_script(data)
File "/Users/adrien/.pyenv/versions/3.6.5/envs/streamlit-pre-launch/lib/python3.6/site-packages/streamlit/ScriptRunner.py", line 238, in _run_script
self.on_event.send(ScriptRunnerEvent.SCRIPT_STARTED)
File "/Users/adrien/.pyenv/versions/3.6.5/envs/streamlit-pre-launch/lib/python3.6/site-packages/blinker/base.py", line 267, in send
for receiver in self.receivers_for(sender)]
File "/Users/adrien/.pyenv/versions/3.6.5/envs/streamlit-pre-launch/lib/python3.6/site-packages/blinker/base.py", line 267, in <listcomp>
for receiver in self.receivers_for(sender)]
File "/Users/adrien/.pyenv/versions/3.6.5/envs/streamlit-pre-launch/lib/python3.6/site-packages/streamlit/ReportSession.py", line 257, in _on_scriptrunner_event
self._maybe_enqueue_initialize_message()
File "/Users/adrien/.pyenv/versions/3.6.5/envs/streamlit-pre-launch/lib/python3.6/site-packages/streamlit/ReportSession.py", line 360, in _maybe_enqueue_initialize_message
imsg.user_info.email = Credentials.get_current().activation.email
AttributeError: 'NoneType' object has no attribute 'email'
|
AttributeError
|
def _open_binary_stream(uri, mode, transport_params):
"""Open an arbitrary URI in the specified binary mode.
Not all modes are supported for all protocols.
:arg uri: The URI to open. May be a string, or something else.
:arg str mode: The mode to open with. Must be rb, wb or ab.
:arg transport_params: Keyword argumens for the transport layer.
:returns: A file object and the filename
:rtype: tuple
"""
if mode not in ("rb", "rb+", "wb", "wb+", "ab", "ab+"):
#
# This should really be a ValueError, but for the sake of compatibility
# with older versions, which raise NotImplementedError, we do the same.
#
raise NotImplementedError("unsupported mode: %r" % mode)
if isinstance(uri, six.string_types):
# this method just routes the request to classes handling the specific storage
# schemes, depending on the URI protocol in `uri`
filename = uri.split("/")[-1]
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme == "file":
fobj = io.open(parsed_uri.uri_path, mode)
return fobj, filename
elif parsed_uri.scheme in smart_open_ssh.SCHEMES:
fobj = smart_open_ssh.open(
parsed_uri.uri_path,
mode,
host=parsed_uri.host,
user=parsed_uri.user,
port=parsed_uri.port,
password=parsed_uri.password,
transport_params=transport_params,
)
return fobj, filename
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _s3_open_uri(parsed_uri, mode, transport_params), filename
elif parsed_uri.scheme == "hdfs":
_check_kwargs(smart_open_hdfs.open, transport_params)
return smart_open_hdfs.open(parsed_uri.uri_path, mode), filename
elif parsed_uri.scheme == "webhdfs":
kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
http_uri = smart_open_webhdfs.convert_to_http_uri(parsed_uri)
return smart_open_webhdfs.open(http_uri, mode, **kw), filename
elif parsed_uri.scheme.startswith("http"):
#
# The URI may contain a query string and fragments, which interfere
# with our compressed/uncompressed estimation, so we strip them.
#
filename = P.basename(urlparse.urlparse(uri).path)
kw = _check_kwargs(smart_open_http.open, transport_params)
return smart_open_http.open(uri, mode, **kw), filename
else:
raise NotImplementedError("scheme %r is not supported", parsed_uri.scheme)
elif hasattr(uri, "read"):
# simply pass-through if already a file-like
# we need to return something as the file name, but we don't know what
# so we probe for uri.name (e.g., this works with open() or tempfile.NamedTemporaryFile)
# if the value ends with COMPRESSED_EXT, we will note it in _compression_wrapper()
# if there is no such an attribute, we return "unknown" - this
# effectively disables any compression
filename = getattr(uri, "name", "unknown")
return uri, filename
else:
raise TypeError("don't know how to handle uri %r" % uri)
|
def _open_binary_stream(uri, mode, transport_params):
"""Open an arbitrary URI in the specified binary mode.
Not all modes are supported for all protocols.
:arg uri: The URI to open. May be a string, or something else.
:arg str mode: The mode to open with. Must be rb, wb or ab.
:arg transport_params: Keyword argumens for the transport layer.
:returns: A file object and the filename
:rtype: tuple
"""
if mode not in ("rb", "rb+", "wb", "wb+", "ab", "ab+"):
#
# This should really be a ValueError, but for the sake of compatibility
# with older versions, which raise NotImplementedError, we do the same.
#
raise NotImplementedError("unsupported mode: %r" % mode)
if isinstance(uri, six.string_types):
# this method just routes the request to classes handling the specific storage
# schemes, depending on the URI protocol in `uri`
filename = uri.split("/")[-1]
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme == "file":
fobj = io.open(parsed_uri.uri_path, mode)
return fobj, filename
elif parsed_uri.scheme in smart_open_ssh.SCHEMES:
fobj = smart_open_ssh.open(
parsed_uri.uri_path,
mode,
host=parsed_uri.host,
user=parsed_uri.user,
port=parsed_uri.port,
password=parsed_uri.password,
transport_params=transport_params,
)
return fobj, filename
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _s3_open_uri(parsed_uri, mode, transport_params), filename
elif parsed_uri.scheme == "hdfs":
_check_kwargs(smart_open_hdfs.open, transport_params)
return smart_open_hdfs.open(parsed_uri.uri_path, mode), filename
elif parsed_uri.scheme == "webhdfs":
kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
elif parsed_uri.scheme.startswith("http"):
#
# The URI may contain a query string and fragments, which interfere
# with our compressed/uncompressed estimation, so we strip them.
#
filename = P.basename(urlparse.urlparse(uri).path)
kw = _check_kwargs(smart_open_http.open, transport_params)
return smart_open_http.open(uri, mode, **kw), filename
else:
raise NotImplementedError("scheme %r is not supported", parsed_uri.scheme)
elif hasattr(uri, "read"):
# simply pass-through if already a file-like
# we need to return something as the file name, but we don't know what
# so we probe for uri.name (e.g., this works with open() or tempfile.NamedTemporaryFile)
# if the value ends with COMPRESSED_EXT, we will note it in _compression_wrapper()
# if there is no such an attribute, we return "unknown" - this
# effectively disables any compression
filename = getattr(uri, "name", "unknown")
return uri, filename
else:
raise TypeError("don't know how to handle uri %r" % uri)
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def _my_urlsplit(url):
"""This is a hack to prevent the regular urlsplit from splitting around question marks.
A question mark (?) in a URL typically indicates the start of a
querystring, and the standard library's urlparse function handles the
querystring separately. Unfortunately, question marks can also appear
_inside_ the actual URL for some schemas like S3.
Replaces question marks with newlines prior to splitting. This is safe because:
1. The standard library's urlsplit completely ignores newlines
2. Raw newlines will never occur in innocuous URLs. They are always URL-encoded.
See Also
--------
https://github.com/python/cpython/blob/3.7/Lib/urllib/parse.py
https://github.com/RaRe-Technologies/smart_open/issues/285
"""
parsed_url = urlparse.urlsplit(url, allow_fragments=False)
if parsed_url.scheme not in smart_open_s3.SUPPORTED_SCHEMES or "?" not in url:
return parsed_url
sr = urlparse.urlsplit(url.replace("?", "\n"), allow_fragments=False)
return urlparse.SplitResult(
sr.scheme, sr.netloc, sr.path.replace("\n", "?"), "", ""
)
|
def _my_urlsplit(url):
"""This is a hack to prevent the regular urlsplit from splitting around question marks.
A question mark (?) in a URL typically indicates the start of a
querystring, and the standard library's urlparse function handles the
querystring separately. Unfortunately, question marks can also appear
_inside_ the actual URL for some schemas like S3.
Replaces question marks with newlines prior to splitting. This is safe because:
1. The standard library's urlsplit completely ignores newlines
2. Raw newlines will never occur in innocuous URLs. They are always URL-encoded.
See Also
--------
https://github.com/python/cpython/blob/3.7/Lib/urllib/parse.py
https://github.com/RaRe-Technologies/smart_open/issues/285
"""
if "?" not in url:
return urlparse.urlsplit(url, allow_fragments=False)
sr = urlparse.urlsplit(url.replace("?", "\n"), allow_fragments=False)
return urlparse.SplitResult(
sr.scheme, sr.netloc, sr.path.replace("\n", "?"), "", ""
)
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def _parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
.s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
* [ssh|scp|sftp]://username@host//path/file
* [ssh|scp|sftp]://username@host/path/file
"""
if os.name == "nt":
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if "://" not in uri_as_string:
# no protocol given => assume a local file
uri_as_string = "file://" + uri_as_string
parsed_uri = _my_urlsplit(uri_as_string)
if parsed_uri.scheme == "hdfs":
return _parse_uri_hdfs(parsed_uri)
elif parsed_uri.scheme == "webhdfs":
return parsed_uri
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _parse_uri_s3x(parsed_uri)
elif parsed_uri.scheme == "file":
return _parse_uri_file(parsed_uri.netloc + parsed_uri.path)
elif parsed_uri.scheme in ("", None):
return _parse_uri_file(uri_as_string)
elif parsed_uri.scheme.startswith("http"):
return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string)
elif parsed_uri.scheme in smart_open_ssh.SCHEMES:
return _parse_uri_ssh(parsed_uri)
else:
raise NotImplementedError(
"unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string)
)
|
def _parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
.s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
* [ssh|scp|sftp]://username@host//path/file
* [ssh|scp|sftp]://username@host/path/file
"""
if os.name == "nt":
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if "://" not in uri_as_string:
# no protocol given => assume a local file
uri_as_string = "file://" + uri_as_string
parsed_uri = _my_urlsplit(uri_as_string)
if parsed_uri.scheme == "hdfs":
return _parse_uri_hdfs(parsed_uri)
elif parsed_uri.scheme == "webhdfs":
return _parse_uri_webhdfs(parsed_uri)
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _parse_uri_s3x(parsed_uri)
elif parsed_uri.scheme == "file":
return _parse_uri_file(parsed_uri.netloc + parsed_uri.path)
elif parsed_uri.scheme in ("", None):
return _parse_uri_file(uri_as_string)
elif parsed_uri.scheme.startswith("http"):
return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string)
elif parsed_uri.scheme in smart_open_ssh.SCHEMES:
return _parse_uri_ssh(parsed_uri)
else:
raise NotImplementedError(
"unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string)
)
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def open(http_uri, mode, min_part_size=WEBHDFS_MIN_PART_SIZE):
"""
Parameters
----------
http_uri: str
webhdfs url converted to http REST url
min_part_size: int, optional
For writing only.
"""
if mode == "rb":
return BufferedInputBase(http_uri)
elif mode == "wb":
return BufferedOutputBase(http_uri, min_part_size=min_part_size)
else:
raise NotImplementedError("webhdfs support for mode %r not implemented" % mode)
|
def open(uri, mode, min_part_size=WEBHDFS_MIN_PART_SIZE):
"""
Parameters
----------
min_part_size: int, optional
For writing only.
"""
if mode == "rb":
return BufferedInputBase(uri)
elif mode == "wb":
return BufferedOutputBase(uri, min_part_size=min_part_size)
else:
raise NotImplementedError("webhdfs support for mode %r not implemented" % mode)
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def __init__(self, uri):
self._uri = uri
payload = {"op": "OPEN", "offset": 0}
self._response = requests.get(self._uri, params=payload, stream=True)
if self._response.status_code != httplib.OK:
raise WebHdfsException.from_response(self._response)
self._buf = b""
|
def __init__(self, uri):
self._uri = uri
payload = {"op": "OPEN", "offset": 0}
self._response = requests.get("http://" + self._uri, params=payload, stream=True)
self._buf = b""
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def __init__(self, uri, min_part_size=WEBHDFS_MIN_PART_SIZE):
"""
Parameters
----------
min_part_size: int, optional
For writing only.
"""
self._uri = uri
self._closed = False
self.min_part_size = min_part_size
# creating empty file first
payload = {"op": "CREATE", "overwrite": True}
init_response = requests.put(self._uri, params=payload, allow_redirects=False)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException.from_response(init_response)
uri = init_response.headers["location"]
response = requests.put(
uri, data="", headers={"content-type": "application/octet-stream"}
)
if not response.status_code == httplib.CREATED:
raise WebHdfsException.from_response(response)
self.lines = []
self.parts = 0
self.chunk_bytes = 0
self.total_size = 0
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
|
def __init__(self, uri_path, min_part_size=WEBHDFS_MIN_PART_SIZE):
"""
Parameters
----------
min_part_size: int, optional
For writing only.
"""
self.uri_path = uri_path
self._closed = False
self.min_part_size = min_part_size
# creating empty file first
payload = {"op": "CREATE", "overwrite": True}
init_response = requests.put(
"http://" + self.uri_path, params=payload, allow_redirects=False
)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException(
str(init_response.status_code) + "\n" + init_response.content
)
uri = init_response.headers["location"]
response = requests.put(
uri, data="", headers={"content-type": "application/octet-stream"}
)
if not response.status_code == httplib.CREATED:
raise WebHdfsException(str(response.status_code) + "\n" + response.content)
self.lines = []
self.parts = 0
self.chunk_bytes = 0
self.total_size = 0
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def _upload(self, data):
payload = {"op": "APPEND"}
init_response = requests.post(self._uri, params=payload, allow_redirects=False)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException.from_response(init_response)
uri = init_response.headers["location"]
response = requests.post(
uri, data=data, headers={"content-type": "application/octet-stream"}
)
if not response.status_code == httplib.OK:
raise WebHdfsException.from_response(response)
|
def _upload(self, data):
payload = {"op": "APPEND"}
init_response = requests.post(
"http://" + self.uri_path, params=payload, allow_redirects=False
)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException(
str(init_response.status_code) + "\n" + init_response.content
)
uri = init_response.headers["location"]
response = requests.post(
uri, data=data, headers={"content-type": "application/octet-stream"}
)
if not response.status_code == httplib.OK:
raise WebHdfsException(
str(response.status_code) + "\n" + repr(response.content)
)
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def __init__(self, msg="", status_code=None):
self.msg = msg
self.status_code = status_code
super(WebHdfsException, self).__init__(repr(self))
|
def __init__(self, msg=str()):
self.msg = msg
super(WebHdfsException, self).__init__(self.msg)
|
https://github.com/RaRe-Technologies/smart_open/issues/338
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-40-b63bd711d13c> in <module>
----> 1 smart_copy('./test_file.txt', 'webhdfs://XXXX@XXX.XXX.XXX.XXX:XXXXX/user/XXXX/smart_copy/test_file.txt')
<ipython-input-38-694f70cf0776> in smart_copy(source_file, sync_file)
3 '''
4 with open(source_file, 'rb') as source:
----> 5 with open(sync_file, 'wb') as sync:
6 for line in source:
7 sync.write(line)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in open(uri, mode, buffering, encoding, errors, newline, closefd, opener, ignore_ext, transport_params)
346 except KeyError:
347 binary_mode = mode
--> 348 binary, filename = _open_binary_stream(uri, binary_mode, transport_params)
349 if ignore_ext:
350 decompressed = binary
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/smart_open_lib.py in _open_binary_stream(uri, mode, transport_params)
560 elif parsed_uri.scheme == "webhdfs":
561 kw = _check_kwargs(smart_open_webhdfs.open, transport_params)
--> 562 return smart_open_webhdfs.open(parsed_uri.uri_path, mode, **kw), filename
563 elif parsed_uri.scheme.startswith('http'):
564 #
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in open(uri, mode, min_part_size)
40 return BufferedInputBase(uri)
41 elif mode == 'wb':
---> 42 return BufferedOutputBase(uri, min_part_size=min_part_size)
43 else:
44 raise NotImplementedError('webhdfs support for mode %r not implemented' % mode)
~/Github/tools/.venv/lib/python3.7/site-packages/smart_open/webhdfs.py in __init__(self, uri_path, min_part_size)
129 params=payload, allow_redirects=False)
130 if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
--> 131 raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
132 uri = init_response.headers['location']
133 response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
TypeError: can only concatenate str (not "bytes") to str
|
TypeError
|
def _parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
.s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
"""
if os.name == "nt":
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if "://" not in uri_as_string:
# no protocol given => assume a local file
uri_as_string = "file://" + uri_as_string
parsed_uri = urlsplit(uri_as_string, allow_fragments=False)
if parsed_uri.scheme == "hdfs":
return _parse_uri_hdfs(parsed_uri)
elif parsed_uri.scheme == "webhdfs":
return _parse_uri_webhdfs(parsed_uri)
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _parse_uri_s3x(parsed_uri)
elif parsed_uri.scheme == "file":
return _parse_uri_file(parsed_uri.netloc + parsed_uri.path)
elif parsed_uri.scheme in ("", None):
return _parse_uri_file(uri_as_string)
elif parsed_uri.scheme.startswith("http"):
return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string)
else:
raise NotImplementedError(
"unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string)
)
|
def _parse_uri(uri_as_string):
"""
Parse the given URI from a string.
Supported URI schemes are:
* file
* hdfs
* http
* https
* s3
* s3a
* s3n
* s3u
* webhdfs
.s3, s3a and s3n are treated the same way. s3u is s3 but without SSL.
Valid URI examples::
* s3://my_bucket/my_key
* s3://my_key:my_secret@my_bucket/my_key
* s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
* hdfs:///path/file
* hdfs://path/file
* webhdfs://host:port/path/file
* ./local/path/file
* ~/local/path/file
* local/path/file
* ./local/path/file.gz
* file:///home/user/file
* file:///home/user/file.bz2
"""
if os.name == "nt":
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if "://" not in uri_as_string:
# no protocol given => assume a local file
uri_as_string = "file://" + uri_as_string
parsed_uri = urlsplit(uri_as_string, allow_fragments=False)
if parsed_uri.scheme == "hdfs":
return _parse_uri_hdfs(parsed_uri)
elif parsed_uri.scheme == "webhdfs":
return _parse_uri_webhdfs(parsed_uri)
elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES:
return _parse_uri_s3x(parsed_uri)
elif parsed_uri.scheme in ("file", "", None):
return _parse_uri_file(parsed_uri)
elif parsed_uri.scheme.startswith("http"):
return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string)
else:
raise NotImplementedError(
"unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string)
)
|
https://github.com/RaRe-Technologies/smart_open/issues/123
|
smart_open.smart_open('//anaconda/envs/python3/lib/python3.5/site-packages/gensim/test/test_data/lee_background.cor','r')
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-12-6a05871c0775> in <module>()
----> 1 smart_open.smart_open('//anaconda/envs/python3/lib/python3.5/site-packages/gensim/test/test_data/lee_background.cor','r')
//anaconda/envs/python3/lib/python3.5/site-packages/smart_open/smart_open_lib.py in smart_open(uri, mode, **kw)
138 # local files -- both read & write supported
139 # compression, if any, is determined by the filename extension (.gz, .bz2)
--> 140 return file_smart_open(parsed_uri.uri_path, mode)
141 elif parsed_uri.scheme in ("s3", "s3n", "s3u"):
142 kwargs = {}
//anaconda/envs/python3/lib/python3.5/site-packages/smart_open/smart_open_lib.py in file_smart_open(fname, mode)
642
643 """
--> 644 return compression_wrapper(open(fname, mode), fname, mode)
645
646
FileNotFoundError: [Errno 2] No such file or directory: 'anaconda/envs/python3/lib/python3.5/site-packages/gensim/test/test_data/lee_background.cor'
|
FileNotFoundError
|
def _parse_uri_file(input_path):
# '~/tmp' may be expanded to '/Users/username/tmp'
uri_path = os.path.expanduser(input_path)
if not uri_path:
raise RuntimeError("invalid file URI: %s" % input_path)
return Uri(scheme="file", uri_path=uri_path)
|
def _parse_uri_file(parsed_uri):
assert parsed_uri.scheme in (None, "", "file")
uri_path = parsed_uri.netloc + parsed_uri.path
# '~/tmp' may be expanded to '/Users/username/tmp'
uri_path = os.path.expanduser(uri_path)
if not uri_path:
raise RuntimeError("invalid file URI: %s" % str(parsed_uri))
return Uri(scheme="file", uri_path=uri_path)
|
https://github.com/RaRe-Technologies/smart_open/issues/123
|
smart_open.smart_open('//anaconda/envs/python3/lib/python3.5/site-packages/gensim/test/test_data/lee_background.cor','r')
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-12-6a05871c0775> in <module>()
----> 1 smart_open.smart_open('//anaconda/envs/python3/lib/python3.5/site-packages/gensim/test/test_data/lee_background.cor','r')
//anaconda/envs/python3/lib/python3.5/site-packages/smart_open/smart_open_lib.py in smart_open(uri, mode, **kw)
138 # local files -- both read & write supported
139 # compression, if any, is determined by the filename extension (.gz, .bz2)
--> 140 return file_smart_open(parsed_uri.uri_path, mode)
141 elif parsed_uri.scheme in ("s3", "s3n", "s3u"):
142 kwargs = {}
//anaconda/envs/python3/lib/python3.5/site-packages/smart_open/smart_open_lib.py in file_smart_open(fname, mode)
642
643 """
--> 644 return compression_wrapper(open(fname, mode), fname, mode)
645
646
FileNotFoundError: [Errno 2] No such file or directory: 'anaconda/envs/python3/lib/python3.5/site-packages/gensim/test/test_data/lee_background.cor'
|
FileNotFoundError
|
def _shortcut_open(uri, mode, **kw):
"""Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file
"""
if not isinstance(uri, six.string_types):
return None
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme != "file":
return None
_, extension = P.splitext(parsed_uri.uri_path)
ignore_extension = kw.get("ignore_extension", False)
if extension in (".gz", ".bz2") and not ignore_extension:
return None
#
# https://docs.python.org/2/library/functions.html#open
#
# buffering: 0: off; 1: on; negative number: use system default
#
buffering = kw.get("buffering", -1)
open_kwargs = {}
errors = kw.get("errors")
if errors is not None:
open_kwargs["errors"] = errors
encoding = kw.get("encoding")
if encoding is not None:
open_kwargs["encoding"] = encoding
mode = mode.replace("b", "")
#
# Under Py3, the built-in open accepts kwargs, and it's OK to use that.
# Under Py2, the built-in open _doesn't_ accept kwargs, but we still use it
# whenever possible (see issue #207). If we're under Py2 and have to use
# kwargs, then we have no option other to use io.open.
#
if six.PY3:
return open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
elif not open_kwargs:
return open(parsed_uri.uri_path, mode, buffering=buffering)
return io.open(parsed_uri.uri_path, mode, buffering=buffering, **open_kwargs)
|
def _shortcut_open(uri, mode, **kw):
"""Try to open the URI using the standard library io.open function.
This can be much faster than the alternative of opening in binary mode and
then decoding.
This is only possible under the following conditions:
1. Opening a local file
2. Ignore extension is set to True
If it is not possible to use the built-in open for the specified URI, returns None.
:param str uri: A string indicating what to open.
:param str mode: The mode to pass to the open function.
:param dict kw:
:returns: The opened file
:rtype: file
"""
if not isinstance(uri, six.string_types):
return None
parsed_uri = _parse_uri(uri)
if parsed_uri.scheme != "file":
return None
_, extension = P.splitext(parsed_uri.uri_path)
ignore_extension = kw.get("ignore_extension", False)
if extension in (".gz", ".bz2") and not ignore_extension:
return None
open_kwargs = {}
errors = kw.get("errors")
if errors is not None:
open_kwargs["errors"] = errors
encoding = kw.get("encoding")
if encoding is not None:
open_kwargs["encoding"] = encoding
mode = mode.replace("b", "")
return io.open(parsed_uri.uri_path, mode, **open_kwargs)
|
https://github.com/RaRe-Technologies/smart_open/issues/207
|
IOError Traceback (most recent call last)
<ipython-input-1-afdc56d6c4b5> in <module>()
2 from gensim.test.utils import datapath
3
----> 4 m = FastText.load_fasttext_format(datapath("lee_fasttext"))
/home/ivan/release/gensim/gensim/models/fasttext.py in load_fasttext_format(cls, model_file, encoding)
697 model_file += '.bin'
698 model.file_name = model_file
--> 699 model.load_binary_data(encoding=encoding)
700 return model
701
/home/ivan/release/gensim/gensim/models/fasttext.py in load_binary_data(self, encoding)
712 self._load_model_params(f)
713 self._load_dict(f, encoding=encoding)
--> 714 self._load_vectors(f)
715
716 def _load_model_params(self, file_handle):
/home/ivan/release/gensim/gensim/models/fasttext.py in _load_vectors(self, file_handle)
819
820 self.num_original_vectors = num_vectors
--> 821 self.wv.vectors_ngrams = np.fromfile(file_handle, dtype=dtype, count=num_vectors * dim)
822 self.wv.vectors_ngrams = self.wv.vectors_ngrams.reshape((num_vectors, dim))
823 assert self.wv.vectors_ngrams.shape == (
IOError: first argument must be an open file
|
IOError
|
def open(bucket_id, key_id, mode, **kwargs):
logger.debug("%r", locals())
if mode not in MODES:
raise NotImplementedError("bad mode: %r expected one of %r" % (mode, MODES))
encoding = kwargs.pop("encoding", "utf-8")
errors = kwargs.pop("errors", None)
newline = kwargs.pop("newline", None)
line_buffering = kwargs.pop("line_buffering", False)
s3_min_part_size = kwargs.pop("s3_min_part_size", DEFAULT_MIN_PART_SIZE)
if mode in (READ, READ_BINARY):
fileobj = SeekableBufferedInputBase(bucket_id, key_id, **kwargs)
elif mode in (WRITE, WRITE_BINARY):
fileobj = BufferedOutputBase(
bucket_id, key_id, min_part_size=s3_min_part_size, **kwargs
)
else:
assert False
if mode in (READ, WRITE):
return io.TextIOWrapper(
fileobj,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
)
elif mode in (READ_BINARY, WRITE_BINARY):
return fileobj
else:
assert False
|
def open(bucket_id, key_id, mode, **kwargs):
logger.debug("%r", locals())
if mode not in MODES:
raise NotImplementedError("bad mode: %r expected one of %r" % (mode, MODES))
buffer_size = kwargs.pop("buffer_size", io.DEFAULT_BUFFER_SIZE)
encoding = kwargs.pop("encoding", "utf-8")
errors = kwargs.pop("errors", None)
newline = kwargs.pop("newline", None)
line_buffering = kwargs.pop("line_buffering", False)
s3_min_part_size = kwargs.pop("s3_min_part_size", DEFAULT_MIN_PART_SIZE)
if mode in (READ, READ_BINARY):
fileobj = BufferedInputBase(bucket_id, key_id, **kwargs)
elif mode in (WRITE, WRITE_BINARY):
fileobj = BufferedOutputBase(
bucket_id, key_id, min_part_size=s3_min_part_size, **kwargs
)
else:
assert False
if mode in (READ, WRITE):
return io.TextIOWrapper(
fileobj,
encoding=encoding,
errors=errors,
newline=newline,
line_buffering=line_buffering,
)
elif mode in (READ_BINARY, WRITE_BINARY):
return fileobj
else:
assert False
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def __init__(self, s3_object):
self.position = 0
self._object = s3_object
self._body = s3_object.get()["Body"]
|
def __init__(self, s3_object):
self.position = 0
self._object = s3_object
self._content_length = self._object.content_length
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def read(self, size=-1):
if size == -1:
return self._body.read()
return self._body.read(size)
|
def read(self, size=-1):
if self.position == self._content_length:
return b""
if size <= 0:
end = None
else:
end = min(self._content_length, self.position + size)
range_string = _range_string(self.position, stop=end)
logger.debug("range_string: %r", range_string)
body = self._object.get(Range=range_string)["Body"].read()
self.position += len(body)
return body
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def __init__(
self,
bucket,
key,
buffer_size=DEFAULT_BUFFER_SIZE,
line_terminator=BINARY_NEWLINE,
**kwargs,
):
session = boto3.Session(profile_name=kwargs.pop("profile_name", None))
s3 = session.resource("s3", **kwargs)
self._object = s3.Object(bucket, key)
self._raw_reader = RawReader(self._object)
self._content_length = self._object.content_length
self._current_pos = 0
self._buffer = b""
self._eof = False
self._buffer_size = buffer_size
self._line_terminator = line_terminator
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
|
def __init__(self, bucket, key, **kwargs):
session = boto3.Session(profile_name=kwargs.pop("profile_name", None))
s3 = session.resource("s3", **kwargs)
self._object = s3.Object(bucket, key)
self._raw_reader = RawReader(self._object)
self._content_length = self._object.content_length
self._current_pos = 0
self._buffer = b""
self._eof = False
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def seekable(self):
return False
|
def seekable(self):
"""If False, seek(), tell() and truncate() will raise IOError.
We offer only seek support, and no truncate support."""
return True
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def read(self, size=-1):
"""Read up to size bytes from the object and return them."""
if size <= 0:
if len(self._buffer):
from_buf = self._read_from_buffer(len(self._buffer))
else:
from_buf = b""
self._current_pos = self._content_length
return from_buf + self._raw_reader.read()
#
# Return unused data first
#
if len(self._buffer) >= size:
return self._read_from_buffer(size)
#
# If the stream is finished, return what we have.
#
if self._eof:
return self._read_from_buffer(len(self._buffer))
#
# Fill our buffer to the required size.
#
# logger.debug('filling %r byte-long buffer up to %r bytes', len(self._buffer), size)
self._fill_buffer(size)
return self._read_from_buffer(size)
|
def read(self, size=-1):
"""Read up to size bytes from the object and return them."""
if size <= 0:
if len(self._buffer):
from_buf = self._read_from_buffer(len(self._buffer))
else:
from_buf = b""
self._current_pos = self._content_length
return from_buf + self._raw_reader.read()
#
# Return unused data first
#
if len(self._buffer) >= size:
return self._read_from_buffer(size)
#
# If the stream is finished, return what we have.
#
if self._eof:
return self._read_from_buffer(len(self._buffer))
#
# Fill our buffer to the required size.
#
# logger.debug('filling %r byte-long buffer up to %r bytes', len(self._buffer), size)
while len(self._buffer) < size and not self._eof:
raw = self._raw_reader.read(size=io.DEFAULT_BUFFER_SIZE)
if len(raw):
self._buffer += raw
else:
logger.debug("reached EOF while filling buffer")
self._eof = True
return self._read_from_buffer(size)
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def __init__(self, bucket, key, min_part_size=DEFAULT_MIN_PART_SIZE, **kwargs):
if min_part_size < MIN_MIN_PART_SIZE:
logger.warning(
"S3 requires minimum part size >= 5MB; \
multipart upload may fail"
)
session = boto3.Session(profile_name=kwargs.pop("profile_name", None))
s3 = session.resource("s3", **kwargs)
#
# https://stackoverflow.com/questions/26871884/how-can-i-easily-determine-if-a-boto-3-s3-bucket-resource-exists
#
try:
s3.meta.client.head_bucket(Bucket=bucket)
except botocore.client.ClientError:
raise ValueError(
"the bucket %r does not exist, or is forbidden for access" % bucket
)
self._object = s3.Object(bucket, key)
self._min_part_size = min_part_size
self._mp = self._object.initiate_multipart_upload()
self._buf = io.BytesIO()
self._total_bytes = 0
self._total_parts = 0
self._parts = []
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
|
def __init__(self, bucket, key, min_part_size=DEFAULT_MIN_PART_SIZE, **kwargs):
if min_part_size < MIN_MIN_PART_SIZE:
logger.warning(
"S3 requires minimum part size >= 5MB; \
multipart upload may fail"
)
session = boto3.Session(profile_name=kwargs.pop("profile_name", None))
s3 = session.resource("s3", **kwargs)
#
# https://stackoverflow.com/questions/26871884/how-can-i-easily-determine-if-a-boto-3-s3-bucket-resource-exists
#
s3.create_bucket(Bucket=bucket)
self._object = s3.Object(bucket, key)
self._min_part_size = min_part_size
self._mp = self._object.initiate_multipart_upload()
self._buf = io.BytesIO()
self._total_bytes = 0
self._total_parts = 0
self._parts = []
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def smart_open(uri, mode="rb", **kw):
"""
Open the given S3 / HDFS / filesystem file pointed to by `uri` for reading or writing.
The only supported modes for now are 'rb' (read, default) and 'wb' (replace & write).
The reads/writes are memory efficient (streamed) and therefore suitable for
arbitrarily large files.
The `uri` can be either:
1. a URI for the local filesystem (compressed ``.gz`` or ``.bz2`` files handled automatically):
`./lines.txt`, `/home/joe/lines.txt.gz`, `file:///home/joe/lines.txt.bz2`
2. a URI for HDFS: `hdfs:///some/path/lines.txt`
3. a URI for Amazon's S3 (can also supply credentials inside the URI):
`s3://my_bucket/lines.txt`, `s3://my_aws_key_id:key_secret@my_bucket/lines.txt`
4. an instance of the boto.s3.key.Key class.
Examples::
>>> # stream lines from http; you can use context managers too:
>>> with smart_open.smart_open('http://www.google.com') as fin:
... for line in fin:
... print line
>>> # stream lines from S3; you can use context managers too:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt') as fin:
... for line in fin:
... print line
>>> # you can also use a boto.s3.key.Key instance directly:
>>> key = boto.connect_s3().get_bucket("my_bucket").get_key("my_key")
>>> with smart_open.smart_open(key) as fin:
... for line in fin:
... print line
>>> # stream line-by-line from an HDFS file
>>> for line in smart_open.smart_open('hdfs:///user/hadoop/my_file.txt'):
... print line
>>> # stream content *into* S3:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt', 'wb') as fout:
... for line in ['first line', 'second line', 'third line']:
... fout.write(line + '\n')
>>> # stream from/to (compressed) local files:
>>> for line in smart_open.smart_open('/home/radim/my_file.txt'):
... print line
>>> for line in smart_open.smart_open('/home/radim/my_file.txt.gz'):
... print line
>>> with smart_open.smart_open('/home/radim/my_file.txt.gz', 'wb') as fout:
... fout.write("hello world!\n")
>>> with smart_open.smart_open('/home/radim/another.txt.bz2', 'wb') as fout:
... fout.write("good bye!\n")
>>> # stream from/to (compressed) local files with Expand ~ and ~user constructions:
>>> for line in smart_open.smart_open('~/my_file.txt'):
... print line
>>> for line in smart_open.smart_open('my_file.txt'):
... print line
"""
logger.debug("%r", locals())
#
# This is a work-around for the problem described in Issue #144.
# If the user has explicitly specified an encoding, then assume they want
# us to open the destination in text mode, instead of the default binary.
#
# If we change the default mode to be text, and match the normal behavior
# of Py2 and 3, then the above assumption will be unnecessary.
#
if kw.get("encoding") is not None and "b" in mode:
mode = mode.replace("b", "")
# validate mode parameter
if not isinstance(mode, six.string_types):
raise TypeError("mode should be a string")
if isinstance(uri, six.string_types):
# this method just routes the request to classes handling the specific storage
# schemes, depending on the URI protocol in `uri`
parsed_uri = ParseUri(uri)
if parsed_uri.scheme in ("file",):
# local files -- both read & write supported
# compression, if any, is determined by the filename extension (.gz, .bz2)
encoding = kw.pop("encoding", None)
errors = kw.pop("errors", DEFAULT_ERRORS)
return file_smart_open(
parsed_uri.uri_path, mode, encoding=encoding, errors=errors
)
elif parsed_uri.scheme in ("s3", "s3n", "s3u"):
return s3_open_uri(parsed_uri, mode, **kw)
elif parsed_uri.scheme in ("hdfs",):
encoding = kw.pop("encoding", None)
if encoding is not None:
warnings.warn(
_ISSUE_146_FSTR
% {"encoding": encoding, "scheme": parsed_uri.scheme}
)
if mode in ("r", "rb"):
return HdfsOpenRead(parsed_uri, **kw)
if mode in ("w", "wb"):
return HdfsOpenWrite(parsed_uri, **kw)
else:
raise NotImplementedError(
"file mode %s not supported for %r scheme", mode, parsed_uri.scheme
)
elif parsed_uri.scheme in ("webhdfs",):
encoding = kw.pop("encoding", None)
if encoding is not None:
warnings.warn(
_ISSUE_146_FSTR
% {"encoding": encoding, "scheme": parsed_uri.scheme}
)
if mode in ("r", "rb"):
return WebHdfsOpenRead(parsed_uri, **kw)
elif mode in ("w", "wb"):
return WebHdfsOpenWrite(parsed_uri, **kw)
else:
raise NotImplementedError(
"file mode %s not supported for %r scheme", mode, parsed_uri.scheme
)
elif parsed_uri.scheme.startswith("http"):
encoding = kw.pop("encoding", None)
if encoding is not None:
warnings.warn(
_ISSUE_146_FSTR
% {"encoding": encoding, "scheme": parsed_uri.scheme}
)
if mode in ("r", "rb"):
return HttpOpenRead(parsed_uri, **kw)
else:
raise NotImplementedError(
"file mode %s not supported for %r scheme", mode, parsed_uri.scheme
)
else:
raise NotImplementedError("scheme %r is not supported", parsed_uri.scheme)
elif isinstance(uri, boto.s3.key.Key):
return s3_open_key(uri, mode, **kw)
elif hasattr(uri, "read"):
# simply pass-through if already a file-like
return uri
else:
raise TypeError("don't know how to handle uri %s" % repr(uri))
|
def smart_open(uri, mode="rb", **kw):
"""
Open the given S3 / HDFS / filesystem file pointed to by `uri` for reading or writing.
The only supported modes for now are 'rb' (read, default) and 'wb' (replace & write).
The reads/writes are memory efficient (streamed) and therefore suitable for
arbitrarily large files.
The `uri` can be either:
1. a URI for the local filesystem (compressed ``.gz`` or ``.bz2`` files handled automatically):
`./lines.txt`, `/home/joe/lines.txt.gz`, `file:///home/joe/lines.txt.bz2`
2. a URI for HDFS: `hdfs:///some/path/lines.txt`
3. a URI for Amazon's S3 (can also supply credentials inside the URI):
`s3://my_bucket/lines.txt`, `s3://my_aws_key_id:key_secret@my_bucket/lines.txt`
4. an instance of the boto.s3.key.Key class.
Examples::
>>> # stream lines from http; you can use context managers too:
>>> with smart_open.smart_open('http://www.google.com') as fin:
... for line in fin:
... print line
>>> # stream lines from S3; you can use context managers too:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt') as fin:
... for line in fin:
... print line
>>> # you can also use a boto.s3.key.Key instance directly:
>>> key = boto.connect_s3().get_bucket("my_bucket").get_key("my_key")
>>> with smart_open.smart_open(key) as fin:
... for line in fin:
... print line
>>> # stream line-by-line from an HDFS file
>>> for line in smart_open.smart_open('hdfs:///user/hadoop/my_file.txt'):
... print line
>>> # stream content *into* S3:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt', 'wb') as fout:
... for line in ['first line', 'second line', 'third line']:
... fout.write(line + '\n')
>>> # stream from/to (compressed) local files:
>>> for line in smart_open.smart_open('/home/radim/my_file.txt'):
... print line
>>> for line in smart_open.smart_open('/home/radim/my_file.txt.gz'):
... print line
>>> with smart_open.smart_open('/home/radim/my_file.txt.gz', 'wb') as fout:
... fout.write("hello world!\n")
>>> with smart_open.smart_open('/home/radim/another.txt.bz2', 'wb') as fout:
... fout.write("good bye!\n")
>>> # stream from/to (compressed) local files with Expand ~ and ~user constructions:
>>> for line in smart_open.smart_open('~/my_file.txt'):
... print line
>>> for line in smart_open.smart_open('my_file.txt'):
... print line
"""
logger.debug("%r", locals())
#
# This is a work-around for the problem described in Issue #144.
# If the user has explicitly specified an encoding, then assume they want
# us to open the destination in text mode, instead of the default binary.
#
# If we change the default mode to be text, and match the normal behavior
# of Py2 and 3, then the above assumption will be unnecessary.
#
if kw.get("encoding") is not None and "b" in mode:
mode = mode.replace("b", "")
# validate mode parameter
if not isinstance(mode, six.string_types):
raise TypeError("mode should be a string")
if isinstance(uri, six.string_types):
# this method just routes the request to classes handling the specific storage
# schemes, depending on the URI protocol in `uri`
parsed_uri = ParseUri(uri)
if parsed_uri.scheme in ("file",):
# local files -- both read & write supported
# compression, if any, is determined by the filename extension (.gz, .bz2)
return file_smart_open(
parsed_uri.uri_path, mode, encoding=kw.pop("encoding", None)
)
elif parsed_uri.scheme in ("s3", "s3n", "s3u"):
return s3_open_uri(parsed_uri, mode, **kw)
elif parsed_uri.scheme in ("hdfs",):
encoding = kw.pop("encoding", None)
if encoding is not None:
warnings.warn(
_ISSUE_146_FSTR
% {"encoding": encoding, "scheme": parsed_uri.scheme}
)
if mode in ("r", "rb"):
return HdfsOpenRead(parsed_uri, **kw)
if mode in ("w", "wb"):
return HdfsOpenWrite(parsed_uri, **kw)
else:
raise NotImplementedError(
"file mode %s not supported for %r scheme", mode, parsed_uri.scheme
)
elif parsed_uri.scheme in ("webhdfs",):
encoding = kw.pop("encoding", None)
if encoding is not None:
warnings.warn(
_ISSUE_146_FSTR
% {"encoding": encoding, "scheme": parsed_uri.scheme}
)
if mode in ("r", "rb"):
return WebHdfsOpenRead(parsed_uri, **kw)
elif mode in ("w", "wb"):
return WebHdfsOpenWrite(parsed_uri, **kw)
else:
raise NotImplementedError(
"file mode %s not supported for %r scheme", mode, parsed_uri.scheme
)
elif parsed_uri.scheme.startswith("http"):
encoding = kw.pop("encoding", None)
if encoding is not None:
warnings.warn(
_ISSUE_146_FSTR
% {"encoding": encoding, "scheme": parsed_uri.scheme}
)
if mode in ("r", "rb"):
return HttpOpenRead(parsed_uri, **kw)
else:
raise NotImplementedError(
"file mode %s not supported for %r scheme", mode, parsed_uri.scheme
)
else:
raise NotImplementedError("scheme %r is not supported", parsed_uri.scheme)
elif isinstance(uri, boto.s3.key.Key):
return s3_open_key(uri, mode, **kw)
elif hasattr(uri, "read"):
# simply pass-through if already a file-like
return uri
else:
raise TypeError("don't know how to handle uri %s" % repr(uri))
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def s3_open_uri(parsed_uri, mode, **kwargs):
logger.debug("%r", locals())
if parsed_uri.access_id is not None:
kwargs["aws_access_key_id"] = parsed_uri.access_id
if parsed_uri.access_secret is not None:
kwargs["aws_secret_access_key"] = parsed_uri.access_secret
# Get an S3 host. It is required for sigv4 operations.
host = kwargs.pop("host", None)
if host is not None:
kwargs["endpoint_url"] = "http://" + host
#
# TODO: this is the wrong place to handle ignore_extension.
# It should happen at the highest level in the smart_open function, because
# it influences other file systems as well, not just S3.
#
if kwargs.pop("ignore_extension", False):
codec = None
else:
codec = _detect_codec(parsed_uri.key_id)
#
# Codecs work on a byte-level, so the underlying S3 object should
# always be reading bytes.
#
if mode in (smart_open_s3.READ, smart_open_s3.READ_BINARY):
s3_mode = smart_open_s3.READ_BINARY
elif mode in (smart_open_s3.WRITE, smart_open_s3.WRITE_BINARY):
s3_mode = smart_open_s3.WRITE_BINARY
else:
raise NotImplementedError("mode %r not implemented for S3" % mode)
#
# TODO: I'm not sure how to handle this with boto3. Any ideas?
#
# https://github.com/boto/boto3/issues/334
#
# _setup_unsecured_mode()
encoding = kwargs.get("encoding")
errors = kwargs.get("errors", DEFAULT_ERRORS)
fobj = smart_open_s3.open(
parsed_uri.bucket_id, parsed_uri.key_id, s3_mode, **kwargs
)
decompressed_fobj = _CODECS[codec](fobj, mode)
decoded_fobj = encoding_wrapper(
decompressed_fobj, mode, encoding=encoding, errors=errors
)
return decoded_fobj
|
def s3_open_uri(parsed_uri, mode, **kwargs):
logger.debug("%r", locals())
if parsed_uri.access_id is not None:
kwargs["aws_access_key_id"] = parsed_uri.access_id
if parsed_uri.access_secret is not None:
kwargs["aws_secret_access_key"] = parsed_uri.access_secret
# Get an S3 host. It is required for sigv4 operations.
host = kwargs.pop("host", None)
if host is not None:
kwargs["endpoint_url"] = "http://" + host
#
# TODO: this is the wrong place to handle ignore_extension.
# It should happen at the highest level in the smart_open function, because
# it influences other file systems as well, not just S3.
#
if kwargs.pop("ignore_extension", False):
codec = None
else:
codec = _detect_codec(parsed_uri.key_id)
#
# Codecs work on a byte-level, so the underlying S3 object should
# always be reading bytes.
#
if codec and mode in (smart_open_s3.READ, smart_open_s3.READ_BINARY):
s3_mode = smart_open_s3.READ_BINARY
elif codec and mode in (smart_open_s3.WRITE, smart_open_s3.WRITE_BINARY):
s3_mode = smart_open_s3.WRITE_BINARY
else:
s3_mode = mode
#
# TODO: I'm not sure how to handle this with boto3. Any ideas?
#
# https://github.com/boto/boto3/issues/334
#
# _setup_unsecured_mode()
fobj = smart_open_s3.open(
parsed_uri.bucket_id, parsed_uri.key_id, s3_mode, **kwargs
)
return _CODECS[codec](fobj, mode)
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def s3_open_key(key, mode, **kwargs):
logger.debug("%r", locals())
#
# TODO: handle boto3 keys as well
#
host = kwargs.pop("host", None)
if host is not None:
kwargs["endpoint_url"] = "http://" + host
if kwargs.pop("ignore_extension", False):
codec = None
else:
codec = _detect_codec(key.name)
#
# Codecs work on a byte-level, so the underlying S3 object should
# always be reading bytes.
#
if mode in (smart_open_s3.READ, smart_open_s3.READ_BINARY):
s3_mode = smart_open_s3.READ_BINARY
elif mode in (smart_open_s3.WRITE, smart_open_s3.WRITE_BINARY):
s3_mode = smart_open_s3.WRITE_BINARY
else:
raise NotImplementedError("mode %r not implemented for S3" % mode)
logging.debug("codec: %r mode: %r s3_mode: %r", codec, mode, s3_mode)
encoding = kwargs.get("encoding")
errors = kwargs.get("errors", DEFAULT_ERRORS)
fobj = smart_open_s3.open(key.bucket.name, key.name, s3_mode, **kwargs)
decompressed_fobj = _CODECS[codec](fobj, mode)
decoded_fobj = encoding_wrapper(
decompressed_fobj, mode, encoding=encoding, errors=errors
)
return decoded_fobj
|
def s3_open_key(key, mode, **kwargs):
logger.debug("%r", locals())
#
# TODO: handle boto3 keys as well
#
host = kwargs.pop("host", None)
if host is not None:
kwargs["endpoint_url"] = "http://" + host
if kwargs.pop("ignore_extension", False):
codec = None
else:
codec = _detect_codec(key.name)
#
# Codecs work on a byte-level, so the underlying S3 object should
# always be reading bytes.
#
if codec and mode in (smart_open_s3.READ, smart_open_s3.READ_BINARY):
s3_mode = smart_open_s3.READ_BINARY
elif codec and mode in (smart_open_s3.WRITE, smart_open_s3.WRITE_BINARY):
s3_mode = smart_open_s3.WRITE_BINARY
else:
s3_mode = mode
logging.debug("codec: %r mode: %r s3_mode: %r", codec, mode, s3_mode)
fobj = smart_open_s3.open(key.bucket.name, key.name, s3_mode, **kwargs)
return _CODECS[codec](fobj, mode)
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def _wrap_gzip(fileobj, mode):
return gzip.GzipFile(fileobj=fileobj, mode=mode)
|
def _wrap_gzip(fileobj, mode):
return contextlib.closing(gzip.GzipFile(fileobj=fileobj, mode=mode))
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def encoding_wrapper(fileobj, mode, encoding=None, errors=DEFAULT_ERRORS):
"""Decode bytes into text, if necessary.
If mode specifies binary access, does nothing, unless the encoding is
specified. A non-null encoding implies text mode.
:arg fileobj: must quack like a filehandle object.
:arg str mode: is the mode which was originally requested by the user.
:arg str encoding: The text encoding to use. If mode is binary, overrides mode.
:arg str errors: The method to use when handling encoding/decoding errors.
:returns: a file object
"""
logger.debug("encoding_wrapper: %r", locals())
#
# If the mode is binary, but the user specified an encoding, assume they
# want text. If we don't make this assumption, ignore the encoding and
# return bytes, smart_open behavior will diverge from the built-in open:
#
# open(filename, encoding='utf-8') returns a text stream in Py3
# smart_open(filename, encoding='utf-8') would return a byte stream
# without our assumption, because the default mode is rb.
#
if "b" in mode and encoding is None:
return fileobj
if encoding is None:
encoding = SYSTEM_ENCODING
if mode[0] == "r":
decoder = codecs.getreader(encoding)
else:
decoder = codecs.getwriter(encoding)
return decoder(fileobj, errors=errors)
|
def encoding_wrapper(fileobj, mode, encoding=None):
"""Decode bytes into text, if necessary.
If mode specifies binary access, does nothing, unless the encoding is
specified. A non-null encoding implies text mode.
:arg fileobj: must quack like a filehandle object.
:arg str mode: is the mode which was originally requested by the user.
:arg encoding: The text encoding to use. If mode is binary, overrides mode.
:returns: a file object
"""
logger.debug("encoding_wrapper: %r", locals())
#
# If the mode is binary, but the user specified an encoding, assume they
# want text. If we don't make this assumption, ignore the encoding and
# return bytes, smart_open behavior will diverge from the built-in open:
#
# open(filename, encoding='utf-8') returns a text stream in Py3
# smart_open(filename, encoding='utf-8') would return a byte stream
# without our assumption, because the default mode is rb.
#
if "b" in mode and encoding is None:
return fileobj
if encoding is None:
encoding = SYSTEM_ENCODING
if mode[0] == "r":
decoder = codecs.getreader(encoding)
else:
decoder = codecs.getwriter(encoding)
return decoder(fileobj)
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def file_smart_open(fname, mode="rb", encoding=None, errors=DEFAULT_ERRORS):
"""
Stream from/to local filesystem, transparently (de)compressing gzip and bz2
files if necessary.
:arg str fname: The path to the file to open.
:arg str mode: The mode in which to open the file.
:arg str encoding: The text encoding to use.
:arg str errors: The method to use when handling encoding/decoding errors.
:returns: A file object
"""
#
# This is how we get from the filename to the end result.
# Decompression is optional, but it always accepts bytes and returns bytes.
# Decoding is also optional, accepts bytes and returns text.
# The diagram below is for reading, for writing, the flow is from right to
# left, but the code is identical.
#
# open as binary decompress? decode?
# filename ---------------> bytes -------------> bytes ---------> text
# raw_fobj decompressed_fobj decoded_fobj
#
try: # TODO need to fix this place (for cases with r+ and so on)
raw_mode = {"r": "rb", "w": "wb", "a": "ab"}[mode]
except KeyError:
raw_mode = mode
raw_fobj = open(fname, raw_mode)
decompressed_fobj = compression_wrapper(raw_fobj, fname, raw_mode)
decoded_fobj = encoding_wrapper(
decompressed_fobj, mode, encoding=encoding, errors=errors
)
return decoded_fobj
|
def file_smart_open(fname, mode="rb", encoding=None):
"""
Stream from/to local filesystem, transparently (de)compressing gzip and bz2
files if necessary.
:arg str fname: The path to the file to open.
:arg str mode: The mode in which to open the file.
:arg str encoding: The text encoding to use.
:returns: A file object
"""
#
# This is how we get from the filename to the end result.
# Decompression is optional, but it always accepts bytes and returns bytes.
# Decoding is also optional, accepts bytes and returns text.
# The diagram below is for reading, for writing, the flow is from right to
# left, but the code is identical.
#
# open as binary decompress? decode?
# filename ---------------> bytes -------------> bytes ---------> text
# raw_fobj decompressed_fobj decoded_fobj
#
try: # TODO need to fix this place (for cases with r+ and so on)
raw_mode = {"r": "rb", "w": "wb", "a": "ab"}[mode]
except KeyError:
raw_mode = mode
raw_fobj = open(fname, raw_mode)
decompressed_fobj = compression_wrapper(raw_fobj, fname, raw_mode)
decoded_fobj = encoding_wrapper(decompressed_fobj, mode, encoding=encoding)
return decoded_fobj
|
https://github.com/RaRe-Technologies/smart_open/issues/153
|
(smartopen)sergeyich:issue152 misha$ git rev-parse --short HEAD
d10166c
(smartopen)sergeyich:issue152 misha$ time python reproduce.py s3://commoncrawl/crawl-002/2010/09/25/0/1285411480200_0.arc.gz
0it [00:00, ?it/s]Traceback (most recent call last):
File "reproduce.py", line 5, in <module>
for i, _ in enumerate(tqdm(smart_open(sys.argv[1], 'rb'))):
File "/Users/misha/envs/smartopen/lib/python3.6/site-packages/tqdm/_tqdm.py", line 953, in __iter__
for obj in iterable:
TypeError: 'closing' object is not iterable
real 0m3.131s
user 0m0.906s
sys 0m0.151s
|
TypeError
|
def __init__(self, uri, default_scheme="file"):
"""
Assume `default_scheme` if no scheme given in `uri`.
"""
if os.name == "nt":
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if "://" not in uri:
# no protocol given => assume a local file
uri = "file://" + uri
parsed_uri = urlsplit(uri, allow_fragments=False)
self.scheme = parsed_uri.scheme if parsed_uri.scheme else default_scheme
if self.scheme == "hdfs":
self.uri_path = parsed_uri.netloc + parsed_uri.path
self.uri_path = "/" + self.uri_path.lstrip("/")
if not self.uri_path:
raise RuntimeError("invalid HDFS URI: %s" % uri)
elif self.scheme == "webhdfs":
self.uri_path = parsed_uri.netloc + "/webhdfs/v1" + parsed_uri.path
if parsed_uri.query:
self.uri_path += "?" + parsed_uri.query
if not self.uri_path:
raise RuntimeError("invalid WebHDFS URI: %s" % uri)
elif self.scheme in ("s3", "s3n"):
self.bucket_id = (parsed_uri.netloc + parsed_uri.path).split("@")
self.key_id = None
if len(self.bucket_id) == 1:
# URI without credentials: s3://bucket/object
self.bucket_id, self.key_id = self.bucket_id[0].split("/", 1)
# "None" credentials are interpreted as "look for credentials in other locations" by boto
self.access_id, self.access_secret = None, None
elif len(self.bucket_id) == 2 and len(self.bucket_id[0].split(":")) == 2:
# URI in full format: s3://key:secret@bucket/object
# access key id: [A-Z0-9]{20}
# secret access key: [A-Za-z0-9/+=]{40}
acc, self.bucket_id = self.bucket_id
self.access_id, self.access_secret = acc.split(":")
self.bucket_id, self.key_id = self.bucket_id.split("/", 1)
else:
# more than 1 '@' means invalid uri
# Bucket names must be at least 3 and no more than 63 characters long.
# Bucket names must be a series of one or more labels.
# Adjacent labels are separated by a single period (.).
# Bucket names can contain lowercase letters, numbers, and hyphens.
# Each label must start and end with a lowercase letter or a number.
raise RuntimeError("invalid S3 URI: %s" % uri)
elif self.scheme == "file":
self.uri_path = parsed_uri.netloc + parsed_uri.path
# '~/tmp' may be expanded to '/Users/username/tmp'
self.uri_path = os.path.expanduser(self.uri_path)
if not self.uri_path:
raise RuntimeError("invalid file URI: %s" % uri)
else:
raise NotImplementedError("unknown URI scheme %r in %r" % (self.scheme, uri))
|
def __init__(self, uri, default_scheme="file"):
"""
Assume `default_scheme` if no scheme given in `uri`.
"""
if os.name == "nt":
# urlsplit doesn't work on Windows -- it parses the drive as the scheme...
if "://" not in uri:
# no protocol given => assume a local file
uri = "file://" + uri
parsed_uri = urlsplit(uri)
self.scheme = parsed_uri.scheme if parsed_uri.scheme else default_scheme
if self.scheme == "hdfs":
self.uri_path = parsed_uri.netloc + parsed_uri.path
self.uri_path = "/" + self.uri_path.lstrip("/")
if not self.uri_path:
raise RuntimeError("invalid HDFS URI: %s" % uri)
elif self.scheme == "webhdfs":
self.uri_path = parsed_uri.netloc + "/webhdfs/v1" + parsed_uri.path
if parsed_uri.query:
self.uri_path += "?" + parsed_uri.query
if not self.uri_path:
raise RuntimeError("invalid WebHDFS URI: %s" % uri)
elif self.scheme in ("s3", "s3n"):
self.bucket_id = (parsed_uri.netloc + parsed_uri.path).split("@")
self.key_id = None
if len(self.bucket_id) == 1:
# URI without credentials: s3://bucket/object
self.bucket_id, self.key_id = self.bucket_id[0].split("/", 1)
# "None" credentials are interpreted as "look for credentials in other locations" by boto
self.access_id, self.access_secret = None, None
elif len(self.bucket_id) == 2 and len(self.bucket_id[0].split(":")) == 2:
# URI in full format: s3://key:secret@bucket/object
# access key id: [A-Z0-9]{20}
# secret access key: [A-Za-z0-9/+=]{40}
acc, self.bucket_id = self.bucket_id
self.access_id, self.access_secret = acc.split(":")
self.bucket_id, self.key_id = self.bucket_id.split("/", 1)
else:
# more than 1 '@' means invalid uri
# Bucket names must be at least 3 and no more than 63 characters long.
# Bucket names must be a series of one or more labels.
# Adjacent labels are separated by a single period (.).
# Bucket names can contain lowercase letters, numbers, and hyphens.
# Each label must start and end with a lowercase letter or a number.
raise RuntimeError("invalid S3 URI: %s" % uri)
elif self.scheme == "file":
self.uri_path = parsed_uri.netloc + parsed_uri.path
# '~/tmp' may be expanded to '/Users/username/tmp'
self.uri_path = os.path.expanduser(self.uri_path)
if not self.uri_path:
raise RuntimeError("invalid file URI: %s" % uri)
else:
raise NotImplementedError("unknown URI scheme %r in %r" % (self.scheme, uri))
|
https://github.com/RaRe-Technologies/smart_open/issues/92
|
In [2]: smart_open('aa#aa')
---------------------------------------------------------------------------
IOError Traceback (most recent call last)
<ipython-input-2-e0a7775bdb92> in <module>()
----> 1 s('aa#aa')
/usr/local/lib/python2.7/dist-packages/smart_open-1.2.1-py2.7.egg/smart_open/smart_open_lib.pyc in smart_open(uri, mode)
87 # local files -- both read & write supported
88 # compression, if any, is determined by the filename extension (.gz, .bz2)
---> 89 return file_smart_open(parsed_uri.uri_path, mode)
90
91 if mode in ('r', 'rb'):
/usr/local/lib/python2.7/dist-packages/smart_open-1.2.1-py2.7.egg/smart_open/smart_open_lib.pyc in file_smart_open(fname, mode)
299 return make_closing(GzipFile)(fname, mode)
300
--> 301 return open(fname, mode)
302
303
IOError: [Errno 2] No such file or directory: 'aa'
|
IOError
|
def _get_many_from_db_backend(self, async_tasks) -> Mapping[str, EventBufferValueType]:
task_ids = _tasks_list_to_task_ids(async_tasks)
session = app.backend.ResultSession()
task_cls = getattr(app.backend, "task_cls", TaskDb)
with session_cleanup(session):
tasks = session.query(task_cls).filter(task_cls.task_id.in_(task_ids)).all()
task_results = [app.backend.meta_from_decoded(task.to_dict()) for task in tasks]
task_results_by_task_id = {
task_result["task_id"]: task_result for task_result in task_results
}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
|
def _get_many_from_db_backend(self, async_tasks) -> Mapping[str, EventBufferValueType]:
task_ids = _tasks_list_to_task_ids(async_tasks)
session = app.backend.ResultSession()
task_cls = app.backend.task_cls
with session_cleanup(session):
tasks = session.query(task_cls).filter(task_cls.task_id.in_(task_ids)).all()
task_results = [app.backend.meta_from_decoded(task.to_dict()) for task in tasks]
task_results_by_task_id = {
task_result["task_id"]: task_result for task_result in task_results
}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
|
https://github.com/apache/airflow/issues/14586
|
[2021-03-03 17:31:19,393] {scheduler_job.py:1298} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/airflow/jobs/scheduler_job.py", line 1280, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.6/dist-packages/airflow/jobs/scheduler_job.py", line 1384, in _run_scheduler_loop
self.executor.heartbeat()
File "/usr/local/lib/python3.6/dist-packages/airflow/executors/base_executor.py", line 162, in heartbeat
self.sync()
File "/usr/local/lib/python3.6/dist-packages/airflow/executors/celery_executor.py", line 340, in sync
self.update_all_task_states()
File "/usr/local/lib/python3.6/dist-packages/airflow/executors/celery_executor.py", line 399, in update_all_task_states
state_and_info_by_celery_task_id = self.bulk_state_fetcher.get_many(self.tasks.values())
File "/usr/local/lib/python3.6/dist-packages/airflow/executors/celery_executor.py", line 552, in get_many
result = self._get_many_from_db_backend(async_results)
File "/usr/local/lib/python3.6/dist-packages/airflow/executors/celery_executor.py", line 570, in _get_many_from_db_backend
task_cls = app.backend.task_cls
AttributeError: 'DatabaseBackend' object has no attribute 'task_cls'
[2021-03-03 17:31:20,396] {process_utils.py:100} INFO - Sending Signals.SIGTERM to GPID 3852
[2021-03-03 17:31:20,529] {process_utils.py:66} INFO - Process psutil.Process(pid=3996, status='terminated', started='17:31:19') (3996) terminated with exit code None
[2021-03-03 17:31:20,533] {process_utils.py:66} INFO - Process psutil.Process(pid=3997, status='terminated', started='17:31:19') (3997) terminated with exit code None
[2021-03-03 17:31:20,533] {process_utils.py:206} INFO - Waiting up to 5 seconds for processes to exit...
[2021-03-03 17:31:20,540] {process_utils.py:66} INFO - Process psutil.Process(pid=3852, status='terminated', exitcode=0, started='17:31:13') (3852) terminated with exit code 0
[2021-03-03 17:31:20,540] {scheduler_job.py:1301} INFO - Exited execute loop
|
AttributeError
|
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date, end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [
t.task_id for t in self.dag.task_dict.values() if t.depends_on_past
]
if tasks_that_depend_on_past:
raise AirflowException(
"You cannot backfill backwards because one or more tasks depend_on_past: {}".format(
",".join(tasks_that_depend_on_past)
)
)
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor_class not in (
executor_constants.LOCAL_EXECUTOR,
executor_constants.SEQUENTIAL_EXECUTOR,
executor_constants.DASK_EXECUTOR,
):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.job_id = "backfill"
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try: # pylint: disable=too-many-nested-blocks
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [
run_date
for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates
]
self._execute_for_run_dates(
run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session,
)
remaining_dates = ti_status.total_runs - len(
ti_status.executed_dag_run_dates
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise BackfillUnfinished(err, ti_status)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id,
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.")
|
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date, end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [
t.task_id for t in self.dag.task_dict.values() if t.depends_on_past
]
if tasks_that_depend_on_past:
raise AirflowException(
"You cannot backfill backwards because one or more tasks depend_on_past: {}".format(
",".join(tasks_that_depend_on_past)
)
)
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor_class not in (
executor_constants.LOCAL_EXECUTOR,
executor_constants.SEQUENTIAL_EXECUTOR,
executor_constants.DASK_EXECUTOR,
):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try: # pylint: disable=too-many-nested-blocks
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [
run_date
for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates
]
self._execute_for_run_dates(
run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session,
)
remaining_dates = ti_status.total_runs - len(
ti_status.executed_dag_run_dates
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise BackfillUnfinished(err, ti_status)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id,
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.")
|
https://github.com/apache/airflow/issues/13805
|
Something bad has happened.
Please consider letting us know by creating a bug report using GitHub.
Python version: 3.8.7
Airflow version: 2.0.0
Node: airflow-web-ffdd89d6-h98vj
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 1366, in run
executor.start()
File "/usr/local/lib/python3.8/site-packages/airflow/executors/kubernetes_executor.py", line 493, in start
raise AirflowException("Could not get scheduler_job_id")
airflow.exceptions.AirflowException: Could not get scheduler_job_id
|
airflow.exceptions.AirflowException
|
def run(self):
"""Runs Task Instance."""
dag_id = request.form.get("dag_id")
task_id = request.form.get("task_id")
origin = get_safe_url(request.form.get("origin"))
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.form.get("execution_date")
execution_date = timezone.parse(execution_date)
ignore_all_deps = request.form.get("ignore_all_deps") == "true"
ignore_task_deps = request.form.get("ignore_task_deps") == "true"
ignore_ti_state = request.form.get("ignore_ti_state") == "true"
executor = ExecutorLoader.get_default_executor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor # noqa
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.executors.kubernetes_executor import KubernetesExecutor # noqa
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be run
dep_context = DepContext(
deps=RUNNING_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
[f"{dep.dep_name}: {dep.reason}" for dep in failed_deps]
)
flash(
"Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error",
)
return redirect(origin)
executor.job_id = "manual"
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
executor.heartbeat()
flash(f"Sent {ti} to the message queue, it should start any moment now.")
return redirect(origin)
|
def run(self):
"""Runs Task Instance."""
dag_id = request.form.get("dag_id")
task_id = request.form.get("task_id")
origin = get_safe_url(request.form.get("origin"))
dag = current_app.dag_bag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.form.get("execution_date")
execution_date = timezone.parse(execution_date)
ignore_all_deps = request.form.get("ignore_all_deps") == "true"
ignore_task_deps = request.form.get("ignore_task_deps") == "true"
ignore_ti_state = request.form.get("ignore_ti_state") == "true"
executor = ExecutorLoader.get_default_executor()
valid_celery_config = False
valid_kubernetes_config = False
try:
from airflow.executors.celery_executor import CeleryExecutor # noqa
valid_celery_config = isinstance(executor, CeleryExecutor)
except ImportError:
pass
try:
from airflow.executors.kubernetes_executor import KubernetesExecutor # noqa
valid_kubernetes_config = isinstance(executor, KubernetesExecutor)
except ImportError:
pass
if not valid_celery_config and not valid_kubernetes_config:
flash("Only works with the Celery or Kubernetes executors, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be run
dep_context = DepContext(
deps=RUNNING_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
[f"{dep.dep_name}: {dep.reason}" for dep in failed_deps]
)
flash(
"Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error",
)
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
)
executor.heartbeat()
flash(f"Sent {ti} to the message queue, it should start any moment now.")
return redirect(origin)
|
https://github.com/apache/airflow/issues/13805
|
Something bad has happened.
Please consider letting us know by creating a bug report using GitHub.
Python version: 3.8.7
Airflow version: 2.0.0
Node: airflow-web-ffdd89d6-h98vj
-------------------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.8/site-packages/airflow/www/auth.py", line 34, in decorated
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/decorators.py", line 60, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/www/views.py", line 1366, in run
executor.start()
File "/usr/local/lib/python3.8/site-packages/airflow/executors/kubernetes_executor.py", line 493, in start
raise AirflowException("Could not get scheduler_job_id")
airflow.exceptions.AirflowException: Could not get scheduler_job_id
|
airflow.exceptions.AirflowException
|
def _emit_duration_stats_for_finished_state(self):
if self.state == State.RUNNING:
return
if self.start_date is None:
self.log.warning(
"Failed to record duration of %s: start_date is not set.", self
)
return
if self.end_date is None:
self.log.warning("Failed to record duration of %s: end_date is not set.", self)
return
duration = self.end_date - self.start_date
if self.state is State.SUCCESS:
Stats.timing(f"dagrun.duration.success.{self.dag_id}", duration)
elif self.state == State.FAILED:
Stats.timing(f"dagrun.duration.failed.{self.dag_id}", duration)
|
def _emit_duration_stats_for_finished_state(self):
if self.state == State.RUNNING:
return
duration = self.end_date - self.start_date
if self.state is State.SUCCESS:
Stats.timing(f"dagrun.duration.success.{self.dag_id}", duration)
elif self.state == State.FAILED:
Stats.timing(f"dagrun.duration.failed.{self.dag_id}", duration)
|
https://github.com/apache/airflow/issues/14384
|
Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1280, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1382, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1521, in _do_scheduling
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1760, in _schedule_dag_run
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
File "/usr/local/lib/python3.8/site-packages/airflow/utils/session.py", line 62, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/airflow/models/dagrun.py", line 478, in update_state
self._emit_duration_stats_for_finished_state()
File "/usr/local/lib/python3.8/site-packages/airflow/models/dagrun.py", line 615, in _emit_duration_stats_for_finished_state
duration = self.end_date - self.start_date
TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
|
TypeError
|
def _check_missing_providers(providers):
current_airflow_version = Version(__import__("airflow").__version__)
if current_airflow_version >= Version("2.0.0"):
prefix = "apache-airflow-providers-"
else:
prefix = "apache-airflow-backport-providers-"
for provider in providers:
dist_name = prefix + provider
try:
distribution(dist_name)
except PackageNotFoundError:
yield "Please install `{}`".format(dist_name)
|
def _check_missing_providers(providers):
current_airflow_version = Version(__import__("airflow").__version__)
if current_airflow_version.major >= 2:
prefix = "apache-airflow-providers-"
else:
prefix = "apache-airflow-backport-providers-"
for provider in providers:
dist_name = prefix + provider
try:
distribution(dist_name)
except PackageNotFoundError:
yield "Please install `{}`".format(dist_name)
|
https://github.com/apache/airflow/issues/14359
|
airflow@staging-airflow1:~$ airflow upgrade_check --config upgrade.yml
Using config file: upgrade.yml
=========================================================================================== STATUS ==========================================================================================
Check for latest versions of apache-airflow and checker............................................................................................................................FAIL
Remove airflow.AirflowMacroPlugin class............................................................................................................................................SUCCESS
Ensure users are not using custom metaclasses in custom operators..................................................................................................................SUCCESS
Chain between DAG and operator not allowed.........................................................................................................................................SUCCESS
Connection.conn_type is not nullable...............................................................................................................................................SUCCESS
Custom Executors now require full path.............................................................................................................................................SUCCESS
Hooks that run DB functions must inherit from DBApiHook............................................................................................................................FAIL
Fernet is enabled by default.......................................................................................................................................................SUCCESS
GCP service account key deprecation................................................................................................................................................SUCCESS
Unify hostname_callable option in core section.....................................................................................................................................SUCCESS
Traceback (most recent call last):
File "/usr/local/bin/airflow", line 37, in <module>
args.func(args)
File "/usr/local/lib/python3.6/dist-packages/airflow/upgrade/checker.py", line 118, in run
all_problems = check_upgrade(formatter, rules)
File "/usr/local/lib/python3.6/dist-packages/airflow/upgrade/checker.py", line 38, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/usr/local/lib/python3.6/dist-packages/airflow/upgrade/problem.py", line 48, in from_rule
messages = list(result)
File "/usr/local/lib/python3.6/dist-packages/airflow/upgrade/rules/import_changes.py", line 126, in _check_missing_providers
if current_airflow_version.major >= 2:
AttributeError: 'Version' object has no attribute 'major'
|
AttributeError
|
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "s3"
self.extra_args = {}
if "extra_args" in kwargs:
self.extra_args = kwargs["extra_args"]
if not isinstance(self.extra_args, dict):
raise ValueError(f"extra_args '{self.extra_args!r}' must be of type {dict}")
del kwargs["extra_args"]
self.transfer_config = TransferConfig()
if "transfer_config_args" in kwargs:
transport_config_args = kwargs["transfer_config_args"]
if not isinstance(transport_config_args, dict):
raise ValueError(
f"transfer_config_args '{transport_config_args!r} must be of type {dict}"
)
self.transfer_config = TransferConfig(**transport_config_args)
del kwargs["transfer_config_args"]
super().__init__(*args, **kwargs)
|
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "s3"
self.extra_args = {}
if "extra_args" in kwargs:
self.extra_args = kwargs["extra_args"]
if not isinstance(self.extra_args, dict):
raise ValueError(f"extra_args '{self.extra_args!r}' must be of type {dict}")
del kwargs["extra_args"]
super().__init__(*args, **kwargs)
|
https://github.com/apache/airflow/issues/14089
|
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <botocore.retryhandler.RetryHandler object at 0x7f097f048110>
[2021-02-05 02:11:30,103] {retryhandler.py:187} DEBUG - No retry needed.
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <bound method S3RegionRedirector.redirect_from_error of <botocore.utils.S3RegionRedirector object at 0x7f097f0293d0>>
[2021-02-05 02:11:30,103] {utils.py:1187} DEBUG - S3 request was previously to an accesspoint, not redirecting.
[2021-02-05 02:11:30,105] {utils.py:580} DEBUG - Acquiring 0
[2021-02-05 02:11:30,105] {futures.py:277} DEBUG - TransferCoordinator(transfer_id=0) cancel(cannot schedule new futures after interpreter shutdown) called
[2021-02-05 02:11:30,105] {s3_task_handler.py:193} ERROR - Could not write logs to s3://arn:aws:s3:us-west-2:<ACCOUNT>:accesspoint:<BUCKET,PATH>/2021-02-05T02:04:23.265117+00:00/1.log
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/log/s3_task_handler.py", line 190, in s3_write
encrypt=conf.getboolean('logging', 'ENCRYPT_S3_LOGS'),
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 547, in load_string
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 638, in _upload_file_obj
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
File "/home/airflow/.local/lib/python3.7/site-packages/boto3/s3/inject.py", line 538, in upload_fileobj
extra_args=ExtraArgs, subscribers=subscribers)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 313, in upload
call_args, UploadSubmissionTask, extra_main_kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 471, in _submit_transfer
main_kwargs=main_kwargs
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/futures.py", line 467, in submit
future = ExecutorFuture(self._executor.submit(task))
File "/usr/local/lib/python3.7/concurrent/futures/thread.py", line 165, in submit
raise RuntimeError('cannot schedule new futures after '
RuntimeError: cannot schedule new futures after interpreter shutdown
|
RuntimeError
|
def load_file(
self,
filename: str,
key: str,
bucket_name: Optional[str] = None,
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: Optional[str] = None,
) -> None:
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
:param gzip: If True, the file will be compressed locally
:type gzip: bool
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:type acl_policy: str
"""
if not replace and self.check_for_key(key, bucket_name):
raise ValueError(f"The key {key} already exists.")
extra_args = self.extra_args
if encrypt:
extra_args["ServerSideEncryption"] = "AES256"
if gzip:
with open(filename, "rb") as f_in:
filename_gz = f_in.name + ".gz"
with gz.open(filename_gz, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
if acl_policy:
extra_args["ACL"] = acl_policy
client = self.get_conn()
client.upload_file(
filename, bucket_name, key, ExtraArgs=extra_args, Config=self.transfer_config
)
|
def load_file(
self,
filename: str,
key: str,
bucket_name: Optional[str] = None,
replace: bool = False,
encrypt: bool = False,
gzip: bool = False,
acl_policy: Optional[str] = None,
) -> None:
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
:param gzip: If True, the file will be compressed locally
:type gzip: bool
:param acl_policy: String specifying the canned ACL policy for the file being
uploaded to the S3 bucket.
:type acl_policy: str
"""
if not replace and self.check_for_key(key, bucket_name):
raise ValueError(f"The key {key} already exists.")
extra_args = self.extra_args
if encrypt:
extra_args["ServerSideEncryption"] = "AES256"
if gzip:
with open(filename, "rb") as f_in:
filename_gz = f_in.name + ".gz"
with gz.open(filename_gz, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
filename = filename_gz
if acl_policy:
extra_args["ACL"] = acl_policy
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
|
https://github.com/apache/airflow/issues/14089
|
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <botocore.retryhandler.RetryHandler object at 0x7f097f048110>
[2021-02-05 02:11:30,103] {retryhandler.py:187} DEBUG - No retry needed.
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <bound method S3RegionRedirector.redirect_from_error of <botocore.utils.S3RegionRedirector object at 0x7f097f0293d0>>
[2021-02-05 02:11:30,103] {utils.py:1187} DEBUG - S3 request was previously to an accesspoint, not redirecting.
[2021-02-05 02:11:30,105] {utils.py:580} DEBUG - Acquiring 0
[2021-02-05 02:11:30,105] {futures.py:277} DEBUG - TransferCoordinator(transfer_id=0) cancel(cannot schedule new futures after interpreter shutdown) called
[2021-02-05 02:11:30,105] {s3_task_handler.py:193} ERROR - Could not write logs to s3://arn:aws:s3:us-west-2:<ACCOUNT>:accesspoint:<BUCKET,PATH>/2021-02-05T02:04:23.265117+00:00/1.log
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/log/s3_task_handler.py", line 190, in s3_write
encrypt=conf.getboolean('logging', 'ENCRYPT_S3_LOGS'),
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 547, in load_string
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 638, in _upload_file_obj
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
File "/home/airflow/.local/lib/python3.7/site-packages/boto3/s3/inject.py", line 538, in upload_fileobj
extra_args=ExtraArgs, subscribers=subscribers)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 313, in upload
call_args, UploadSubmissionTask, extra_main_kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 471, in _submit_transfer
main_kwargs=main_kwargs
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/futures.py", line 467, in submit
future = ExecutorFuture(self._executor.submit(task))
File "/usr/local/lib/python3.7/concurrent/futures/thread.py", line 165, in submit
raise RuntimeError('cannot schedule new futures after '
RuntimeError: cannot schedule new futures after interpreter shutdown
|
RuntimeError
|
def _upload_file_obj(
self,
file_obj: BytesIO,
key: str,
bucket_name: Optional[str] = None,
replace: bool = False,
encrypt: bool = False,
acl_policy: Optional[str] = None,
) -> None:
if not replace and self.check_for_key(key, bucket_name):
raise ValueError(f"The key {key} already exists.")
extra_args = self.extra_args
if encrypt:
extra_args["ServerSideEncryption"] = "AES256"
if acl_policy:
extra_args["ACL"] = acl_policy
client = self.get_conn()
client.upload_fileobj(
file_obj,
bucket_name,
key,
ExtraArgs=extra_args,
Config=self.transfer_config,
)
|
def _upload_file_obj(
self,
file_obj: BytesIO,
key: str,
bucket_name: Optional[str] = None,
replace: bool = False,
encrypt: bool = False,
acl_policy: Optional[str] = None,
) -> None:
if not replace and self.check_for_key(key, bucket_name):
raise ValueError(f"The key {key} already exists.")
extra_args = self.extra_args
if encrypt:
extra_args["ServerSideEncryption"] = "AES256"
if acl_policy:
extra_args["ACL"] = acl_policy
client = self.get_conn()
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
|
https://github.com/apache/airflow/issues/14089
|
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <botocore.retryhandler.RetryHandler object at 0x7f097f048110>
[2021-02-05 02:11:30,103] {retryhandler.py:187} DEBUG - No retry needed.
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <bound method S3RegionRedirector.redirect_from_error of <botocore.utils.S3RegionRedirector object at 0x7f097f0293d0>>
[2021-02-05 02:11:30,103] {utils.py:1187} DEBUG - S3 request was previously to an accesspoint, not redirecting.
[2021-02-05 02:11:30,105] {utils.py:580} DEBUG - Acquiring 0
[2021-02-05 02:11:30,105] {futures.py:277} DEBUG - TransferCoordinator(transfer_id=0) cancel(cannot schedule new futures after interpreter shutdown) called
[2021-02-05 02:11:30,105] {s3_task_handler.py:193} ERROR - Could not write logs to s3://arn:aws:s3:us-west-2:<ACCOUNT>:accesspoint:<BUCKET,PATH>/2021-02-05T02:04:23.265117+00:00/1.log
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/log/s3_task_handler.py", line 190, in s3_write
encrypt=conf.getboolean('logging', 'ENCRYPT_S3_LOGS'),
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 547, in load_string
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 638, in _upload_file_obj
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
File "/home/airflow/.local/lib/python3.7/site-packages/boto3/s3/inject.py", line 538, in upload_fileobj
extra_args=ExtraArgs, subscribers=subscribers)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 313, in upload
call_args, UploadSubmissionTask, extra_main_kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 471, in _submit_transfer
main_kwargs=main_kwargs
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/futures.py", line 467, in submit
future = ExecutorFuture(self._executor.submit(task))
File "/usr/local/lib/python3.7/concurrent/futures/thread.py", line 165, in submit
raise RuntimeError('cannot schedule new futures after '
RuntimeError: cannot schedule new futures after interpreter shutdown
|
RuntimeError
|
def hook(self):
"""Returns S3Hook."""
remote_conn_id = conf.get("logging", "REMOTE_LOG_CONN_ID")
try:
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
return S3Hook(remote_conn_id, transfer_config_args={"use_threads": False})
except Exception as e: # pylint: disable=broad-except
self.log.exception(
'Could not create an S3Hook with connection id "%s". '
"Please make sure that airflow[aws] is installed and "
'the S3 connection exists. Exception : "%s"',
remote_conn_id,
e,
)
return None
|
def hook(self):
"""Returns S3Hook."""
remote_conn_id = conf.get("logging", "REMOTE_LOG_CONN_ID")
try:
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
return S3Hook(remote_conn_id)
except Exception as e: # pylint: disable=broad-except
self.log.exception(
'Could not create an S3Hook with connection id "%s". '
"Please make sure that airflow[aws] is installed and "
'the S3 connection exists. Exception : "%s"',
remote_conn_id,
e,
)
return None
|
https://github.com/apache/airflow/issues/14089
|
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <botocore.retryhandler.RetryHandler object at 0x7f097f048110>
[2021-02-05 02:11:30,103] {retryhandler.py:187} DEBUG - No retry needed.
[2021-02-05 02:11:30,103] {hooks.py:210} DEBUG - Event needs-retry.s3.HeadObject: calling handler <bound method S3RegionRedirector.redirect_from_error of <botocore.utils.S3RegionRedirector object at 0x7f097f0293d0>>
[2021-02-05 02:11:30,103] {utils.py:1187} DEBUG - S3 request was previously to an accesspoint, not redirecting.
[2021-02-05 02:11:30,105] {utils.py:580} DEBUG - Acquiring 0
[2021-02-05 02:11:30,105] {futures.py:277} DEBUG - TransferCoordinator(transfer_id=0) cancel(cannot schedule new futures after interpreter shutdown) called
[2021-02-05 02:11:30,105] {s3_task_handler.py:193} ERROR - Could not write logs to s3://arn:aws:s3:us-west-2:<ACCOUNT>:accesspoint:<BUCKET,PATH>/2021-02-05T02:04:23.265117+00:00/1.log
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/log/s3_task_handler.py", line 190, in s3_write
encrypt=conf.getboolean('logging', 'ENCRYPT_S3_LOGS'),
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 61, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 90, in wrapper
return func(*bound_args.args, **bound_args.kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 547, in load_string
self._upload_file_obj(file_obj, key, bucket_name, replace, encrypt, acl_policy)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/s3.py", line 638, in _upload_file_obj
client.upload_fileobj(file_obj, bucket_name, key, ExtraArgs=extra_args)
File "/home/airflow/.local/lib/python3.7/site-packages/boto3/s3/inject.py", line 538, in upload_fileobj
extra_args=ExtraArgs, subscribers=subscribers)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 313, in upload
call_args, UploadSubmissionTask, extra_main_kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/manager.py", line 471, in _submit_transfer
main_kwargs=main_kwargs
File "/home/airflow/.local/lib/python3.7/site-packages/s3transfer/futures.py", line 467, in submit
future = ExecutorFuture(self._executor.submit(task))
File "/usr/local/lib/python3.7/concurrent/futures/thread.py", line 165, in submit
raise RuntimeError('cannot schedule new futures after '
RuntimeError: cannot schedule new futures after interpreter shutdown
|
RuntimeError
|
def _init_file(self, ti):
"""
Create log directory and give it correct permissions.
:param ti: task instance object
:return: relative log path of the given task instance
"""
# To handle log writing when tasks are impersonated, the log files need to
# be writable by the user that runs the Airflow command and the user
# that is impersonated. This is mainly to handle corner cases with the
# SubDagOperator. When the SubDagOperator is run, all of the operators
# run under the impersonated user and create appropriate log files
# as the impersonated user. However, if the user manually runs tasks
# of the SubDagOperator through the UI, then the log files are created
# by the user that runs the Airflow command. For example, the Airflow
# run command may be run by the `airflow_sudoable` user, but the Airflow
# tasks may be run by the `airflow` user. If the log files are not
# writable by both users, then it's possible that re-running a task
# via the UI (or vice versa) results in a permission error as the task
# tries to write to a log file created by the other user.
relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, relative_path)
directory = os.path.dirname(full_path)
# Create the log file and give it group writable permissions
# TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
# operator is not compatible with impersonation (e.g. if a Celery executor is used
# for a SubDag operator and the SubDag operator has a different owner than the
# parent DAG)
Path(directory).mkdir(mode=0o777, parents=True, exist_ok=True)
if not os.path.exists(full_path):
open(full_path, "a").close()
# TODO: Investigate using 444 instead of 666.
try:
os.chmod(full_path, 0o666)
except OSError:
logging.warning("OSError while change ownership of the log file")
return full_path
|
def _init_file(self, ti):
"""
Create log directory and give it correct permissions.
:param ti: task instance object
:return: relative log path of the given task instance
"""
# To handle log writing when tasks are impersonated, the log files need to
# be writable by the user that runs the Airflow command and the user
# that is impersonated. This is mainly to handle corner cases with the
# SubDagOperator. When the SubDagOperator is run, all of the operators
# run under the impersonated user and create appropriate log files
# as the impersonated user. However, if the user manually runs tasks
# of the SubDagOperator through the UI, then the log files are created
# by the user that runs the Airflow command. For example, the Airflow
# run command may be run by the `airflow_sudoable` user, but the Airflow
# tasks may be run by the `airflow` user. If the log files are not
# writable by both users, then it's possible that re-running a task
# via the UI (or vice versa) results in a permission error as the task
# tries to write to a log file created by the other user.
relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, relative_path)
directory = os.path.dirname(full_path)
# Create the log file and give it group writable permissions
# TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
# operator is not compatible with impersonation (e.g. if a Celery executor is used
# for a SubDag operator and the SubDag operator has a different owner than the
# parent DAG)
Path(directory).mkdir(mode=0o777, parents=True, exist_ok=True)
if not os.path.exists(full_path):
open(full_path, "a").close()
# TODO: Investigate using 444 instead of 666.
os.chmod(full_path, 0o666)
return full_path
|
https://github.com/apache/airflow/issues/12669
|
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 50, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/cli.py", line 86, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 179, in task_run
ti.init_run_context(raw=args.raw)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1922, in init_run_context
self._set_context(self)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/logging_mixin.py", line 54, in _set_context
set_context(self.log, context)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/logging_mixin.py", line 173, in set_context
handler.set_context(value)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/file_task_handler.py", line 54, in set_context
local_loc = self._init_file(ti)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/file_task_handler.py", line 256, in _init_file
os.chmod(full_path, 0o666)
PermissionError: [Errno 1] Operation not permitted: '/opt/airflow/logs/dagname/jobname/2020-11-27T20:57:00+00:00/1.log'
|
PermissionError
|
def _init_file(self, ti):
"""
Create log directory and give it correct permissions.
:param ti: task instance object
:return: relative log path of the given task instance
"""
# To handle log writing when tasks are impersonated, the log files need to
# be writable by the user that runs the Airflow command and the user
# that is impersonated. This is mainly to handle corner cases with the
# SubDagOperator. When the SubDagOperator is run, all of the operators
# run under the impersonated user and create appropriate log files
# as the impersonated user. However, if the user manually runs tasks
# of the SubDagOperator through the UI, then the log files are created
# by the user that runs the Airflow command. For example, the Airflow
# run command may be run by the `airflow_sudoable` user, but the Airflow
# tasks may be run by the `airflow` user. If the log files are not
# writable by both users, then it's possible that re-running a task
# via the UI (or vice versa) results in a permission error as the task
# tries to write to a log file created by the other user.
relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, relative_path)
directory = os.path.dirname(full_path)
# Create the log file and give it group writable permissions
# TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
# operator is not compatible with impersonation (e.g. if a Celery executor is used
# for a SubDag operator and the SubDag operator has a different owner than the
# parent DAG)
if not os.path.exists(directory):
# Create the directory as globally writable using custom mkdirs
# as os.makedirs doesn't set mode properly.
mkdirs(directory, 0o777)
if not os.path.exists(full_path):
open(full_path, "a").close()
# TODO: Investigate using 444 instead of 666.
try:
os.chmod(full_path, 0o666)
except OSError:
logging.warning("OSError while change ownership of the log file")
return full_path
|
def _init_file(self, ti):
"""
Create log directory and give it correct permissions.
:param ti: task instance object
:return: relative log path of the given task instance
"""
# To handle log writing when tasks are impersonated, the log files need to
# be writable by the user that runs the Airflow command and the user
# that is impersonated. This is mainly to handle corner cases with the
# SubDagOperator. When the SubDagOperator is run, all of the operators
# run under the impersonated user and create appropriate log files
# as the impersonated user. However, if the user manually runs tasks
# of the SubDagOperator through the UI, then the log files are created
# by the user that runs the Airflow command. For example, the Airflow
# run command may be run by the `airflow_sudoable` user, but the Airflow
# tasks may be run by the `airflow` user. If the log files are not
# writable by both users, then it's possible that re-running a task
# via the UI (or vice versa) results in a permission error as the task
# tries to write to a log file created by the other user.
relative_path = self._render_filename(ti, ti.try_number)
full_path = os.path.join(self.local_base, relative_path)
directory = os.path.dirname(full_path)
# Create the log file and give it group writable permissions
# TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
# operator is not compatible with impersonation (e.g. if a Celery executor is used
# for a SubDag operator and the SubDag operator has a different owner than the
# parent DAG)
if not os.path.exists(directory):
# Create the directory as globally writable using custom mkdirs
# as os.makedirs doesn't set mode properly.
mkdirs(directory, 0o777)
if not os.path.exists(full_path):
open(full_path, "a").close()
# TODO: Investigate using 444 instead of 666.
os.chmod(full_path, 0o666)
return full_path
|
https://github.com/apache/airflow/issues/12669
|
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/__main__.py", line 40, in main
args.func(args)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/cli_parser.py", line 50, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/cli.py", line 86, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/cli/commands/task_command.py", line 179, in task_run
ti.init_run_context(raw=args.raw)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/models/taskinstance.py", line 1922, in init_run_context
self._set_context(self)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/logging_mixin.py", line 54, in _set_context
set_context(self.log, context)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/logging_mixin.py", line 173, in set_context
handler.set_context(value)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/file_task_handler.py", line 54, in set_context
local_loc = self._init_file(ti)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/utils/log/file_task_handler.py", line 256, in _init_file
os.chmod(full_path, 0o666)
PermissionError: [Errno 1] Operation not permitted: '/opt/airflow/logs/dagname/jobname/2020-11-27T20:57:00+00:00/1.log'
|
PermissionError
|
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
) -> None:
"""Executes task asynchronously"""
self.log.info(
"Add task %s with command %s with executor_config %s",
key,
command,
executor_config,
)
try:
kube_executor_config = PodGenerator.from_obj(executor_config)
except Exception: # pylint: disable=broad-except
self.log.error("Invalid executor_config for %s", key)
self.fail(key=key, info="Invalid executor_config passed")
return
if executor_config:
pod_template_file = executor_config.get("pod_template_override", None)
else:
pod_template_file = None
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.event_buffer[key] = (State.QUEUED, self.scheduler_job_id)
self.task_queue.put((key, command, kube_executor_config, pod_template_file))
|
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
) -> None:
"""Executes task asynchronously"""
self.log.info(
"Add task %s with command %s with executor_config %s",
key,
command,
executor_config,
)
kube_executor_config = PodGenerator.from_obj(executor_config)
if executor_config:
pod_template_file = executor_config.get("pod_template_override", None)
else:
pod_template_file = None
if not self.task_queue:
raise AirflowException(NOT_STARTED_MESSAGE)
self.event_buffer[key] = (State.QUEUED, self.scheduler_job_id)
self.task_queue.put((key, command, kube_executor_config, pod_template_file))
|
https://github.com/apache/airflow/issues/14182
|
[2021-02-10 21:09:27,469] {scheduler_job.py:1298} ERROR - Exception when executing SchedulerJob._run_schedu
ler_loop
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1280, in _execute
self._run_scheduler_loop()
File "/usr/local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1384, in _run_scheduler
_loop
self.executor.heartbeat()
File "/usr/local/lib/python3.8/site-packages/airflow/executors/base_executor.py", line 158, in heartbeat
self.trigger_tasks(open_slots)
File "/usr/local/lib/python3.8/site-packages/airflow/executors/base_executor.py", line 188, in trigger_ta
sks
self.execute_async(key=key, command=command, queue=None, executor_config=ti.executor_config)
File "/usr/local/lib/python3.8/site-packages/airflow/executors/kubernetes_executor.py", line 493, in exec
ute_async
kube_executor_config = PodGenerator.from_obj(executor_config)
File "/usr/local/lib/python3.8/site-packages/airflow/kubernetes/pod_generator.py", line 175, in from_obj
k8s_legacy_object = obj.get("KubernetesExecutor", None)
AttributeError: 'V1Pod' object has no attribute 'get'
[2021-02-10 21:09:28,475] {process_utils.py:100} INFO - Sending Signals.SIGTERM to GPID 60
[2021-02-10 21:09:29,222] {process_utils.py:66} INFO - Process psutil.Process(pid=66, status='terminated',
started='21:09:27') (66) terminated with exit code None
[2021-02-10 21:09:29,697] {process_utils.py:206} INFO - Waiting up to 5 seconds for processes to exit...
[2021-02-10 21:09:29,716] {process_utils.py:66} INFO - Process psutil.Process(pid=75, status='terminated',
started='21:09:28') (75) terminated with exit code None
[2021-02-10 21:09:29,717] {process_utils.py:66} INFO - Process psutil.Process(pid=60, status='terminated',
exitcode=0, started='21:09:27') (60) terminated with exit code 0
[2021-02-10 21:09:29,717] {scheduler_job.py:1301} INFO - Exited execute loop
|
AttributeError
|
def clear(args):
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
dags = get_dags(args)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.sub_dag(
task_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
if args.yes:
args.no_confirm = args.yes
DAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=not args.no_confirm,
include_subdags=not args.exclude_subdags,
include_parentdag=not args.exclude_parentdag,
)
|
def clear(args):
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
dags = get_dags(args)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.sub_dag(
task_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
DAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=not args.no_confirm,
include_subdags=not args.exclude_subdags,
include_parentdag=not args.exclude_parentdag,
)
|
https://github.com/apache/airflow/issues/14171
|
Traceback (most recent call last):
File "/home/ec2-user/venv/bin/airflow", line 37, in <module>
args.func(args)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/utils/cli.py", line 233, in wrapper
func(args)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/utils/cli.py", line 81, in wrapper
return f(*args, **kwargs)
File "/home/ec2-user/venv/lib/python3.7/site-packages/airflow/bin/cli.py", line 867, in clear
confirm_prompt=not args.no_confirm,
AttributeError: 'Namespace' object has no attribute 'no_confirm'
|
AttributeError
|
def __init__(self, celery_executor, kubernetes_executor):
super().__init__()
self._job_id: Optional[str] = None
self.celery_executor = celery_executor
self.kubernetes_executor = kubernetes_executor
|
def __init__(self, celery_executor, kubernetes_executor):
super().__init__()
self.celery_executor = celery_executor
self.kubernetes_executor = kubernetes_executor
|
https://github.com/apache/airflow/issues/13263
|
[2020-12-22 20:38:42,532: INFO/MainProcess] Connected to redis://:**@airflow-redis:6379/0
[2020-12-22 20:38:42,546: INFO/MainProcess] mingle: searching for neighbors
[2020-12-22 20:38:43,545: INFO/MainProcess] mingle: all alone
[2020-12-22 20:38:43,581: INFO/MainProcess] celery@airflow-worker-0 ready.
[2020-12-22 20:38:43,590: INFO/MainProcess] Received task: airflow.executors.celery_executor.execute_command[81f701fd-e379-4ff7-9b20-e6c88123a3cb]
[2020-12-22 20:38:43,596: INFO/MainProcess] Received task: airflow.executors.celery_executor.execute_command[9d6bf5eb-fbde-4b13-a171-27d6e8e1ee43]
[2020-12-22 20:38:43,600: INFO/MainProcess] Received task: airflow.executors.celery_executor.execute_command[736f0f62-34f2-4ae4-92d5-e88ebf771c16]
[2020-12-22 20:38:43,606: INFO/MainProcess] Received task: airflow.executors.celery_executor.execute_command[ce2e8872-aac2-4463-a9e0-1c8dbe607bee]
[2020-12-22 20:38:43,615: INFO/MainProcess] Events of group {task} enabled by remote.
[2020-12-22 20:38:43,726: INFO/ForkPoolWorker-1] Executing command in Celery: ['airflow', 'tasks', 'run', 'my_example_bash_operator', 'runme_0', '2020-12-22T20:38:01.271670+00:00', '--local', '--pool', 'default_pool', '--subdir', '/opt/airflow/dags/test_dag.py']
[2020-12-22 20:38:43,746: INFO/ForkPoolWorker-2] Executing command in Celery: ['airflow', 'tasks', 'run', 'my_example_bash_operator', 'runme_1', '2020-12-22T20:38:01.271670+00:00', '--local', '--pool', 'default_pool', '--subdir', '/opt/airflow/dags/test_dag.py']
[2020-12-22 20:38:43,762: INFO/ForkPoolWorker-7] Executing command in Celery: ['airflow', 'tasks', 'run', 'my_example_bash_operator', 'runme_2', '2020-12-22T20:38:01.271670+00:00', '--local', '--pool', 'default_pool', '--subdir', '/opt/airflow/dags/test_dag.py']
[2020-12-22 20:38:43,769: INFO/ForkPoolWorker-8] Executing command in Celery: ['airflow', 'tasks', 'run', 'my_example_bash_operator', 'also_run_this', '2020-12-22T20:38:01.271670+00:00', '--local', '--pool', 'default_pool', '--subdir', '/opt/airflow/dags/test_dag.py']
[2020-12-22 20:38:44,055: INFO/ForkPoolWorker-8] Filling up the DagBag from /opt/airflow/dags/test_dag.py
[2020-12-22 20:38:44,085: INFO/ForkPoolWorker-2] Filling up the DagBag from /opt/airflow/dags/test_dag.py
[2020-12-22 20:38:44,091: INFO/ForkPoolWorker-1] Filling up the DagBag from /opt/airflow/dags/test_dag.py
[2020-12-22 20:38:44,142: INFO/ForkPoolWorker-7] Filling up the DagBag from /opt/airflow/dags/test_dag.py
[2020-12-22 20:38:44,324: WARNING/ForkPoolWorker-8] Running <TaskInstance: my_example_bash_operator.also_run_this 2020-12-22T20:38:01.271670+00:00 [queued]> on host airflow-worker-0.airflow-worker.airflow.svc.cluster.local
[2020-12-22 20:38:44,406: WARNING/ForkPoolWorker-2] Running <TaskInstance: my_example_bash_operator.runme_1 2020-12-22T20:38:01.271670+00:00 [queued]> on host airflow-worker-0.airflow-worker.airflow.svc.cluster.local
[2020-12-22 20:38:44,438: WARNING/ForkPoolWorker-1] Running <TaskInstance: my_example_bash_operator.runme_0 2020-12-22T20:38:01.271670+00:00 [queued]> on host airflow-worker-0.airflow-worker.airflow.svc.cluster.local
[2020-12-22 20:38:44,495: ERROR/ForkPoolWorker-8] Failed to execute task daemonic processes are not allowed to have children.
[2020-12-22 20:38:44,543: ERROR/ForkPoolWorker-8] Task airflow.executors.celery_executor.execute_command[9d6bf5eb-fbde-4b13-a171-27d6e8e1ee43] raised unexpected: AirflowException('Celery command failed on host: airflow-worker-0.airflow-worker.airflow.svc.cluster.local')
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.7/site-packages/celery/app/trace.py", line 412, in trace_task
R = retval = fun(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/celery/app/trace.py", line 704, in __protected_call__
return self.run(*args, **kwargs)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/executors/celery_executor.py", line 87, in execute_command
_execute_in_fork(command_to_exec)
File "/home/airflow/.local/lib/python3.7/site-packages/airflow/executors/celery_executor.py", line 98, in _execute_in_fork
raise AirflowException('Celery command failed on host: ' + get_hostname())
airflow.exceptions.AirflowException: Celery command failed on host: airflow-worker-0.airflow-worker.airflow.svc.cluster.local
|
airflow.exceptions.AirflowException
|
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns are
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge("scheduler.tasks.without_dagrun", tis_changed)
|
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns are
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(
query, of=TI, **skip_locked(session=session)
).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge("scheduler.tasks.without_dagrun", tis_changed)
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def _executable_task_instances_to_queued(
self, max_tis: int, session: Session = None
) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool["open"] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload("dag_model"))
.limit(max_tis)
)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
session=session,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join([repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s",
len(task_instances_to_examine),
task_instance_str,
)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled", pool
)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots,
pool,
)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so where we can avoid loading the full
# serialized DAG the better.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f"pool.starving_tasks.{pool_name}", num_starving_tasks)
Stats.gauge("scheduler.tasks.starving", num_starving_tasks_total)
Stats.gauge("scheduler.tasks.running", num_tasks_in_executor)
Stats.gauge("scheduler.tasks.executable", len(executable_tis))
task_instance_str = "\n\t".join([repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str
)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{
TI.state: State.QUEUED,
TI.queued_dttm: timezone.utcnow(),
TI.queued_by_job_id: self.id,
},
synchronize_session=False,
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
|
def _executable_task_instances_to_queued(
self, max_tis: int, session: Session = None
) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool["open"] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload("dag_model"))
.limit(max_tis)
)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join([repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s",
len(task_instances_to_examine),
task_instance_str,
)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled", pool
)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots,
pool,
)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so where we can avoid loading the full
# serialized DAG the better.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f"pool.starving_tasks.{pool_name}", num_starving_tasks)
Stats.gauge("scheduler.tasks.starving", num_starving_tasks_total)
Stats.gauge("scheduler.tasks.running", num_tasks_in_executor)
Stats.gauge("scheduler.tasks.executable", len(executable_tis))
task_instance_str = "\n\t".join([repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str
)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{
TI.state: State.QUEUED,
TI.queued_dttm: timezone.utcnow(),
TI.queued_by_job_id: self.id,
},
synchronize_session=False,
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query, session=session).all()
if not tis_to_set_to_scheduled:
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info(
"Set the following tasks to scheduled state:\n\t%s", task_instance_str
)
|
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query).all()
if not tis_to_set_to_scheduled:
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info(
"Set the following tasks to scheduled state:\n\t%s", task_instance_str
)
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint("scheduler", "scheduler_health_check_threshold")
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat
< (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + "_end", num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr("scheduler.orphaned_tasks.cleared", len(to_reset))
Stats.incr(
"scheduler.orphaned_tasks.adopted", len(tis_to_reset_or_adopt) - len(to_reset)
)
if to_reset:
task_instance_str = "\n\t".join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
return len(to_reset)
|
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint("scheduler", "scheduler_health_check_threshold")
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat
< (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + "_end", num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr("scheduler.orphaned_tasks.cleared", len(to_reset))
Stats.incr(
"scheduler.orphaned_tasks.adopted", len(tis_to_reset_or_adopt) - len(to_reset)
)
if to_reset:
task_instance_str = "\n\t".join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
return len(to_reset)
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def bulk_write_to_db(cls, dags: Collection["DAG"], session=None):
"""
Ensure the DagModel rows for the given dags are up-to-date in the dag table in the DB, including
calculated fields.
Note that this method can be called for both DAGs and SubDAGs. A SubDag is actually a SubDagOperator.
:param dags: the DAG objects to save to the DB
:type dags: List[airflow.models.dag.DAG]
:return: None
"""
if not dags:
return
log.info("Sync %s DAGs", len(dags))
dag_by_ids = {dag.dag_id: dag for dag in dags}
dag_ids = set(dag_by_ids.keys())
query = (
session.query(DagModel)
.options(joinedload(DagModel.tags, innerjoin=False))
.filter(DagModel.dag_id.in_(dag_ids))
)
orm_dags = with_row_locks(query, of=DagModel, session=session).all()
existing_dag_ids = {orm_dag.dag_id for orm_dag in orm_dags}
missing_dag_ids = dag_ids.difference(existing_dag_ids)
for missing_dag_id in missing_dag_ids:
orm_dag = DagModel(dag_id=missing_dag_id)
dag = dag_by_ids[missing_dag_id]
if dag.is_paused_upon_creation is not None:
orm_dag.is_paused = dag.is_paused_upon_creation
orm_dag.tags = []
log.info("Creating ORM DAG for %s", dag.dag_id)
session.add(orm_dag)
orm_dags.append(orm_dag)
# Get the latest dag run for each existing dag as a single query (avoid n+1 query)
most_recent_dag_runs = dict(
session.query(DagRun.dag_id, func.max_(DagRun.execution_date))
.filter(
DagRun.dag_id.in_(existing_dag_ids),
or_(
DagRun.run_type == DagRunType.BACKFILL_JOB,
DagRun.run_type == DagRunType.SCHEDULED,
),
)
.group_by(DagRun.dag_id)
.all()
)
# Get number of active dagruns for all dags we are processing as a single query.
num_active_runs = dict(
session.query(DagRun.dag_id, func.count("*"))
.filter(
DagRun.dag_id.in_(existing_dag_ids),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for orm_dag in sorted(orm_dags, key=lambda d: d.dag_id):
dag = dag_by_ids[orm_dag.dag_id]
if dag.is_subdag:
orm_dag.is_subdag = True
orm_dag.fileloc = dag.parent_dag.fileloc # type: ignore
orm_dag.root_dag_id = dag.parent_dag.dag_id # type: ignore
orm_dag.owners = dag.parent_dag.owner # type: ignore
else:
orm_dag.is_subdag = False
orm_dag.fileloc = dag.fileloc
orm_dag.owners = dag.owner
orm_dag.is_active = True
orm_dag.default_view = dag.default_view
orm_dag.description = dag.description
orm_dag.schedule_interval = dag.schedule_interval
orm_dag.concurrency = dag.concurrency
orm_dag.has_task_concurrency_limits = any(
t.task_concurrency is not None for t in dag.tasks
)
orm_dag.calculate_dagrun_date_fields(
dag,
most_recent_dag_runs.get(dag.dag_id),
num_active_runs.get(dag.dag_id, 0),
)
for orm_tag in list(orm_dag.tags):
if orm_tag.name not in orm_dag.tags:
session.delete(orm_tag)
orm_dag.tags.remove(orm_tag)
if dag.tags:
orm_tag_names = [t.name for t in orm_dag.tags]
for dag_tag in list(dag.tags):
if dag_tag not in orm_tag_names:
dag_tag_orm = DagTag(name=dag_tag, dag_id=dag.dag_id)
orm_dag.tags.append(dag_tag_orm)
session.add(dag_tag_orm)
if settings.STORE_DAG_CODE:
DagCode.bulk_sync_to_db([dag.fileloc for dag in orm_dags])
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
for dag in dags:
cls.bulk_write_to_db(dag.subdags, session=session)
|
def bulk_write_to_db(cls, dags: Collection["DAG"], session=None):
"""
Ensure the DagModel rows for the given dags are up-to-date in the dag table in the DB, including
calculated fields.
Note that this method can be called for both DAGs and SubDAGs. A SubDag is actually a SubDagOperator.
:param dags: the DAG objects to save to the DB
:type dags: List[airflow.models.dag.DAG]
:return: None
"""
if not dags:
return
log.info("Sync %s DAGs", len(dags))
dag_by_ids = {dag.dag_id: dag for dag in dags}
dag_ids = set(dag_by_ids.keys())
query = (
session.query(DagModel)
.options(joinedload(DagModel.tags, innerjoin=False))
.filter(DagModel.dag_id.in_(dag_ids))
)
orm_dags = with_row_locks(query, of=DagModel).all()
existing_dag_ids = {orm_dag.dag_id for orm_dag in orm_dags}
missing_dag_ids = dag_ids.difference(existing_dag_ids)
for missing_dag_id in missing_dag_ids:
orm_dag = DagModel(dag_id=missing_dag_id)
dag = dag_by_ids[missing_dag_id]
if dag.is_paused_upon_creation is not None:
orm_dag.is_paused = dag.is_paused_upon_creation
orm_dag.tags = []
log.info("Creating ORM DAG for %s", dag.dag_id)
session.add(orm_dag)
orm_dags.append(orm_dag)
# Get the latest dag run for each existing dag as a single query (avoid n+1 query)
most_recent_dag_runs = dict(
session.query(DagRun.dag_id, func.max_(DagRun.execution_date))
.filter(
DagRun.dag_id.in_(existing_dag_ids),
or_(
DagRun.run_type == DagRunType.BACKFILL_JOB,
DagRun.run_type == DagRunType.SCHEDULED,
),
)
.group_by(DagRun.dag_id)
.all()
)
# Get number of active dagruns for all dags we are processing as a single query.
num_active_runs = dict(
session.query(DagRun.dag_id, func.count("*"))
.filter(
DagRun.dag_id.in_(existing_dag_ids),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for orm_dag in sorted(orm_dags, key=lambda d: d.dag_id):
dag = dag_by_ids[orm_dag.dag_id]
if dag.is_subdag:
orm_dag.is_subdag = True
orm_dag.fileloc = dag.parent_dag.fileloc # type: ignore
orm_dag.root_dag_id = dag.parent_dag.dag_id # type: ignore
orm_dag.owners = dag.parent_dag.owner # type: ignore
else:
orm_dag.is_subdag = False
orm_dag.fileloc = dag.fileloc
orm_dag.owners = dag.owner
orm_dag.is_active = True
orm_dag.default_view = dag.default_view
orm_dag.description = dag.description
orm_dag.schedule_interval = dag.schedule_interval
orm_dag.concurrency = dag.concurrency
orm_dag.has_task_concurrency_limits = any(
t.task_concurrency is not None for t in dag.tasks
)
orm_dag.calculate_dagrun_date_fields(
dag,
most_recent_dag_runs.get(dag.dag_id),
num_active_runs.get(dag.dag_id, 0),
)
for orm_tag in list(orm_dag.tags):
if orm_tag.name not in orm_dag.tags:
session.delete(orm_tag)
orm_dag.tags.remove(orm_tag)
if dag.tags:
orm_tag_names = [t.name for t in orm_dag.tags]
for dag_tag in list(dag.tags):
if dag_tag not in orm_tag_names:
dag_tag_orm = DagTag(name=dag_tag, dag_id=dag.dag_id)
orm_dag.tags.append(dag_tag_orm)
session.add(dag_tag_orm)
if settings.STORE_DAG_CODE:
DagCode.bulk_sync_to_db([dag.fileloc for dag in orm_dags])
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
for dag in dags:
cls.bulk_write_to_db(dag.subdags, session=session)
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def dags_needing_dagruns(cls, session: Session):
"""
Return (and lock) a list of Dag objects that are due to create a new DagRun.
This will return a resultset of rows that is row-level-locked with a "SELECT ... FOR UPDATE" query,
you should ensure that any scheduling decisions are made in a single transaction -- as soon as the
transaction is committed it will be unlocked.
"""
# TODO[HA]: Bake this query, it is run _A lot_
# We limit so that _one_ scheduler doesn't try to do all the creation
# of dag runs
query = (
session.query(cls)
.filter(
cls.is_paused.is_(False),
cls.is_active.is_(True),
cls.next_dagrun_create_after <= func.now(),
)
.order_by(cls.next_dagrun_create_after)
.limit(cls.NUM_DAGS_PER_DAGRUN_QUERY)
)
return with_row_locks(
query, of=cls, session=session, **skip_locked(session=session)
)
|
def dags_needing_dagruns(cls, session: Session):
"""
Return (and lock) a list of Dag objects that are due to create a new DagRun.
This will return a resultset of rows that is row-level-locked with a "SELECT ... FOR UPDATE" query,
you should ensure that any scheduling decisions are made in a single transaction -- as soon as the
transaction is committed it will be unlocked.
"""
# TODO[HA]: Bake this query, it is run _A lot_
# We limit so that _one_ scheduler doesn't try to do all the creation
# of dag runs
query = (
session.query(cls)
.filter(
cls.is_paused.is_(False),
cls.is_active.is_(True),
cls.next_dagrun_create_after <= func.now(),
)
.order_by(cls.next_dagrun_create_after)
.limit(cls.NUM_DAGS_PER_DAGRUN_QUERY)
)
return with_row_locks(query, of=cls, **skip_locked(session=session))
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def next_dagruns_to_examine(
cls,
session: Session,
max_number: Optional[int] = None,
):
"""
Return the next DagRuns that the scheduler should attempt to schedule.
This will return zero or more DagRun rows that are row-level-locked with a "SELECT ... FOR UPDATE"
query, you should ensure that any scheduling decisions are made in a single transaction -- as soon as
the transaction is committed it will be unlocked.
:rtype: list[airflow.models.DagRun]
"""
from airflow.models.dag import DagModel
if max_number is None:
max_number = cls.DEFAULT_DAGRUNS_TO_EXAMINE
# TODO: Bake this query, it is run _A lot_
query = (
session.query(cls)
.filter(cls.state == State.RUNNING, cls.run_type != DagRunType.BACKFILL_JOB)
.join(
DagModel,
DagModel.dag_id == cls.dag_id,
)
.filter(
DagModel.is_paused.is_(False),
DagModel.is_active.is_(True),
)
.order_by(
nulls_first(cls.last_scheduling_decision, session=session),
cls.execution_date,
)
)
if not settings.ALLOW_FUTURE_EXEC_DATES:
query = query.filter(DagRun.execution_date <= func.now())
return with_row_locks(
query.limit(max_number), of=cls, session=session, **skip_locked(session=session)
)
|
def next_dagruns_to_examine(
cls,
session: Session,
max_number: Optional[int] = None,
):
"""
Return the next DagRuns that the scheduler should attempt to schedule.
This will return zero or more DagRun rows that are row-level-locked with a "SELECT ... FOR UPDATE"
query, you should ensure that any scheduling decisions are made in a single transaction -- as soon as
the transaction is committed it will be unlocked.
:rtype: list[airflow.models.DagRun]
"""
from airflow.models.dag import DagModel
if max_number is None:
max_number = cls.DEFAULT_DAGRUNS_TO_EXAMINE
# TODO: Bake this query, it is run _A lot_
query = (
session.query(cls)
.filter(cls.state == State.RUNNING, cls.run_type != DagRunType.BACKFILL_JOB)
.join(
DagModel,
DagModel.dag_id == cls.dag_id,
)
.filter(
DagModel.is_paused.is_(False),
DagModel.is_active.is_(True),
)
.order_by(
nulls_first(cls.last_scheduling_decision, session=session),
cls.execution_date,
)
)
if not settings.ALLOW_FUTURE_EXEC_DATES:
query = query.filter(DagRun.execution_date <= func.now())
return with_row_locks(
query.limit(max_number), of=cls, **skip_locked(session=session)
)
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def slots_stats(
*,
lock_rows: bool = False,
session: Session = None,
) -> Dict[str, PoolStats]:
"""
Get Pool stats (Number of Running, Queued, Open & Total tasks)
If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a
non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an
OperationalError.
:param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returns
:param session: SQLAlchemy ORM Session
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
pools: Dict[str, PoolStats] = {}
query = session.query(Pool.pool, Pool.slots)
if lock_rows:
query = with_row_locks(query, session=session, **nowait(session))
pool_rows: Iterable[Tuple[str, int]] = query.all()
for pool_name, total_slots in pool_rows:
pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)
state_count_by_pool = (
session.query(TaskInstance.pool, TaskInstance.state, func.count())
.filter(TaskInstance.state.in_(list(EXECUTION_STATES)))
.group_by(TaskInstance.pool, TaskInstance.state)
).all()
# calculate queued and running metrics
count: int
for pool_name, state, count in state_count_by_pool:
stats_dict: Optional[PoolStats] = pools.get(pool_name)
if not stats_dict:
continue
# TypedDict key must be a string literal, so we use if-statements to set value
if state == "running":
stats_dict["running"] = count
elif state == "queued":
stats_dict["queued"] = count
else:
raise AirflowException(
f"Unexpected state. Expected values: {EXECUTION_STATES}."
)
# calculate open metric
for pool_name, stats_dict in pools.items():
if stats_dict["total"] == -1:
# -1 means infinite
stats_dict["open"] = -1
else:
stats_dict["open"] = (
stats_dict["total"] - stats_dict["running"] - stats_dict["queued"]
)
return pools
|
def slots_stats(
*,
lock_rows: bool = False,
session: Session = None,
) -> Dict[str, PoolStats]:
"""
Get Pool stats (Number of Running, Queued, Open & Total tasks)
If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a
non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an
OperationalError.
:param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returns
:param session: SQLAlchemy ORM Session
"""
from airflow.models.taskinstance import TaskInstance # Avoid circular import
pools: Dict[str, PoolStats] = {}
query = session.query(Pool.pool, Pool.slots)
if lock_rows:
query = with_row_locks(query, **nowait(session))
pool_rows: Iterable[Tuple[str, int]] = query.all()
for pool_name, total_slots in pool_rows:
pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)
state_count_by_pool = (
session.query(TaskInstance.pool, TaskInstance.state, func.count())
.filter(TaskInstance.state.in_(list(EXECUTION_STATES)))
.group_by(TaskInstance.pool, TaskInstance.state)
).all()
# calculate queued and running metrics
count: int
for pool_name, state, count in state_count_by_pool:
stats_dict: Optional[PoolStats] = pools.get(pool_name)
if not stats_dict:
continue
# TypedDict key must be a string literal, so we use if-statements to set value
if state == "running":
stats_dict["running"] = count
elif state == "queued":
stats_dict["queued"] = count
else:
raise AirflowException(
f"Unexpected state. Expected values: {EXECUTION_STATES}."
)
# calculate open metric
for pool_name, stats_dict in pools.items():
if stats_dict["total"] == -1:
# -1 means infinite
stats_dict["open"] = -1
else:
stats_dict["open"] = (
stats_dict["total"] - stats_dict["running"] - stats_dict["queued"]
)
return pools
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def _run_mini_scheduler_on_child_tasks(self, session=None) -> None:
if conf.getboolean("scheduler", "schedule_after_task_execution", fallback=True):
from airflow.models.dagrun import DagRun # Avoid circular import
try:
# Re-select the row with a lock
dag_run = with_row_locks(
session.query(DagRun).filter_by(
dag_id=self.dag_id,
execution_date=self.execution_date,
),
session=session,
).one()
# Get a partial dag with just the specific tasks we want to
# examine. In order for dep checks to work correctly, we
# include ourself (so TriggerRuleDep can check the state of the
# task we just executed)
partial_dag = self.task.dag.partial_subset(
self.task.downstream_task_ids,
include_downstream=False,
include_upstream=False,
include_direct_upstream=True,
)
dag_run.dag = partial_dag
info = dag_run.task_instance_scheduling_decisions(session)
skippable_task_ids = {
task_id
for task_id in partial_dag.task_ids
if task_id not in self.task.downstream_task_ids
}
schedulable_tis = [
ti
for ti in info.schedulable_tis
if ti.task_id not in skippable_task_ids
]
for schedulable_ti in schedulable_tis:
if not hasattr(schedulable_ti, "task"):
schedulable_ti.task = self.task.dag.get_task(schedulable_ti.task_id)
num = dag_run.schedule_tis(schedulable_tis)
self.log.info(
"%d downstream tasks scheduled from follow-on schedule check", num
)
session.commit()
except OperationalError as e:
# Any kind of DB error here is _non fatal_ as this block is just an optimisation.
self.log.info(
f"Skipping mini scheduling run due to exception: {e.statement}",
exc_info=True,
)
session.rollback()
|
def _run_mini_scheduler_on_child_tasks(self, session=None) -> None:
if conf.getboolean("scheduler", "schedule_after_task_execution", fallback=True):
from airflow.models.dagrun import DagRun # Avoid circular import
try:
# Re-select the row with a lock
dag_run = with_row_locks(
session.query(DagRun).filter_by(
dag_id=self.dag_id,
execution_date=self.execution_date,
)
).one()
# Get a partial dag with just the specific tasks we want to
# examine. In order for dep checks to work correctly, we
# include ourself (so TriggerRuleDep can check the state of the
# task we just executed)
partial_dag = self.task.dag.partial_subset(
self.task.downstream_task_ids,
include_downstream=False,
include_upstream=False,
include_direct_upstream=True,
)
dag_run.dag = partial_dag
info = dag_run.task_instance_scheduling_decisions(session)
skippable_task_ids = {
task_id
for task_id in partial_dag.task_ids
if task_id not in self.task.downstream_task_ids
}
schedulable_tis = [
ti
for ti in info.schedulable_tis
if ti.task_id not in skippable_task_ids
]
for schedulable_ti in schedulable_tis:
if not hasattr(schedulable_ti, "task"):
schedulable_ti.task = self.task.dag.get_task(schedulable_ti.task_id)
num = dag_run.schedule_tis(schedulable_tis)
self.log.info(
"%d downstream tasks scheduled from follow-on schedule check", num
)
session.commit()
except OperationalError as e:
# Any kind of DB error here is _non fatal_ as this block is just an optimisation.
self.log.info(
f"Skipping mini scheduling run due to exception: {e.statement}",
exc_info=True,
)
session.rollback()
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def with_row_locks(query, session: Session, **kwargs):
"""
Apply with_for_update to an SQLAlchemy query, if row level locking is in use.
:param query: An SQLAlchemy Query object
:param session: ORM Session
:param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc)
:return: updated query
"""
dialect = session.bind.dialect
# Don't use row level locks if the MySQL dialect (Mariadb & MySQL < 8) does not support it.
if USE_ROW_LEVEL_LOCKING and (
dialect.name != "mysql" or dialect.supports_for_update_of
):
return query.with_for_update(**kwargs)
else:
return query
|
def with_row_locks(query, **kwargs):
"""
Apply with_for_update to an SQLAlchemy query, if row level locking is in use.
:param query: An SQLAlchemy Query object
:param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc)
:return: updated query
"""
if USE_ROW_LEVEL_LOCKING:
return query.with_for_update(**kwargs)
else:
return query
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take places. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
self._create_dagruns_for_dags(guard, session)
dag_runs = self._get_next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
# Use try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
# SerializedDagNotFound should not happen here in the same loop because the DagRun would
# not be created in self._create_dag_runs if Serialized DAG does not exist
# But this would take care of the scenario when the Scheduler is restarted after DagRun is
# created and the DAG is deleted / renamed
try:
self._schedule_dag_run(
dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session
)
except SerializedDagNotFound:
self.log.exception(
"DAG '%s' not found in serialized_dag table", dag_run.dag_id
)
continue
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer("scheduler.critical_section_duration")
timer.start()
# Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
num_queued_tis = self._critical_section_execute_task_instances(
session=session
)
# Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
# metric, way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr("scheduler.critical_section_busy")
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
|
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take places. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
dag_runs = DagRun.next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
# Use try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
# SerializedDagNotFound should not happen here in the same loop because the DagRun would
# not be created in self._create_dag_runs if Serialized DAG does not exist
# But this would take care of the scenario when the Scheduler is restarted after DagRun is
# created and the DAG is deleted / renamed
try:
self._schedule_dag_run(
dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session
)
except SerializedDagNotFound:
self.log.exception(
"DAG '%s' not found in serialized_dag table", dag_run.dag_id
)
continue
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer("scheduler.critical_section_duration")
timer.start()
# Find anything TIs in state SCHEDULED, try to QUEUE it (send it to the executor)
num_queued_tis = self._critical_section_execute_task_instances(
session=session
)
# Make sure we only sent this metric if we obtained the lock, otherwise we'll skew the
# metric, way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr("scheduler.critical_section_busy")
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint("scheduler", "scheduler_health_check_threshold")
for attempt in run_with_db_retries(logger=self.log):
with attempt:
self.log.debug(
"Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
try:
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat
< (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info(
"Marked %d SchedulerJob instances as failed", num_failed
)
Stats.incr(self.__class__.__name__.lower() + "_end", num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(
or_(
TI.queued_by_job_id.is_(None),
SchedulerJob.state != State.RUNNING,
)
)
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr("scheduler.orphaned_tasks.cleared", len(to_reset))
Stats.incr(
"scheduler.orphaned_tasks.adopted",
len(tis_to_reset_or_adopt) - len(to_reset),
)
if to_reset:
task_instance_str = "\n\t".join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session
# commit (or if passed a session, let caller decide when to commit
session.flush()
except OperationalError:
session.rollback()
raise
return len(to_reset)
|
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint("scheduler", "scheduler_health_check_threshold")
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat
< (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + "_end", num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr("scheduler.orphaned_tasks.cleared", len(to_reset))
Stats.incr(
"scheduler.orphaned_tasks.adopted", len(tis_to_reset_or_adopt) - len(to_reset)
)
if to_reset:
task_instance_str = "\n\t".join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
return len(to_reset)
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
def sync_to_db(self, session: Optional[Session] = None):
"""Save attributes about list of DAG to the DB."""
# To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
def _serialze_dag_capturing_errors(dag, session):
"""
Try to serialize the dag to the DB, but make a note of any errors.
We can't place them directly in import_errors, as this may be retried, and work the next time
"""
if dag.is_subdag:
return []
try:
# We cant use bulk_write_to_db as we want to capture each error individually
SerializedDagModel.write_dag(
dag,
min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session,
)
return []
except OperationalError:
raise
except Exception: # pylint: disable=broad-except
return [
(
dag.fileloc,
traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
),
)
]
# Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
# of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in run_with_db_retries(logger=self.log):
with attempt:
serialize_errors = []
self.log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
self.log.debug("Calling the DAG.bulk_sync_to_db method")
try:
# Write Serialized DAGs to DB, capturing errors
for dag in self.dags.values():
serialize_errors.extend(
_serialze_dag_capturing_errors(dag, session)
)
DAG.bulk_write_to_db(self.dags.values(), session=session)
except OperationalError:
session.rollback()
raise
# Only now we are "complete" do we update import_errors - don't want to record errors from
# previous failed attempts
self.import_errors.update(dict(serialize_errors))
|
def sync_to_db(self, session: Optional[Session] = None):
"""Save attributes about list of DAG to the DB."""
# To avoid circular import - airflow.models.dagbag -> airflow.models.dag -> airflow.models.dagbag
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
def _serialze_dag_capturing_errors(dag, session):
"""
Try to serialize the dag to the DB, but make a note of any errors.
We can't place them directly in import_errors, as this may be retried, and work the next time
"""
if dag.is_subdag:
return []
try:
# We cant use bulk_write_to_db as we want to capture each error individually
SerializedDagModel.write_dag(
dag,
min_update_interval=settings.MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
session=session,
)
return []
except OperationalError:
raise
except Exception: # pylint: disable=broad-except
return [
(
dag.fileloc,
traceback.format_exc(
limit=-self.dagbag_import_error_traceback_depth
),
)
]
# Retry 'DAG.bulk_write_to_db' & 'SerializedDagModel.bulk_sync_to_db' in case
# of any Operational Errors
# In case of failures, provide_session handles rollback
for attempt in tenacity.Retrying(
retry=tenacity.retry_if_exception_type(exception_types=OperationalError),
wait=tenacity.wait_random_exponential(multiplier=0.5, max=5),
stop=tenacity.stop_after_attempt(settings.MAX_DB_RETRIES),
before_sleep=tenacity.before_sleep_log(self.log, logging.DEBUG),
reraise=True,
):
with attempt:
serialize_errors = []
self.log.debug(
"Running dagbag.sync_to_db with retries. Try %d of %d",
attempt.retry_state.attempt_number,
settings.MAX_DB_RETRIES,
)
self.log.debug("Calling the DAG.bulk_sync_to_db method")
try:
# Write Serialized DAGs to DB, capturing errors
for dag in self.dags.values():
serialize_errors.extend(
_serialze_dag_capturing_errors(dag, session)
)
DAG.bulk_write_to_db(self.dags.values(), session=session)
except OperationalError:
session.rollback()
raise
# Only now we are "complete" do we update import_errors - don't want to record errors from
# previous failed attempts
self.import_errors.update(dict(serialize_errors))
|
https://github.com/apache/airflow/issues/11899
|
[2020-10-26 09:03:54,608] {{settings.py:49}} INFO - Configured default timezone Timezone('UTC')
[2020-10-26 09:04:05,467] {{scheduler_job.py:1327}} ERROR - Exception when executing SchedulerJob._run_scheduler_loop
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
_mysql_exceptions.OperationalError: (1213, 'Deadlock found when trying to get lock; try restarting transaction')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1308, in _execute
self._run_scheduler_loop()
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1379, in _run_scheduler_loop
num_queued_tis = self._do_scheduling(session)
File "/home/airflow/.local/lib/python3.8/site-packages/airflow/jobs/scheduler_job.py", line 1451, in _do_scheduling
self._create_dag_runs(query.all(), session)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3341, in all
return list(self)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3503, in __iter__
return self._execute_and_instances(context)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3528, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_clauseelement
ret = self._execute_context(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/home/airflow/.local/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 255, in execute
self.errorhandler(self, exc, value)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/connections.py", line 50, in defaulterrorhandler
raise errorvalue
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 252, in execute
res = self._query(query)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 379, in _query
self._do_get_result(db)
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 182, in _do_get_result
self._result = result = self._get_result()
File "/home/airflow/.local/lib/python3.8/site-packages/MySQLdb/cursors.py", line 411, in _get_result
return self._get_db().store_result()
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1213, 'Deadlock found when trying to get lock; try restarting transaction')
[SQL: SELECT dag.dag_id AS dag_dag_id, dag.root_dag_id AS dag_root_dag_id, dag.is_paused AS dag_is_paused, dag.is_subdag AS dag_is_subdag, dag.is_active AS dag_is_active, dag.last_scheduler_run AS dag_last_scheduler_run, dag.last_pickled AS dag_last_pickled, dag.last_expired AS dag_last_expired, dag.scheduler_lock AS dag_scheduler_lock, dag.pickle_id AS dag_pickle_id, dag.fileloc AS dag_fileloc, dag.owners AS dag_owners, dag.description AS dag_description, dag.default_view AS dag_default_view, dag.schedule_interval AS dag_schedule_interval, dag.concurrency AS dag_concurrency, dag.has_task_concurrency_limits AS dag_has_task_concurrency_limits, dag.next_dagrun AS dag_next_dagrun, dag.next_dagrun_create_after AS dag_next_dagrun_create_after
FROM dag
WHERE dag.is_paused IS false AND dag.is_active IS true AND dag.next_dagrun_create_after <= now() ORDER BY dag.next_dagrun_create_after
LIMIT %s FOR UPDATE]
[parameters: (10,)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
[2020-10-26 09:04:06,512] {{process_utils.py:102}} INFO - Sending Signals.SIGTERM to GPID 7437
[2020-10-26 09:04:07,029] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7762, status='terminated', started='09:04:05') (7762) terminated with exit code None
[2020-10-26 09:04:07,122] {{process_utils.py:219}} INFO - Waiting up to 5 seconds for processes to exit...
[2020-10-26 09:04:07,126] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7774, status='terminated', started='09:04:05') (7774) terminated with exit code None
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7437, status='terminated', exitcode=0, started='09:03:54') (7437) terminated with exit code 0
[2020-10-26 09:04:07,128] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7775, status='terminated', started='09:04:05') (7775) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7773, status='terminated', started='09:04:05') (7773) terminated with exit code None
[2020-10-26 09:04:07,129] {{process_utils.py:68}} INFO - Process psutil.Process(pid=7782, status='terminated', started='09:04:05') (7782) terminated with exit code None
|
_mysql_exceptions.OperationalError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.