after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __contains__(self, path):
if super().__contains__(path):
return True
return path in self.passthroughdict
|
def __contains__(self, path):
# if already in inventory, always return True.
return self.cache.in_inventory(path) or super().__contains__(path)
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def inventory(self, cache: snakemake.io.IOCache):
"""Using client.list_blobs(), we want to iterate over the objects in
the "folder" of a bucket and store information about the IOFiles in the
provided cache (snakemake.io.IOCache) indexed by bucket/blob name.
This will be called by the first mention of a remote object, and
iterate over the entire bucket once (and then not need to again).
This includes:
- cache.exist_remote
- cache_mtime
- cache.size
"""
if cache.remaining_wait_time <= 0:
# No more time to create inventory.
return
start_time = time.time()
subfolder = os.path.dirname(self.blob.name)
for blob in self.client.list_blobs(self.bucket_name, prefix=subfolder):
# By way of being listed, it exists. mtime is a datetime object
name = "{}/{}".format(blob.bucket.name, blob.name)
cache.exists_remote[name] = True
cache.mtime[name] = blob.updated.timestamp()
cache.size[name] = blob.size
cache.exists_local[name] = snakemake.io.IOCACHE_DEFERRED
cache.remaining_wait_time -= time.time() - start_time
# Mark bucket and prefix as having an inventory, such that this method is
# only called once for the subfolder in the bucket.
cache.has_inventory.add(self.inventory_root)
|
def inventory(self, cache: snakemake.io.IOCache):
"""Using client.list_blobs(), we want to iterate over the objects in
the "folder" of a bucket and store information about the IOFiles in the
provided cache (snakemake.io.IOCache) indexed by bucket/blob name.
This will be called by the first mention of a remote object, and
iterate over the entire bucket once (and then not need to again).
This includes:
- cache.exist_remote
- cache_mtime
- cache.size
"""
if cache.remaining_wait_time <= 0:
# No more time to create inventory.
return
start_time = time.time()
subfolder = os.path.dirname(self.blob.name)
for blob in self.client.list_blobs(self.bucket_name, prefix=subfolder):
# By way of being listed, it exists. mtime is a datetime object
name = "{}/{}".format(blob.bucket.name, blob.name)
cache.exists_remote[name] = True
cache.mtime[name] = blob.updated.timestamp()
cache.size[name] = blob.size
cache.remaining_wait_time -= time.time() - start_time
# Mark bucket and prefix as having an inventory, such that this method is
# only called once for the subfolder in the bucket.
cache.has_inventory.add("%s/%s" % (self.bucket_name, subfolder))
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def inventory(self, cache: snakemake.io.IOCache):
"""From this file, try to find as much existence and modification date
information as possible for files in the same folder.
"""
# If this is implemented in a remote object, results have to be stored in
# the given IOCache object.
pass
|
def inventory(self, cache: snakemake.io.IOCache):
"""From this file, try to find as much existence and modification date
information as possible.
"""
# If this is implemented in a remote object, results have to be stored in
# the given IOCache object.
pass
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def init(self, progress=False):
"""Initialise the DAG."""
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress, create_inventory=True)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(
self.file2jobs(file),
file=file,
progress=progress,
create_inventory=True,
)
self.targetjobs.add(job)
self.cleanup()
self.update_needrun(create_inventory=True)
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
|
def init(self, progress=False):
"""Initialise the DAG."""
for job in map(self.rule2job, self.targetrules):
job = self.update([job], progress=progress)
self.targetjobs.add(job)
for file in self.targetfiles:
job = self.update(self.file2jobs(file), file=file, progress=progress)
self.targetjobs.add(job)
self.cleanup()
self.update_needrun()
self.set_until_jobs()
self.delete_omitfrom_jobs()
self.update_jobids()
self.check_directory_outputs()
# check if remaining jobs are valid
for i, job in enumerate(self.jobs):
job.is_valid()
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def handle_remote(self, job, upload=True):
"""Remove local files if they are no longer needed and upload."""
if upload:
# handle output files
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime.remote()
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote and not f.protected and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local output file: {}".format(f))
f.remove()
|
def handle_remote(self, job, upload=True):
"""Remove local files if they are no longer needed and upload."""
if upload:
# handle output files
files = list(job.expanded_output)
if job.benchmark:
files.append(job.benchmark)
for f in files:
if f.is_remote and not f.should_stay_on_remote:
f.upload_to_remote()
remote_mtime = f.mtime
# immediately force local mtime to match remote,
# since conversions from S3 headers are not 100% reliable
# without this, newness comparisons may fail down the line
f.touch(times=(remote_mtime, remote_mtime))
if not f.exists_remote:
raise RemoteFileException(
"The file upload was attempted, but it does not "
"exist on remote. Check that your credentials have "
"read AND write permissions."
)
if not self.keep_remote_local:
# handle input files
needed = lambda job_, f: any(
f in files
for j, files in self.depending[job_].items()
if not self.finished(j) and self.needrun(j) and j != job
)
def unneeded_files():
putative = (
lambda f: f.is_remote and not f.protected and not f.should_keep_local
)
generated_input = set()
for job_, files in self.dependencies[job].items():
generated_input |= files
for f in filter(putative, files):
if not needed(job_, f):
yield f
for f, f_ in zip(job.output, job.rule.output):
if putative(f) and not needed(job, f) and not f in self.targetfiles:
if f in job.dynamic_output:
for f_ in job.expand_dynamic(f_):
yield f_
else:
yield f
for f in filter(putative, job.input):
# TODO what about remote inputs that are used by multiple jobs?
if f not in generated_input:
yield f
for f in unneeded_files():
if f.exists_local:
logger.info("Removing local output file: {}".format(f))
f.remove()
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def update(
self,
jobs,
file=None,
visited=None,
skip_until_dynamic=False,
progress=False,
create_inventory=False,
):
"""Update the DAG by adding given jobs and their dependencies."""
if visited is None:
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
skip_until_dynamic=skip_until_dynamic,
progress=progress,
create_inventory=create_inventory,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
raise WorkflowError(
e,
"If building the DAG exceeds the recursion limit, "
"this is likely due to a cyclic dependency."
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
+ "\nProblematic file pattern: {}".format(file)
if file
else "",
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
|
def update(
self, jobs, file=None, visited=None, skip_until_dynamic=False, progress=False
):
"""Update the DAG by adding given jobs and their dependencies."""
if visited is None:
visited = set()
producer = None
exceptions = list()
jobs = sorted(jobs, reverse=not self.ignore_ambiguity)
cycles = list()
for job in jobs:
logger.dag_debug(dict(status="candidate", job=job))
if file in job.input:
cycles.append(job)
continue
if job in visited:
cycles.append(job)
continue
try:
self.check_periodic_wildcards(job)
self.update_(
job,
visited=set(visited),
skip_until_dynamic=skip_until_dynamic,
progress=progress,
)
# TODO this might fail if a rule discarded here is needed
# elsewhere
if producer:
if job < producer or self.ignore_ambiguity:
break
elif producer is not None:
raise AmbiguousRuleException(file, job, producer)
producer = job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
exceptions.append(ex)
except RecursionError as e:
raise WorkflowError(
e,
"If building the DAG exceeds the recursion limit, "
"this is likely due to a cyclic dependency."
"E.g. you might have a sequence of rules that "
"can generate their own input. Try to make "
"the output files more specific. "
"A common pattern is to have different prefixes "
"in the output files of different rules."
+ "\nProblematic file pattern: {}".format(file)
if file
else "",
)
if producer is None:
if cycles:
job = cycles[0]
raise CyclicGraphException(job.rule, file, rule=job.rule)
if len(exceptions) > 1:
raise WorkflowError(*exceptions)
elif len(exceptions) == 1:
raise exceptions[0]
else:
logger.dag_debug(dict(status="selected", job=producer))
logger.dag_debug(
dict(
file=file,
msg="Producer found, hence exceptions are ignored.",
exception=WorkflowError(*exceptions),
)
)
n = len(self.dependencies)
if progress and n % 1000 == 0 and n and self._progress != n:
logger.info("Processed {} potential jobs.".format(n))
self._progress = n
return producer
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def update_(
self,
job,
visited=None,
skip_until_dynamic=False,
progress=False,
create_inventory=False,
):
"""Update the DAG by adding the given job and its dependencies."""
if job in self.dependencies:
return
if visited is None:
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for file, jobs in potential_dependencies.items():
if create_inventory:
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
file.inventory()
if not jobs:
# no producing job found
if not file.exists:
# file not found, hence missing input
missing_input.add(file)
# file found, no problem
continue
try:
selected_job = self.update(
jobs,
file=file,
visited=visited,
skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input,
progress=progress,
)
producer[file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any(
f for f in job.input if f not in potential_dependencies and not f.exists
):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
|
def update_(self, job, visited=None, skip_until_dynamic=False, progress=False):
"""Update the DAG by adding the given job and its dependencies."""
if job in self.dependencies:
return
if visited is None:
visited = set()
visited.add(job)
dependencies = self.dependencies[job]
potential_dependencies = self.collect_potential_dependencies(job)
skip_until_dynamic = skip_until_dynamic and not job.dynamic_output
missing_input = set()
producer = dict()
exceptions = dict()
for file, jobs in potential_dependencies.items():
# If possible, obtain inventory information starting from
# given file and store it in the IOCache.
# This should provide faster access to existence and mtime information
# than querying file by file. If the file type does not support inventory
# information, this call is a no-op.
file.inventory()
if not jobs:
# no producing job found
if not file.exists:
# file not found, hence missing input
missing_input.add(file)
# file found, no problem
continue
try:
selected_job = self.update(
jobs,
file=file,
visited=visited,
skip_until_dynamic=skip_until_dynamic or file in job.dynamic_input,
progress=progress,
)
producer[file] = selected_job
except (
MissingInputException,
CyclicGraphException,
PeriodicWildcardError,
WorkflowError,
) as ex:
if not file.exists:
self.delete_job(job, recursive=False) # delete job from tree
raise ex
else:
logger.dag_debug(
dict(
file=file,
msg="No producers found, but file is present on disk.",
exception=ex,
)
)
for file, job_ in producer.items():
dependencies[job_].add(file)
self.depending[job_][job].add(file)
if self.is_batch_rule(job.rule) and self.batch.is_final:
# For the final batch, ensure that all input files from
# previous batches are present on disk.
if any(
f for f in job.input if f not in potential_dependencies and not f.exists
):
raise WorkflowError(
"Unable to execute batch {} because not all previous batches "
"have been completed before or files have been deleted.".format(
self.batch
)
)
if missing_input:
self.delete_job(job, recursive=False) # delete job from tree
raise MissingInputException(job.rule, missing_input)
if skip_until_dynamic:
self._dynamic.add(job)
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime.local_or_remote()) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
|
def summary(self, detailed=False):
if detailed:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tinput-file(s)\tshellcmd\tstatus\tplan"
else:
yield "output_file\tdate\trule\tversion\tlog-file(s)\tstatus\tplan"
for job in self.jobs:
output = job.rule.output if self.dynamic(job) else job.expanded_output
for f in output:
rule = self.workflow.persistence.rule(f)
rule = "-" if rule is None else rule
version = self.workflow.persistence.version(f)
version = "-" if version is None else str(version)
date = time.ctime(f.mtime) if f.exists else "-"
pending = "update pending" if self.reason(job) else "no update"
log = self.workflow.persistence.log(f)
log = "-" if log is None else ",".join(log)
input = self.workflow.persistence.input(f)
input = "-" if input is None else ",".join(input)
shellcmd = self.workflow.persistence.shellcmd(f)
shellcmd = "-" if shellcmd is None else shellcmd
# remove new line characters, leading and trailing whitespace
shellcmd = shellcmd.strip().replace("\n", "; ")
status = "ok"
if not f.exists:
status = "missing"
elif self.reason(job).updated_input:
status = "updated input files"
elif self.workflow.persistence.version_changed(job, file=f):
status = "version changed to {}".format(job.rule.version)
elif self.workflow.persistence.code_changed(job, file=f):
status = "rule implementation changed"
elif self.workflow.persistence.input_changed(job, file=f):
status = "set of input files changed"
elif self.workflow.persistence.params_changed(job, file=f):
status = "params changed"
if detailed:
yield "\t".join(
(f, date, rule, version, log, input, shellcmd, status, pending)
)
else:
yield "\t".join((f, date, rule, version, log, status, pending))
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def __init__(self, cache):
super().__init__()
self.cache = cache
self.has_inventory = set()
|
def __init__(self, cache):
super().__init__()
self.cache = cache
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def mtime(self):
return self.mtime_uncached
|
def mtime(self):
return self.mtime_local
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def local(self, follow_symlinks=False):
if follow_symlinks and self._local_target is not None:
return self._local_target
return self._local
|
def local(value):
"""Mark a file as local file. This disables application of a default remote
provider.
"""
if is_flagged(value, "remote"):
raise SyntaxError("Remote and local flags are mutually exclusive.")
return flag(value, "local")
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def output_mintime(self):
"""Return oldest output file."""
try:
mintime = min(
f.mtime.local_or_remote() for f in self.expanded_output if f.exists
)
except ValueError:
# no existing output
mintime = None
if self.benchmark and self.benchmark.exists:
mintime_benchmark = self.benchmark.mtime.local_or_remote()
if mintime is not None:
return min(mintime, mintime_benchmark)
else:
return mintime_benchmark
return mintime
|
def output_mintime(self):
"""Return oldest output file."""
existing = [f.mtime for f in self.expanded_output if f.exists]
if self.benchmark and self.benchmark.exists:
existing.append(self.benchmark.mtime)
if existing:
return min(existing)
return None
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def remote_input_newer_than_local(self):
files = set()
for f in self.remote_input:
if (f.exists_remote and f.exists_local) and (
f.mtime.remote() > f.mtime.local(follow_symlinks=True)
):
files.add(f)
return files
|
def remote_input_newer_than_local(self):
files = set()
for f in self.remote_input:
if (f.exists_remote and f.exists_local) and (f.mtime > f.mtime_local):
files.add(f)
return files
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def remote_input_older_than_local(self):
files = set()
for f in self.remote_input:
if (f.exists_remote and f.exists_local) and (
f.mtime.remote() < f.mtime.local(follow_symlinks=True)
):
files.add(f)
return files
|
def remote_input_older_than_local(self):
files = set()
for f in self.remote_input:
if (f.exists_remote and f.exists_local) and (f.mtime < f.mtime_local):
files.add(f)
return files
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def remote_output_newer_than_local(self):
files = set()
for f in self.remote_output:
if (f.exists_remote and f.exists_local) and (
f.mtime.remote() > f.mtime.local(follow_symlinks=True)
):
files.add(f)
return files
|
def remote_output_newer_than_local(self):
files = set()
for f in self.remote_output:
if (f.exists_remote and f.exists_local) and (f.mtime > f.mtime_local):
files.add(f)
return files
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def remote_output_older_than_local(self):
files = set()
for f in self.remote_output:
if (f.exists_remote and f.exists_local) and (
f.mtime.remote() < f.mtime.local(follow_symlinks=True)
):
files.add(f)
return files
|
def remote_output_older_than_local(self):
files = set()
for f in self.remote_output:
if (f.exists_remote and f.exists_local) and (f.mtime < f.mtime_local):
files.add(f)
return files
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def finished(self, job, keep_metadata=True):
if not keep_metadata:
for f in job.expanded_output:
self._delete_record(self._incomplete_path, f)
return
version = str(job.rule.version) if job.rule.version is not None else None
code = self._code(job.rule)
input = self._input(job)
log = self._log(job)
params = self._params(job)
shellcmd = job.shellcmd
conda_env = self._conda_env(job)
fallback_time = time.time()
for f in job.expanded_output:
rec_path = self._record_path(self._incomplete_path, f)
starttime = os.path.getmtime(rec_path) if os.path.exists(rec_path) else None
endtime = f.mtime.local_or_remote() if f.exists else fallback_time
self._record(
self._metadata_path,
{
"version": version,
"code": code,
"rule": job.rule.name,
"input": input,
"log": log,
"params": params,
"shellcmd": shellcmd,
"incomplete": False,
"starttime": starttime,
"endtime": endtime,
"job_hash": hash(job),
"conda_env": conda_env,
"container_img_url": job.container_img_url,
},
f,
)
self._delete_record(self._incomplete_path, f)
|
def finished(self, job, keep_metadata=True):
if not keep_metadata:
for f in job.expanded_output:
self._delete_record(self._incomplete_path, f)
return
version = str(job.rule.version) if job.rule.version is not None else None
code = self._code(job.rule)
input = self._input(job)
log = self._log(job)
params = self._params(job)
shellcmd = job.shellcmd
conda_env = self._conda_env(job)
fallback_time = time.time()
for f in job.expanded_output:
rec_path = self._record_path(self._incomplete_path, f)
starttime = os.path.getmtime(rec_path) if os.path.exists(rec_path) else None
endtime = f.mtime if os.path.exists(f) else fallback_time
self._record(
self._metadata_path,
{
"version": version,
"code": code,
"rule": job.rule.name,
"input": input,
"log": log,
"params": params,
"shellcmd": shellcmd,
"incomplete": False,
"starttime": starttime,
"endtime": endtime,
"job_hash": hash(job),
"conda_env": conda_env,
"container_img_url": job.container_img_url,
},
f,
)
self._delete_record(self._incomplete_path, f)
|
https://github.com/snakemake/snakemake/issues/611
|
Building DAG of jobs...
Traceback (most recent call last):
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/__init__.py", line 709, in snakemake
keepincomplete=keep_incomplete,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/workflow.py", line 670, in execute
dag.init()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 177, in init
job = self.update(self.file2jobs(file), file=file, progress=progress)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 715, in update
progress=progress,
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/dag.py", line 792, in update_
file.inventory()
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 210, in inventory
self._local_inventory(cache)
File "<base_path>/software/miniconda/envs/snakemake/lib/python3.7/site-packages/snakemake/io.py", line 224, in _local_inventory
with os.scandir(path) as scan:
FileNotFoundError: [Errno 2] No such file or directory: '<analysis_patch>/tmp_batch113/codons'
|
FileNotFoundError
|
def inventory(self, cache: snakemake.io.IOCache):
"""Using client.list_blobs(), we want to iterate over the objects in
the "folder" of a bucket and store information about the IOFiles in the
provided cache (snakemake.io.IOCache) indexed by bucket/blob name.
This will be called by the first mention of a remote object, and
iterate over the entire bucket once (and then not need to again).
This includes:
- cache.exist_remote
- cache_mtime
- cache.size
"""
subfolder = os.path.dirname(self.blob.name)
for blob in self.client.list_blobs(self.bucket_name, prefix=subfolder):
# By way of being listed, it exists. mtime is a datetime object
name = "{}/{}".format(blob.bucket.name, blob.name)
cache.exists_remote[name] = True
cache.mtime[name] = blob.updated.timestamp()
cache.size[name] = blob.size
# Mark bucket and prefix as having an inventory, such that this method is
# only called once for the subfolder in the bucket.
cache.has_inventory.add("%s/%s" % (self.bucket_name, subfolder))
|
def inventory(self, cache: snakemake.io.IOCache):
"""Using client.list_blobs(), we want to iterate over the objects in
the "folder" of a bucket and store information about the IOFiles in the
provided cache (snakemake.io.IOCache) indexed by bucket/blob name.
This will be called by the first mention of a remote object, and
iterate over the entire bucket once (and then not need to again).
This includes:
- cache.exist_remote
- cache_mtime
- cache.size
"""
subfolder = os.path.dirname(self.blob.name)
for blob in self.client.list_blobs(self.bucket_name, prefix=subfolder):
# By way of being listed, it exists. mtime is a datetime object
name = "{}/{}".format(blob.bucket.name, blob.name)
cache.exists_remote[name] = True
cache.mtime[name] = blob.updated
cache.size[name] = blob.size
# Mark bucket and prefix as having an inventory, such that this method is
# only called once for the subfolder in the bucket.
cache.has_inventory.add("%s/%s" % (self.bucket_name, subfolder))
|
https://github.com/snakemake/snakemake/issues/572
|
Traceback (most recent call last):
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/__init__.py", line 704, in snakemake
keepincomplete=keep_incomplete,
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/workflow.py", line 667, in execute
dag.init()
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/dag.py", line 182, in init
self.update_needrun()
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/dag.py", line 936, in update_needrun
update_needrun(job)
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/dag.py", line 912, in update_needrun
f for f in job.input if f.exists and f.is_newer(output_mintime_)
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/dag.py", line 912, in <listcomp>
f for f in job.input if f.exists and f.is_newer(output_mintime_)
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/io.py", line 195, in wrapper
return func(self, *args, **kwargs)
File "/home/hannes/miniconda3/envs/target-id/lib/python3.6/site-packages/snakemake/io.py", line 419, in is_newer
return self.mtime > time
TypeError: '>' not supported between instances of 'datetime.datetime' and 'float'
|
TypeError
|
def execute(
self,
targets=None,
dryrun=False,
touch=False,
local_cores=1,
forcetargets=False,
forceall=False,
forcerun=None,
until=[],
omit_from=[],
prioritytargets=None,
quiet=False,
keepgoing=False,
printshellcmds=False,
printreason=False,
printdag=False,
cluster=None,
cluster_sync=None,
jobname=None,
immediate_submit=False,
ignore_ambiguity=False,
printrulegraph=False,
printfilegraph=False,
printd3dag=False,
drmaa=None,
drmaa_log_dir=None,
kubernetes=None,
tibanna=None,
tibanna_sfn=None,
precommand="",
container_image=None,
stats=None,
force_incomplete=False,
ignore_incomplete=False,
list_version_changes=False,
list_code_changes=False,
list_input_changes=False,
list_params_changes=False,
list_untracked=False,
list_conda_envs=False,
summary=False,
archive=None,
delete_all_output=False,
delete_temp_output=False,
detailed_summary=False,
latency_wait=3,
wait_for_files=None,
nolock=False,
unlock=False,
notemp=False,
nodeps=False,
cleanup_metadata=None,
cleanup_conda=False,
cleanup_shadow=False,
cleanup_scripts=True,
subsnakemake=None,
updated_files=None,
keep_target_files=False,
keep_shadow=False,
keep_remote_local=False,
allowed_rules=None,
max_jobs_per_second=None,
max_status_checks_per_second=None,
greediness=1.0,
no_hooks=False,
force_use_threads=False,
create_envs_only=False,
assume_shared_fs=True,
cluster_status=None,
report=None,
export_cwl=False,
batch=None,
keepincomplete=False,
):
self.check_localrules()
self.immediate_submit = immediate_submit
self.cleanup_scripts = cleanup_scripts
def rules(items):
return map(self._rules.__getitem__, filter(self.is_rule, items))
if keep_target_files:
def files(items):
return filterfalse(self.is_rule, items)
else:
def files(items):
relpath = lambda f: f if os.path.isabs(f) else os.path.relpath(f)
return map(relpath, filterfalse(self.is_rule, items))
if not targets:
targets = [self.first_rule] if self.first_rule is not None else list()
if prioritytargets is None:
prioritytargets = list()
if forcerun is None:
forcerun = list()
if until is None:
until = list()
if omit_from is None:
omit_from = list()
priorityrules = set(rules(prioritytargets))
priorityfiles = set(files(prioritytargets))
forcerules = set(rules(forcerun))
forcefiles = set(files(forcerun))
untilrules = set(rules(until))
untilfiles = set(files(until))
omitrules = set(rules(omit_from))
omitfiles = set(files(omit_from))
targetrules = set(
chain(
rules(targets),
filterfalse(Rule.has_wildcards, priorityrules),
filterfalse(Rule.has_wildcards, forcerules),
filterfalse(Rule.has_wildcards, untilrules),
)
)
targetfiles = set(chain(files(targets), priorityfiles, forcefiles, untilfiles))
if forcetargets:
forcefiles.update(targetfiles)
forcerules.update(targetrules)
rules = self.rules
if allowed_rules:
rules = [rule for rule in rules if rule.name in set(allowed_rules)]
if wait_for_files is not None:
try:
snakemake.io.wait_for_files(wait_for_files, latency_wait=latency_wait)
except IOError as e:
logger.error(str(e))
return False
dag = DAG(
self,
rules,
dryrun=dryrun,
targetfiles=targetfiles,
targetrules=targetrules,
# when cleaning up conda, we should enforce all possible jobs
# since their envs shall not be deleted
forceall=forceall or cleanup_conda,
forcefiles=forcefiles,
forcerules=forcerules,
priorityfiles=priorityfiles,
priorityrules=priorityrules,
untilfiles=untilfiles,
untilrules=untilrules,
omitfiles=omitfiles,
omitrules=omitrules,
ignore_ambiguity=ignore_ambiguity,
force_incomplete=force_incomplete,
ignore_incomplete=ignore_incomplete
or printdag
or printrulegraph
or printfilegraph,
notemp=notemp,
keep_remote_local=keep_remote_local,
batch=batch,
)
self.persistence = Persistence(
nolock=nolock,
dag=dag,
conda_prefix=self.conda_prefix,
singularity_prefix=self.singularity_prefix,
shadow_prefix=self.shadow_prefix,
warn_only=dryrun
or printrulegraph
or printfilegraph
or printdag
or summary
or archive
or list_version_changes
or list_code_changes
or list_input_changes
or list_params_changes
or list_untracked
or delete_all_output
or delete_temp_output,
)
if cleanup_metadata:
for f in cleanup_metadata:
self.persistence.cleanup_metadata(f)
return True
logger.info("Building DAG of jobs...")
dag.init()
dag.update_checkpoint_dependencies()
# check incomplete has to run BEFORE any call to postprocess
dag.check_incomplete()
dag.check_dynamic()
if unlock:
try:
self.persistence.cleanup_locks()
logger.info("Unlocking working directory.")
return True
except IOError:
logger.error(
"Error: Unlocking the directory {} failed. Maybe "
"you don't have the permissions?"
)
return False
try:
self.persistence.lock()
except IOError:
logger.error(
"Error: Directory cannot be locked. Please make "
"sure that no other Snakemake process is trying to create "
"the same files in the following directory:\n{}\n"
"If you are sure that no other "
"instances of snakemake are running on this directory, "
"the remaining lock was likely caused by a kill signal or "
"a power loss. It can be removed with "
"the --unlock argument.".format(os.getcwd())
)
return False
if cleanup_shadow:
self.persistence.cleanup_shadow()
return True
if self.subworkflows and not printdag and not printrulegraph and not printfilegraph:
# backup globals
globals_backup = dict(self.globals)
# execute subworkflows
for subworkflow in self.subworkflows:
subworkflow_targets = subworkflow.targets(dag)
logger.debug(
"Files requested from subworkflow:\n {}".format(
"\n ".join(subworkflow_targets)
)
)
updated = list()
if subworkflow_targets:
logger.info("Executing subworkflow {}.".format(subworkflow.name))
if not subsnakemake(
subworkflow.snakefile,
workdir=subworkflow.workdir,
targets=subworkflow_targets,
configfiles=[subworkflow.configfile]
if subworkflow.configfile
else None,
updated_files=updated,
):
return False
dag.updated_subworkflow_files.update(
subworkflow.target(f) for f in updated
)
else:
logger.info(
"Subworkflow {}: Nothing to be done.".format(subworkflow.name)
)
if self.subworkflows:
logger.info("Executing main workflow.")
# rescue globals
self.globals.update(globals_backup)
dag.postprocess()
# deactivate IOCache such that from now on we always get updated
# size, existence and mtime information
# ATTENTION: this may never be removed without really good reason.
# Otherwise weird things may happen.
self.iocache.deactivate()
# clear and deactivate persistence cache, from now on we want to see updates
self.persistence.deactivate_cache()
if nodeps:
missing_input = [
f
for job in dag.targetjobs
for f in job.input
if dag.needrun(job) and not os.path.exists(f)
]
if missing_input:
logger.error(
"Dependency resolution disabled (--nodeps) "
"but missing input "
"files detected. If this happens on a cluster, please make sure "
"that you handle the dependencies yourself or turn off "
"--immediate-submit. Missing input files:\n{}".format(
"\n".join(missing_input)
)
)
return False
updated_files.extend(f for job in dag.needrun_jobs for f in job.output)
if export_cwl:
from snakemake.cwl import dag_to_cwl
import json
with open(export_cwl, "w") as cwl:
json.dump(dag_to_cwl(dag), cwl, indent=4)
return True
elif report:
from snakemake.report import auto_report
auto_report(dag, report)
return True
elif printd3dag:
dag.d3dag()
return True
elif printdag:
print(dag)
return True
elif printrulegraph:
print(dag.rule_dot())
return True
elif printfilegraph:
print(dag.filegraph_dot())
return True
elif summary:
print("\n".join(dag.summary(detailed=False)))
return True
elif detailed_summary:
print("\n".join(dag.summary(detailed=True)))
return True
elif archive:
dag.archive(archive)
return True
elif delete_all_output:
dag.clean(only_temp=False, dryrun=dryrun)
return True
elif delete_temp_output:
dag.clean(only_temp=True, dryrun=dryrun)
return True
elif list_version_changes:
items = list(chain(*map(self.persistence.version_changed, dag.jobs)))
if items:
print(*items, sep="\n")
return True
elif list_code_changes:
items = list(chain(*map(self.persistence.code_changed, dag.jobs)))
for j in dag.jobs:
items.extend(list(j.outputs_older_than_script_or_notebook()))
if items:
print(*items, sep="\n")
return True
elif list_input_changes:
items = list(chain(*map(self.persistence.input_changed, dag.jobs)))
if items:
print(*items, sep="\n")
return True
elif list_params_changes:
items = list(chain(*map(self.persistence.params_changed, dag.jobs)))
if items:
print(*items, sep="\n")
return True
elif list_untracked:
dag.list_untracked()
return True
if self.use_singularity:
if assume_shared_fs:
dag.pull_container_imgs(
dryrun=dryrun or list_conda_envs, quiet=list_conda_envs
)
if self.use_conda:
if assume_shared_fs:
dag.create_conda_envs(
dryrun=dryrun or list_conda_envs or cleanup_conda,
quiet=list_conda_envs,
)
if create_envs_only:
return True
if list_conda_envs:
print("environment", "container", "location", sep="\t")
for env in set(job.conda_env for job in dag.jobs):
if env:
print(
simplify_path(env.file),
env.container_img_url or "",
simplify_path(env.path),
sep="\t",
)
return True
if cleanup_conda:
self.persistence.cleanup_conda()
return True
scheduler = JobScheduler(
self,
dag,
self.cores,
local_cores=local_cores,
dryrun=dryrun,
touch=touch,
cluster=cluster,
cluster_status=cluster_status,
cluster_config=cluster_config,
cluster_sync=cluster_sync,
jobname=jobname,
max_jobs_per_second=max_jobs_per_second,
max_status_checks_per_second=max_status_checks_per_second,
quiet=quiet,
keepgoing=keepgoing,
drmaa=drmaa,
drmaa_log_dir=drmaa_log_dir,
kubernetes=kubernetes,
tibanna=tibanna,
tibanna_sfn=tibanna_sfn,
precommand=precommand,
container_image=container_image,
printreason=printreason,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
greediness=greediness,
force_use_threads=force_use_threads,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
)
if not dryrun:
if len(dag):
shell_exec = shell.get_executable()
if shell_exec is not None:
logger.info("Using shell: {}".format(shell_exec))
if cluster or cluster_sync or drmaa:
logger.resources_info("Provided cluster nodes: {}".format(self.nodes))
else:
warning = (
"" if self.cores > 1 else " (use --cores to define parallelism)"
)
logger.resources_info(
"Provided cores: {}{}".format(self.cores, warning)
)
logger.resources_info(
"Rules claiming more threads will be scaled down."
)
provided_resources = format_resources(self.global_resources)
if provided_resources:
logger.resources_info("Provided resources: " + provided_resources)
if self.run_local and any(rule.group for rule in self.rules):
logger.info("Group jobs: inactive (local execution)")
if not self.use_conda and any(rule.conda_env for rule in self.rules):
logger.info("Conda environments: ignored")
if not self.use_singularity and any(
rule.container_img for rule in self.rules
):
logger.info("Singularity containers: ignored")
logger.run_info("\n".join(dag.stats()))
else:
logger.info("Nothing to be done.")
else:
# the dryrun case
if len(dag):
logger.run_info("\n".join(dag.stats()))
else:
logger.info("Nothing to be done.")
return True
if quiet:
# in case of dryrun and quiet, just print above info and exit
return True
if not dryrun and not no_hooks:
self._onstart(logger.get_logfile())
success = scheduler.schedule()
if success:
if dryrun:
if len(dag):
logger.run_info("\n".join(dag.stats()))
logger.info(
"This was a dry-run (flag -n). The order of jobs "
"does not reflect the order of execution."
)
logger.remove_logfile()
else:
if stats:
scheduler.stats.to_json(stats)
logger.logfile_hint()
if not dryrun and not no_hooks:
self._onsuccess(logger.get_logfile())
return True
else:
if not dryrun and not no_hooks:
self._onerror(logger.get_logfile())
logger.logfile_hint()
return False
|
def execute(
self,
targets=None,
dryrun=False,
touch=False,
local_cores=1,
forcetargets=False,
forceall=False,
forcerun=None,
until=[],
omit_from=[],
prioritytargets=None,
quiet=False,
keepgoing=False,
printshellcmds=False,
printreason=False,
printdag=False,
cluster=None,
cluster_sync=None,
jobname=None,
immediate_submit=False,
ignore_ambiguity=False,
printrulegraph=False,
printfilegraph=False,
printd3dag=False,
drmaa=None,
drmaa_log_dir=None,
kubernetes=None,
tibanna=None,
tibanna_sfn=None,
precommand="",
container_image=None,
stats=None,
force_incomplete=False,
ignore_incomplete=False,
list_version_changes=False,
list_code_changes=False,
list_input_changes=False,
list_params_changes=False,
list_untracked=False,
list_conda_envs=False,
summary=False,
archive=None,
delete_all_output=False,
delete_temp_output=False,
detailed_summary=False,
latency_wait=3,
wait_for_files=None,
nolock=False,
unlock=False,
notemp=False,
nodeps=False,
cleanup_metadata=None,
cleanup_conda=False,
cleanup_shadow=False,
cleanup_scripts=True,
subsnakemake=None,
updated_files=None,
keep_target_files=False,
keep_shadow=False,
keep_remote_local=False,
allowed_rules=None,
max_jobs_per_second=None,
max_status_checks_per_second=None,
greediness=1.0,
no_hooks=False,
force_use_threads=False,
create_envs_only=False,
assume_shared_fs=True,
cluster_status=None,
report=None,
export_cwl=False,
batch=None,
keepincomplete=False,
):
self.check_localrules()
self.immediate_submit = immediate_submit
self.cleanup_scripts = cleanup_scripts
def rules(items):
return map(self._rules.__getitem__, filter(self.is_rule, items))
if keep_target_files:
def files(items):
return filterfalse(self.is_rule, items)
else:
def files(items):
relpath = lambda f: f if os.path.isabs(f) else os.path.relpath(f)
return map(relpath, filterfalse(self.is_rule, items))
if not targets:
targets = [self.first_rule] if self.first_rule is not None else list()
if prioritytargets is None:
prioritytargets = list()
if forcerun is None:
forcerun = list()
if until is None:
until = list()
if omit_from is None:
omit_from = list()
priorityrules = set(rules(prioritytargets))
priorityfiles = set(files(prioritytargets))
forcerules = set(rules(forcerun))
forcefiles = set(files(forcerun))
untilrules = set(rules(until))
untilfiles = set(files(until))
omitrules = set(rules(omit_from))
omitfiles = set(files(omit_from))
targetrules = set(
chain(
rules(targets),
filterfalse(Rule.has_wildcards, priorityrules),
filterfalse(Rule.has_wildcards, forcerules),
filterfalse(Rule.has_wildcards, untilrules),
)
)
targetfiles = set(chain(files(targets), priorityfiles, forcefiles, untilfiles))
if forcetargets:
forcefiles.update(targetfiles)
forcerules.update(targetrules)
rules = self.rules
if allowed_rules:
rules = [rule for rule in rules if rule.name in set(allowed_rules)]
if wait_for_files is not None:
try:
snakemake.io.wait_for_files(wait_for_files, latency_wait=latency_wait)
except IOError as e:
logger.error(str(e))
return False
dag = DAG(
self,
rules,
dryrun=dryrun,
targetfiles=targetfiles,
targetrules=targetrules,
# when cleaning up conda, we should enforce all possible jobs
# since their envs shall not be deleted
forceall=forceall or cleanup_conda,
forcefiles=forcefiles,
forcerules=forcerules,
priorityfiles=priorityfiles,
priorityrules=priorityrules,
untilfiles=untilfiles,
untilrules=untilrules,
omitfiles=omitfiles,
omitrules=omitrules,
ignore_ambiguity=ignore_ambiguity,
force_incomplete=force_incomplete,
ignore_incomplete=ignore_incomplete
or printdag
or printrulegraph
or printfilegraph,
notemp=notemp,
keep_remote_local=keep_remote_local,
batch=batch,
)
self.persistence = Persistence(
nolock=nolock,
dag=dag,
conda_prefix=self.conda_prefix,
singularity_prefix=self.singularity_prefix,
shadow_prefix=self.shadow_prefix,
warn_only=dryrun
or printrulegraph
or printfilegraph
or printdag
or summary
or archive
or list_version_changes
or list_code_changes
or list_input_changes
or list_params_changes
or list_untracked
or delete_all_output
or delete_temp_output,
)
if cleanup_metadata:
for f in cleanup_metadata:
self.persistence.cleanup_metadata(f)
return True
logger.info("Building DAG of jobs...")
dag.init()
dag.update_checkpoint_dependencies()
# check incomplete has to run BEFORE any call to postprocess
dag.check_incomplete()
dag.check_dynamic()
if unlock:
try:
self.persistence.cleanup_locks()
logger.info("Unlocking working directory.")
return True
except IOError:
logger.error(
"Error: Unlocking the directory {} failed. Maybe "
"you don't have the permissions?"
)
return False
try:
self.persistence.lock()
except IOError:
logger.error(
"Error: Directory cannot be locked. Please make "
"sure that no other Snakemake process is trying to create "
"the same files in the following directory:\n{}\n"
"If you are sure that no other "
"instances of snakemake are running on this directory, "
"the remaining lock was likely caused by a kill signal or "
"a power loss. It can be removed with "
"the --unlock argument.".format(os.getcwd())
)
return False
if cleanup_shadow:
self.persistence.cleanup_shadow()
return True
if self.subworkflows and not printdag and not printrulegraph and not printfilegraph:
# backup globals
globals_backup = dict(self.globals)
# execute subworkflows
for subworkflow in self.subworkflows:
subworkflow_targets = subworkflow.targets(dag)
logger.debug(
"Files requested from subworkflow:\n {}".format(
"\n ".join(subworkflow_targets)
)
)
updated = list()
if subworkflow_targets:
logger.info("Executing subworkflow {}.".format(subworkflow.name))
if not subsnakemake(
subworkflow.snakefile,
workdir=subworkflow.workdir,
targets=subworkflow_targets,
configfiles=[subworkflow.configfile],
updated_files=updated,
):
return False
dag.updated_subworkflow_files.update(
subworkflow.target(f) for f in updated
)
else:
logger.info(
"Subworkflow {}: Nothing to be done.".format(subworkflow.name)
)
if self.subworkflows:
logger.info("Executing main workflow.")
# rescue globals
self.globals.update(globals_backup)
dag.postprocess()
# deactivate IOCache such that from now on we always get updated
# size, existence and mtime information
# ATTENTION: this may never be removed without really good reason.
# Otherwise weird things may happen.
self.iocache.deactivate()
# clear and deactivate persistence cache, from now on we want to see updates
self.persistence.deactivate_cache()
if nodeps:
missing_input = [
f
for job in dag.targetjobs
for f in job.input
if dag.needrun(job) and not os.path.exists(f)
]
if missing_input:
logger.error(
"Dependency resolution disabled (--nodeps) "
"but missing input "
"files detected. If this happens on a cluster, please make sure "
"that you handle the dependencies yourself or turn off "
"--immediate-submit. Missing input files:\n{}".format(
"\n".join(missing_input)
)
)
return False
updated_files.extend(f for job in dag.needrun_jobs for f in job.output)
if export_cwl:
from snakemake.cwl import dag_to_cwl
import json
with open(export_cwl, "w") as cwl:
json.dump(dag_to_cwl(dag), cwl, indent=4)
return True
elif report:
from snakemake.report import auto_report
auto_report(dag, report)
return True
elif printd3dag:
dag.d3dag()
return True
elif printdag:
print(dag)
return True
elif printrulegraph:
print(dag.rule_dot())
return True
elif printfilegraph:
print(dag.filegraph_dot())
return True
elif summary:
print("\n".join(dag.summary(detailed=False)))
return True
elif detailed_summary:
print("\n".join(dag.summary(detailed=True)))
return True
elif archive:
dag.archive(archive)
return True
elif delete_all_output:
dag.clean(only_temp=False, dryrun=dryrun)
return True
elif delete_temp_output:
dag.clean(only_temp=True, dryrun=dryrun)
return True
elif list_version_changes:
items = list(chain(*map(self.persistence.version_changed, dag.jobs)))
if items:
print(*items, sep="\n")
return True
elif list_code_changes:
items = list(chain(*map(self.persistence.code_changed, dag.jobs)))
for j in dag.jobs:
items.extend(list(j.outputs_older_than_script_or_notebook()))
if items:
print(*items, sep="\n")
return True
elif list_input_changes:
items = list(chain(*map(self.persistence.input_changed, dag.jobs)))
if items:
print(*items, sep="\n")
return True
elif list_params_changes:
items = list(chain(*map(self.persistence.params_changed, dag.jobs)))
if items:
print(*items, sep="\n")
return True
elif list_untracked:
dag.list_untracked()
return True
if self.use_singularity:
if assume_shared_fs:
dag.pull_container_imgs(
dryrun=dryrun or list_conda_envs, quiet=list_conda_envs
)
if self.use_conda:
if assume_shared_fs:
dag.create_conda_envs(
dryrun=dryrun or list_conda_envs or cleanup_conda,
quiet=list_conda_envs,
)
if create_envs_only:
return True
if list_conda_envs:
print("environment", "container", "location", sep="\t")
for env in set(job.conda_env for job in dag.jobs):
if env:
print(
simplify_path(env.file),
env.container_img_url or "",
simplify_path(env.path),
sep="\t",
)
return True
if cleanup_conda:
self.persistence.cleanup_conda()
return True
scheduler = JobScheduler(
self,
dag,
self.cores,
local_cores=local_cores,
dryrun=dryrun,
touch=touch,
cluster=cluster,
cluster_status=cluster_status,
cluster_config=cluster_config,
cluster_sync=cluster_sync,
jobname=jobname,
max_jobs_per_second=max_jobs_per_second,
max_status_checks_per_second=max_status_checks_per_second,
quiet=quiet,
keepgoing=keepgoing,
drmaa=drmaa,
drmaa_log_dir=drmaa_log_dir,
kubernetes=kubernetes,
tibanna=tibanna,
tibanna_sfn=tibanna_sfn,
precommand=precommand,
container_image=container_image,
printreason=printreason,
printshellcmds=printshellcmds,
latency_wait=latency_wait,
greediness=greediness,
force_use_threads=force_use_threads,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
)
if not dryrun:
if len(dag):
shell_exec = shell.get_executable()
if shell_exec is not None:
logger.info("Using shell: {}".format(shell_exec))
if cluster or cluster_sync or drmaa:
logger.resources_info("Provided cluster nodes: {}".format(self.nodes))
else:
warning = (
"" if self.cores > 1 else " (use --cores to define parallelism)"
)
logger.resources_info(
"Provided cores: {}{}".format(self.cores, warning)
)
logger.resources_info(
"Rules claiming more threads will be scaled down."
)
provided_resources = format_resources(self.global_resources)
if provided_resources:
logger.resources_info("Provided resources: " + provided_resources)
if self.run_local and any(rule.group for rule in self.rules):
logger.info("Group jobs: inactive (local execution)")
if not self.use_conda and any(rule.conda_env for rule in self.rules):
logger.info("Conda environments: ignored")
if not self.use_singularity and any(
rule.container_img for rule in self.rules
):
logger.info("Singularity containers: ignored")
logger.run_info("\n".join(dag.stats()))
else:
logger.info("Nothing to be done.")
else:
# the dryrun case
if len(dag):
logger.run_info("\n".join(dag.stats()))
else:
logger.info("Nothing to be done.")
return True
if quiet:
# in case of dryrun and quiet, just print above info and exit
return True
if not dryrun and not no_hooks:
self._onstart(logger.get_logfile())
success = scheduler.schedule()
if success:
if dryrun:
if len(dag):
logger.run_info("\n".join(dag.stats()))
logger.info(
"This was a dry-run (flag -n). The order of jobs "
"does not reflect the order of execution."
)
logger.remove_logfile()
else:
if stats:
scheduler.stats.to_json(stats)
logger.logfile_hint()
if not dryrun and not no_hooks:
self._onsuccess(logger.get_logfile())
return True
else:
if not dryrun and not no_hooks:
self._onerror(logger.get_logfile())
logger.logfile_hint()
return False
|
https://github.com/snakemake/snakemake/issues/24
|
Traceback (most recent call last):
File "/mypath/miniconda/envs/mydef/lib/python3.7/site-packages/snakemake/__init__.py", line 611, in snakemake
export_cwl=export_cwl,
File "/mypath/miniconda/envs/mydef/lib/python3.7/site-packages/snakemake/workflow.py", line 558, in execute
updated_files=updated,
File "/mypath/miniconda/envs/mydef/lib/python3.7/site-packages/snakemake/__init__.py", line 389, in snakemake
overwrite_config.update(load_configfile(f))
File "/mypath/miniconda/envs/mydef/lib/python3.7/site-packages/snakemake/io.py", line 1303, in load_configfile
config = _load_configfile(configpath)
File "/mypath/miniconda/envs/mydef/lib/python3.7/site-packages/snakemake/io.py", line 1265, in _load_configfile
with open(configpath) as f:
TypeError: expected str, bytes or os.PathLike object, not NoneType
|
TypeError
|
def _get_provenance_hash(self, job: Job):
"""
Recursively calculate hash for the output of the given job
and all upstream jobs in a blockchain fashion.
This is based on an idea of Sven Nahnsen.
Fails if job has more than one output file. The reason is that there
is no way to generate a per-output file hash without generating the files.
This hash, however, shall work without having to generate the files,
just by describing all steps down to a given job.
"""
if job in self._hashes:
return self._hashes[job]
workflow = job.dag.workflow
h = hashlib.sha256()
# Hash shell command or script.
if job.is_shell:
# We cannot use the formatted shell command, because it also contains threads,
# resources, and filenames (which shall be irrelevant for the hash).
h.update(job.rule.shellcmd.encode())
elif job.is_script:
_, source, _ = script.get_source(job.rule.script)
h.update(source.encode())
elif job.is_wrapper:
_, source, _ = script.get_source(
wrapper.get_script(job.rule.wrapper, prefix=workflow.wrapper_prefix)
)
h.update(source.encode())
# Hash params.
for key, value in sorted(job.params._allitems()):
h.update(key.encode())
# If this raises a TypeError, we cannot calculate a reliable hash.
h.update(json.dumps(value, sort_keys=True).encode())
# Hash input files that are not generated by other jobs.
for f in job.input:
if not any(f in depfiles for depfiles in job.dag.dependencies[job].values()):
with open(f, "b") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
h.update(byte_block)
# Hash used containers or conda environments.
if workflow.use_conda and job.conda_env:
if workflow.use_singularity and job.conda_env.singularity_img_url:
h.update(job.conda_env.singularity_img_url.encode())
h.update(job.conda_env.content)
elif workflow.use_singularity and job.singularity_img_url:
h.update(job.singularity_img_url.encode())
# Generate hashes of dependencies, and add them in a blockchain fashion (as input to the current hash).
for dep_hash in sorted(
self._get_provenance_hash(dep) for dep in set(job.dag.dependencies[job].keys())
):
h.update(dep_hash.encode())
provenance_hash = h.hexdigest()
# Store for re-use.
self._hashes[job] = provenance_hash
return provenance_hash
|
def _get_provenance_hash(self, job: Job):
"""
Recursively calculate hash for the output of the given job
and all upstream jobs in a blockchain fashion.
This is based on an idea of Sven Nahnsen.
Fails if job has more than one output file. The reason is that there
is no way to generate a per-output file hash without generating the files.
This hash, however, shall work without having to generate the files,
just by describing all steps down to a given job.
"""
if job in self._hashes:
return self._hashes[job]
workflow = job.dag.workflow
h = hashlib.sha256()
# Hash shell command or script.
if job.is_shell:
# We cannot use the formatted shell command, because it also contains threads,
# resources, and filenames (which shall be irrelevant for the hash).
h.update(job.rule.shellcmd.encode())
elif job.is_script:
_, source, _ = script.get_source(job.rule.script)
h.update(source.encode())
elif job.is_wrapper:
_, source, _ = script.get_source(
wrapper.get_script(job.rule.wrapper, prefix=workflow.wrapper_prefix)
)
h.update(source.encode())
# Hash params.
for key, value in sorted(job.params.allitems()):
h.update(key.encode())
# If this raises a TypeError, we cannot calculate a reliable hash.
h.update(json.dumps(value, sort_keys=True).encode())
# Hash input files that are not generated by other jobs.
for f in job.input:
if not any(f in depfiles for depfiles in job.dag.dependencies[job].values()):
with open(f, "b") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
h.update(byte_block)
# Hash used containers or conda environments.
if workflow.use_conda and job.conda_env:
if workflow.use_singularity and job.conda_env.singularity_img_url:
h.update(job.conda_env.singularity_img_url.encode())
h.update(job.conda_env.content)
elif workflow.use_singularity and job.singularity_img_url:
h.update(job.singularity_img_url.encode())
# Generate hashes of dependencies, and add them in a blockchain fashion (as input to the current hash).
for dep_hash in sorted(
self._get_provenance_hash(dep) for dep in set(job.dag.dependencies[job].keys())
):
h.update(dep_hash.encode())
provenance_hash = h.hexdigest()
# Store for re-use.
self._hashes[job] = provenance_hash
return provenance_hash
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def job_to_cwl(job, dag, outputs, inputs):
"""Convert a job with its dependencies to a CWL workflow step."""
if job.dynamic_output:
raise WorkflowError("Dynamic output is not supported by CWL conversion.")
for f in job.output:
if os.path.isabs(f):
raise WorkflowError(
"All output files have to be relative to the working directory."
)
get_output_id = lambda job, i: "#main/job-{}/{}".format(job.jobid, i)
dep_ids = {
o: get_output_id(dep, i)
for dep, files in dag.dependencies[job].items()
for i, o in enumerate(dep.output)
if o in files
}
files = [f for f in job.input if f not in dep_ids]
if job.conda_env_file:
files.add(os.path.relpath(job.conda_env_file))
out = [get_output_id(job, i) for i, _ in enumerate(job.output)]
def workdir_entry(i, f):
location = "??inputs.input_files[{}].location??".format(i)
if f.is_directory:
entry = {
"class": "Directory",
"basename": os.path.basename(f),
"location": location,
}
else:
entry = {
"class": "File",
"basename": os.path.basename(f),
"location": location,
}
return "$({})".format(
json.dumps(outer_entry(f, entry)).replace('"??', "").replace('??"', "")
).replace('"', "'")
def outer_entry(f, entry):
parent = os.path.dirname(f)
if parent:
return outer_entry(
parent,
{
"class": "Directory",
"basename": os.path.basename(parent),
"listing": [entry],
},
)
else:
return entry
if job in dag.targetjobs:
# TODO this maps output files into the cwd after the workflow is complete.
# We need to find a way to define subdirectories though. Otherwise,
# there can be name clashes, and it will also become very crowded.
outputs.append(
{
"type": {"type": "array", "items": "File"},
"outputSource": "#main/job-{}/output_files".format(job.jobid),
"id": "#main/output/job-{}".format(job.jobid),
}
)
cwl = {
"run": "#snakemake-job",
"requirements": {
"InitialWorkDirRequirement": {
"listing": [
{"writable": True, "entry": workdir_entry(i, f)}
for i, f in enumerate(
chain(
files,
(f for dep in dag.dependencies[job] for f in dep.output),
)
)
]
}
},
"in": {
"cores": {"default": job.threads},
"target_files": {"default": job.output._plainstrings()},
"rules": {"default": [job.rule.name]},
},
"out": ["output_files"],
"id": "#main/job-{}".format(job.jobid),
}
if files:
inputs.append(
{
"type": {"type": "array", "items": "File"},
"default": [{"class": "File", "location": f} for f in files],
"id": "#main/input/job-{}".format(job.jobid),
}
)
input_files = []
if files:
input_files.append("#main/input/job-{}".format(job.jobid))
input_files.extend(
"#main/job-{}/output_files".format(dep.jobid) for dep in dag.dependencies[job]
)
cwl["in"]["input_files"] = {"source": input_files, "linkMerge": "merge_flattened"}
return cwl
|
def job_to_cwl(job, dag, outputs, inputs):
"""Convert a job with its dependencies to a CWL workflow step."""
if job.dynamic_output:
raise WorkflowError("Dynamic output is not supported by CWL conversion.")
for f in job.output:
if os.path.isabs(f):
raise WorkflowError(
"All output files have to be relative to the working directory."
)
get_output_id = lambda job, i: "#main/job-{}/{}".format(job.jobid, i)
dep_ids = {
o: get_output_id(dep, i)
for dep, files in dag.dependencies[job].items()
for i, o in enumerate(dep.output)
if o in files
}
files = [f for f in job.input if f not in dep_ids]
if job.conda_env_file:
files.add(os.path.relpath(job.conda_env_file))
out = [get_output_id(job, i) for i, _ in enumerate(job.output)]
def workdir_entry(i, f):
location = "??inputs.input_files[{}].location??".format(i)
if f.is_directory:
entry = {
"class": "Directory",
"basename": os.path.basename(f),
"location": location,
}
else:
entry = {
"class": "File",
"basename": os.path.basename(f),
"location": location,
}
return "$({})".format(
json.dumps(outer_entry(f, entry)).replace('"??', "").replace('??"', "")
).replace('"', "'")
def outer_entry(f, entry):
parent = os.path.dirname(f)
if parent:
return outer_entry(
parent,
{
"class": "Directory",
"basename": os.path.basename(parent),
"listing": [entry],
},
)
else:
return entry
if job in dag.targetjobs:
# TODO this maps output files into the cwd after the workflow is complete.
# We need to find a way to define subdirectories though. Otherwise,
# there can be name clashes, and it will also become very crowded.
outputs.append(
{
"type": {"type": "array", "items": "File"},
"outputSource": "#main/job-{}/output_files".format(job.jobid),
"id": "#main/output/job-{}".format(job.jobid),
}
)
cwl = {
"run": "#snakemake-job",
"requirements": {
"InitialWorkDirRequirement": {
"listing": [
{"writable": True, "entry": workdir_entry(i, f)}
for i, f in enumerate(
chain(
files,
(f for dep in dag.dependencies[job] for f in dep.output),
)
)
]
}
},
"in": {
"cores": {"default": job.threads},
"target_files": {"default": job.output.plainstrings()},
"rules": {"default": [job.rule.name]},
},
"out": ["output_files"],
"id": "#main/job-{}".format(job.jobid),
}
if files:
inputs.append(
{
"type": {"type": "array", "items": "File"},
"default": [{"class": "File", "location": f} for f in files],
"id": "#main/input/job-{}".format(job.jobid),
}
)
input_files = []
if files:
input_files.append("#main/input/job-{}".format(job.jobid))
input_files.extend(
"#main/job-{}/output_files".format(dep.jobid) for dep in dag.dependencies[job]
)
cwl["in"]["input_files"] = {"source": input_files, "linkMerge": "merge_flattened"}
return cwl
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
|
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CIRCLECI") != "true":
time.sleep(10)
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def job_args_and_prepare(self, job):
job.prepare()
conda_env = job.conda_env_path
singularity_img = job.singularity_img_path
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
singularity_img,
self.workflow.singularity_args,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
)
|
def job_args_and_prepare(self, job):
job.prepare()
conda_env = job.conda_env_path
singularity_img = job.singularity_img_path
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input.plainstrings(),
job.output.plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log.plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
singularity_img,
self.workflow.singularity_args,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
)
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def __init__(
self,
toclone=None,
fromdict=None,
plainstr=False,
strip_constraints=False,
custom_map=None,
):
"""
Create the object.
Arguments
toclone -- another Namedlist that shall be cloned
fromdict -- a dict that shall be converted to a
Namedlist (keys become names)
"""
list.__init__(self)
self._names = dict()
if toclone:
if custom_map is not None:
self.extend(map(custom_map, toclone))
elif plainstr:
self.extend(map(str, toclone))
elif strip_constraints:
self.extend(map(strip_wildcard_constraints, toclone))
else:
self.extend(toclone)
if isinstance(toclone, Namedlist):
self._take_names(toclone._get_names())
if fromdict:
for key, item in fromdict.items():
self.append(item)
self._add_name(key)
|
def __init__(
self,
toclone=None,
fromdict=None,
plainstr=False,
strip_constraints=False,
custom_map=None,
):
"""
Create the object.
Arguments
toclone -- another Namedlist that shall be cloned
fromdict -- a dict that shall be converted to a
Namedlist (keys become names)
"""
list.__init__(self)
self._names = dict()
if toclone:
if custom_map is not None:
self.extend(map(custom_map, toclone))
elif plainstr:
self.extend(map(str, toclone))
elif strip_constraints:
self.extend(map(strip_wildcard_constraints, toclone))
else:
self.extend(toclone)
if isinstance(toclone, Namedlist):
self.take_names(toclone.get_names())
if fromdict:
for key, item in fromdict.items():
self.append(item)
self.add_name(key)
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def keys(self):
return self._names.keys()
|
def keys(self):
return self._names
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def format_dict(dict_like, omit_keys=[], omit_values=[]):
from snakemake.io import Namedlist
if isinstance(dict_like, Namedlist):
items = dict_like.items()
elif isinstance(dict_like, dict):
items = dict_like.items()
else:
raise ValueError(
"bug: format_dict applied to something neither a dict nor a Namedlist"
)
return ", ".join(
"{}={}".format(name, str(value))
for name, value in items
if name not in omit_keys and value not in omit_values
)
|
def format_dict(dict, omit_keys=[], omit_values=[]):
return ", ".join(
"{}={}".format(name, str(value))
for name, value in dict.items()
if name not in omit_keys and value not in omit_values
)
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def dynamic_branch(self, wildcards, input=True):
def get_io(rule):
return (
(rule.input, rule.dynamic_input)
if input
else (rule.output, rule.dynamic_output)
)
def partially_expand(f, wildcards):
"""Expand the wildcards in f from the ones present in wildcards
This is done by replacing all wildcard delimiters by `{{` or `}}`
that are not in `wildcards.keys()`.
"""
# perform the partial expansion from f's string representation
s = str(f).replace("{", "{{").replace("}", "}}")
for key in wildcards.keys():
s = s.replace("{{{{{}}}}}".format(key), "{{{}}}".format(key))
# build result
anno_s = AnnotatedString(s)
anno_s.flags = f.flags
return IOFile(anno_s, f.rule)
io, dynamic_io = get_io(self)
branch = Rule(self)
io_, dynamic_io_ = get_io(branch)
expansion = collections.defaultdict(list)
for i, f in enumerate(io):
if f in dynamic_io:
f = partially_expand(f, wildcards)
try:
for e in reversed(expand(str(f), zip, **wildcards)):
# need to clone the flags so intermediate
# dynamic remote file paths are expanded and
# removed appropriately
ioFile = IOFile(e, rule=branch)
ioFile.clone_flags(f)
expansion[i].append(ioFile)
except KeyError:
return None
# replace the dynamic files with the expanded files
replacements = [(i, io[i], e) for i, e in reversed(list(expansion.items()))]
for i, old, exp in replacements:
dynamic_io_.remove(old)
io_._insert_items(i, exp)
if not input:
for i, old, exp in replacements:
if old in branch.temp_output:
branch.temp_output.discard(old)
branch.temp_output.update(exp)
if old in branch.protected_output:
branch.protected_output.discard(old)
branch.protected_output.update(exp)
if old in branch.touch_output:
branch.touch_output.discard(old)
branch.touch_output.update(exp)
branch.wildcard_names.clear()
non_dynamic_wildcards = dict(
(name, values[0])
for name, values in wildcards.items()
if len(set(values)) == 1
)
# TODO have a look into how to concretize dependencies here
branch._input, _, branch.dependencies = branch.expand_input(
non_dynamic_wildcards
)
branch._output, _ = branch.expand_output(non_dynamic_wildcards)
resources = branch.expand_resources(non_dynamic_wildcards, branch._input, 1)
branch._params = branch.expand_params(
non_dynamic_wildcards,
branch._input,
branch._output,
resources,
omit_callable=True,
)
branch.resources = dict(resources.items())
branch._log = branch.expand_log(non_dynamic_wildcards)
branch._benchmark = branch.expand_benchmark(non_dynamic_wildcards)
branch._conda_env = branch.expand_conda_env(non_dynamic_wildcards)
return branch, non_dynamic_wildcards
return branch
|
def dynamic_branch(self, wildcards, input=True):
def get_io(rule):
return (
(rule.input, rule.dynamic_input)
if input
else (rule.output, rule.dynamic_output)
)
def partially_expand(f, wildcards):
"""Expand the wildcards in f from the ones present in wildcards
This is done by replacing all wildcard delimiters by `{{` or `}}`
that are not in `wildcards.keys()`.
"""
# perform the partial expansion from f's string representation
s = str(f).replace("{", "{{").replace("}", "}}")
for key in wildcards.keys():
s = s.replace("{{{{{}}}}}".format(key), "{{{}}}".format(key))
# build result
anno_s = AnnotatedString(s)
anno_s.flags = f.flags
return IOFile(anno_s, f.rule)
io, dynamic_io = get_io(self)
branch = Rule(self)
io_, dynamic_io_ = get_io(branch)
expansion = collections.defaultdict(list)
for i, f in enumerate(io):
if f in dynamic_io:
f = partially_expand(f, wildcards)
try:
for e in reversed(expand(str(f), zip, **wildcards)):
# need to clone the flags so intermediate
# dynamic remote file paths are expanded and
# removed appropriately
ioFile = IOFile(e, rule=branch)
ioFile.clone_flags(f)
expansion[i].append(ioFile)
except KeyError:
return None
# replace the dynamic files with the expanded files
replacements = [(i, io[i], e) for i, e in reversed(list(expansion.items()))]
for i, old, exp in replacements:
dynamic_io_.remove(old)
io_.insert_items(i, exp)
if not input:
for i, old, exp in replacements:
if old in branch.temp_output:
branch.temp_output.discard(old)
branch.temp_output.update(exp)
if old in branch.protected_output:
branch.protected_output.discard(old)
branch.protected_output.update(exp)
if old in branch.touch_output:
branch.touch_output.discard(old)
branch.touch_output.update(exp)
branch.wildcard_names.clear()
non_dynamic_wildcards = dict(
(name, values[0])
for name, values in wildcards.items()
if len(set(values)) == 1
)
# TODO have a look into how to concretize dependencies here
branch._input, _, branch.dependencies = branch.expand_input(
non_dynamic_wildcards
)
branch._output, _ = branch.expand_output(non_dynamic_wildcards)
resources = branch.expand_resources(non_dynamic_wildcards, branch._input, 1)
branch._params = branch.expand_params(
non_dynamic_wildcards,
branch._input,
branch._output,
resources,
omit_callable=True,
)
branch.resources = dict(resources.items())
branch._log = branch.expand_log(non_dynamic_wildcards)
branch._benchmark = branch.expand_benchmark(non_dynamic_wildcards)
branch._conda_env = branch.expand_conda_env(non_dynamic_wildcards)
return branch, non_dynamic_wildcards
return branch
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def check_output_duplicates(self):
"""Check ``Namedlist`` for duplicate entries and raise a ``WorkflowError``
on problems.
"""
seen = dict()
idx = None
for name, value in self.output._allitems():
if name is None:
if idx is None:
idx = 0
else:
idx += 1
if value in seen:
raise WorkflowError(
"Duplicate output file pattern in rule {}. First two "
"duplicate for entries {} and {}".format(
self.name, seen[value], name or idx
)
)
seen[value] = name or idx
|
def check_output_duplicates(self):
"""Check ``Namedlist`` for duplicate entries and raise a ``WorkflowError``
on problems.
"""
seen = dict()
idx = None
for name, value in self.output.allitems():
if name is None:
if idx is None:
idx = 0
else:
idx += 1
if value in seen:
raise WorkflowError(
"Duplicate output file pattern in rule {}. First two "
"duplicate for entries {} and {}".format(
self.name, seen[value], name or idx
)
)
seen[value] = name or idx
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def _set_inoutput_item(self, item, output=False, name=None):
"""
Set an item to be input or output.
Arguments
item -- the item
inoutput -- a Namedlist of either input or output items
name -- an optional name for the item
"""
inoutput = self.output if output else self.input
# Check to see if the item is a path, if so, just make it a string
if isinstance(item, Path):
item = str(item)
if isinstance(item, str):
item = self.apply_default_remote(item)
# Check to see that all flags are valid
# Note that "remote", "dynamic", and "expand" are valid for both inputs and outputs.
if isinstance(item, AnnotatedString):
for flag in item.flags:
if not output and flag in [
"protected",
"temp",
"temporary",
"directory",
"touch",
"pipe",
]:
logger.warning(
"The flag '{}' used in rule {} is only valid for outputs, not inputs.".format(
flag, self
)
)
if output and flag in ["ancient"]:
logger.warning(
"The flag '{}' used in rule {} is only valid for inputs, not outputs.".format(
flag, self
)
)
# add the rule to the dependencies
if isinstance(item, _IOFile) and item.rule and item in item.rule.output:
self.dependencies[item] = item.rule
if output:
item = self._update_item_wildcard_constraints(item)
else:
if (
contains_wildcard_constraints(item)
and self.workflow.mode != Mode.subprocess
):
logger.warning("Wildcard constraints in inputs are ignored.")
# record rule if this is an output file output
_item = IOFile(item, rule=self)
if is_flagged(item, "temp"):
if output:
self.temp_output.add(_item)
if is_flagged(item, "protected"):
if output:
self.protected_output.add(_item)
if is_flagged(item, "touch"):
if output:
self.touch_output.add(_item)
if is_flagged(item, "dynamic"):
if output:
self.dynamic_output.add(_item)
else:
self.dynamic_input.add(_item)
if is_flagged(item, "report"):
report_obj = item.flags["report"]
if report_obj.caption is not None:
r = ReportObject(
os.path.join(self.workflow.current_basedir, report_obj.caption),
report_obj.category,
)
item.flags["report"] = r
if is_flagged(item, "subworkflow"):
if output:
raise SyntaxError("Only input files may refer to a subworkflow")
else:
# record the workflow this item comes from
sub = item.flags["subworkflow"]
if _item in self.subworkflow_input:
other = self.subworkflow_input[_item]
if sub != other:
raise WorkflowError(
"The input file {} is ambiguously "
"associated with two subworkflows "
"{} and {}.".format(item, sub, other),
rule=self,
)
self.subworkflow_input[_item] = sub
inoutput.append(_item)
if name:
inoutput._add_name(name)
elif callable(item):
if output:
raise SyntaxError("Only input files can be specified as functions")
inoutput.append(item)
if name:
inoutput._add_name(name)
else:
try:
start = len(inoutput)
for i in item:
self._set_inoutput_item(i, output=output)
if name:
# if the list was named, make it accessible
inoutput._set_name(name, start, end=len(inoutput))
except TypeError:
raise SyntaxError(
"Input and output files have to be specified as strings or lists of strings."
)
|
def _set_inoutput_item(self, item, output=False, name=None):
"""
Set an item to be input or output.
Arguments
item -- the item
inoutput -- a Namedlist of either input or output items
name -- an optional name for the item
"""
inoutput = self.output if output else self.input
# Check to see if the item is a path, if so, just make it a string
if isinstance(item, Path):
item = str(item)
if isinstance(item, str):
item = self.apply_default_remote(item)
# Check to see that all flags are valid
# Note that "remote", "dynamic", and "expand" are valid for both inputs and outputs.
if isinstance(item, AnnotatedString):
for flag in item.flags:
if not output and flag in [
"protected",
"temp",
"temporary",
"directory",
"touch",
"pipe",
]:
logger.warning(
"The flag '{}' used in rule {} is only valid for outputs, not inputs.".format(
flag, self
)
)
if output and flag in ["ancient"]:
logger.warning(
"The flag '{}' used in rule {} is only valid for inputs, not outputs.".format(
flag, self
)
)
# add the rule to the dependencies
if isinstance(item, _IOFile) and item.rule and item in item.rule.output:
self.dependencies[item] = item.rule
if output:
item = self._update_item_wildcard_constraints(item)
else:
if (
contains_wildcard_constraints(item)
and self.workflow.mode != Mode.subprocess
):
logger.warning("Wildcard constraints in inputs are ignored.")
# record rule if this is an output file output
_item = IOFile(item, rule=self)
if is_flagged(item, "temp"):
if output:
self.temp_output.add(_item)
if is_flagged(item, "protected"):
if output:
self.protected_output.add(_item)
if is_flagged(item, "touch"):
if output:
self.touch_output.add(_item)
if is_flagged(item, "dynamic"):
if output:
self.dynamic_output.add(_item)
else:
self.dynamic_input.add(_item)
if is_flagged(item, "report"):
report_obj = item.flags["report"]
if report_obj.caption is not None:
r = ReportObject(
os.path.join(self.workflow.current_basedir, report_obj.caption),
report_obj.category,
)
item.flags["report"] = r
if is_flagged(item, "subworkflow"):
if output:
raise SyntaxError("Only input files may refer to a subworkflow")
else:
# record the workflow this item comes from
sub = item.flags["subworkflow"]
if _item in self.subworkflow_input:
other = self.subworkflow_input[_item]
if sub != other:
raise WorkflowError(
"The input file {} is ambiguously "
"associated with two subworkflows "
"{} and {}.".format(item, sub, other),
rule=self,
)
self.subworkflow_input[_item] = sub
inoutput.append(_item)
if name:
inoutput.add_name(name)
elif callable(item):
if output:
raise SyntaxError("Only input files can be specified as functions")
inoutput.append(item)
if name:
inoutput.add_name(name)
else:
try:
start = len(inoutput)
for i in item:
self._set_inoutput_item(i, output=output)
if name:
# if the list was named, make it accessible
inoutput.set_name(name, start, end=len(inoutput))
except TypeError:
raise SyntaxError(
"Input and output files have to be specified as strings or lists of strings."
)
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def _set_params_item(self, item, name=None):
self.params.append(item)
if name:
self.params._add_name(name)
|
def _set_params_item(self, item, name=None):
self.params.append(item)
if name:
self.params.add_name(name)
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def _set_log_item(self, item, name=None):
# Pathlib compatibility
if isinstance(item, Path):
item = str(item)
if isinstance(item, str) or callable(item):
if not callable(item):
item = self.apply_default_remote(item)
item = self._update_item_wildcard_constraints(item)
self.log.append(IOFile(item, rule=self) if isinstance(item, str) else item)
if name:
self.log._add_name(name)
else:
try:
start = len(self.log)
for i in item:
self._set_log_item(i)
if name:
self.log._set_name(name, start, end=len(self.log))
except TypeError:
raise SyntaxError("Log files have to be specified as strings.")
|
def _set_log_item(self, item, name=None):
# Pathlib compatibility
if isinstance(item, Path):
item = str(item)
if isinstance(item, str) or callable(item):
if not callable(item):
item = self.apply_default_remote(item)
item = self._update_item_wildcard_constraints(item)
self.log.append(IOFile(item, rule=self) if isinstance(item, str) else item)
if name:
self.log.add_name(name)
else:
try:
start = len(self.log)
for i in item:
self._set_log_item(i)
if name:
self.log.set_name(name, start, end=len(self.log))
except TypeError:
raise SyntaxError("Log files have to be specified as strings.")
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def _apply_wildcards(
self,
newitems,
olditems,
wildcards,
concretize=None,
check_return_type=True,
omit_callable=False,
mapping=None,
no_flattening=False,
aux_params=None,
apply_default_remote=True,
incomplete_checkpoint_func=lambda e: None,
allow_unpack=True,
):
if aux_params is None:
aux_params = dict()
for name, item in olditems._allitems():
start = len(newitems)
is_unpack = is_flagged(item, "unpack")
_is_callable = is_callable(item)
if _is_callable:
if omit_callable:
continue
item, incomplete = self.apply_input_function(
item,
wildcards,
incomplete_checkpoint_func=incomplete_checkpoint_func,
is_unpack=is_unpack,
**aux_params,
)
if apply_default_remote:
item = self.apply_default_remote(item)
if is_unpack and not incomplete:
if not allow_unpack:
raise WorkflowError(
"unpack() is not allowed with params. "
"Simply return a dictionary which can be directly ."
"used, e.g. via {params[mykey]}."
)
# Sanity checks before interpreting unpack()
if not isinstance(item, (list, dict)):
raise WorkflowError("Can only use unpack() on list and dict", rule=self)
if name:
raise WorkflowError(
"Cannot combine named input file with unpack()", rule=self
)
# Allow streamlined code with/without unpack
if isinstance(item, list):
pairs = zip([None] * len(item), item)
else:
assert isinstance(item, dict)
pairs = item.items()
else:
pairs = [(name, item)]
for name, item in pairs:
is_iterable = True
if not_iterable(item) or no_flattening:
item = [item]
is_iterable = False
for item_ in item:
if check_return_type and not isinstance(item_, str):
raise WorkflowError(
"Function did not return str or list of str.", rule=self
)
concrete = concretize(item_, wildcards, _is_callable)
newitems.append(concrete)
if mapping is not None:
mapping[concrete] = item_
if name:
newitems._set_name(
name, start, end=len(newitems) if is_iterable else None
)
start = len(newitems)
|
def _apply_wildcards(
self,
newitems,
olditems,
wildcards,
concretize=None,
check_return_type=True,
omit_callable=False,
mapping=None,
no_flattening=False,
aux_params=None,
apply_default_remote=True,
incomplete_checkpoint_func=lambda e: None,
allow_unpack=True,
):
if aux_params is None:
aux_params = dict()
for name, item in olditems.allitems():
start = len(newitems)
is_unpack = is_flagged(item, "unpack")
_is_callable = is_callable(item)
if _is_callable:
if omit_callable:
continue
item, incomplete = self.apply_input_function(
item,
wildcards,
incomplete_checkpoint_func=incomplete_checkpoint_func,
is_unpack=is_unpack,
**aux_params,
)
if apply_default_remote:
item = self.apply_default_remote(item)
if is_unpack and not incomplete:
if not allow_unpack:
raise WorkflowError(
"unpack() is not allowed with params. "
"Simply return a dictionary which can be directly ."
"used, e.g. via {params[mykey]}."
)
# Sanity checks before interpreting unpack()
if not isinstance(item, (list, dict)):
raise WorkflowError("Can only use unpack() on list and dict", rule=self)
if name:
raise WorkflowError(
"Cannot combine named input file with unpack()", rule=self
)
# Allow streamlined code with/without unpack
if isinstance(item, list):
pairs = zip([None] * len(item), item)
else:
assert isinstance(item, dict)
pairs = item.items()
else:
pairs = [(name, item)]
for name, item in pairs:
is_iterable = True
if not_iterable(item) or no_flattening:
item = [item]
is_iterable = False
for item_ in item:
if check_return_type and not isinstance(item_, str):
raise WorkflowError(
"Function did not return str or list of str.", rule=self
)
concrete = concretize(item_, wildcards, _is_callable)
newitems.append(concrete)
if mapping is not None:
mapping[concrete] = item_
if name:
newitems.set_name(
name, start, end=len(newitems) if is_iterable else None
)
start = len(newitems)
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def expand_params(self, wildcards, input, output, resources, omit_callable=False):
def concretize_param(p, wildcards, is_from_callable):
if not is_from_callable:
if isinstance(p, str):
return apply_wildcards(p, wildcards)
if isinstance(p, list):
return [
(apply_wildcards(v, wildcards) if isinstance(v, str) else v)
for v in p
]
return p
params = Params()
try:
# When applying wildcards to params, the return type need not be
# a string, so the check is disabled.
self._apply_wildcards(
params,
self.params,
wildcards,
concretize=concretize_param,
check_return_type=False,
omit_callable=omit_callable,
allow_unpack=False,
no_flattening=True,
apply_default_remote=False,
aux_params={
"input": input._plainstrings(),
"resources": resources,
"output": output._plainstrings(),
"threads": resources._cores,
},
incomplete_checkpoint_func=lambda e: "<incomplete checkpoint>",
)
except WildcardError as e:
raise WildcardError(
"Wildcards in params cannot be "
"determined from output files. Note that you have "
"to use a function to deactivate automatic wildcard expansion "
"in params strings, e.g., `lambda wildcards: '{test}'`. Also "
"see https://snakemake.readthedocs.io/en/stable/snakefiles/"
"rules.html#non-file-parameters-for-rules:",
str(e),
rule=self,
)
return params
|
def expand_params(self, wildcards, input, output, resources, omit_callable=False):
def concretize_param(p, wildcards, is_from_callable):
if not is_from_callable:
if isinstance(p, str):
return apply_wildcards(p, wildcards)
if isinstance(p, list):
return [
(apply_wildcards(v, wildcards) if isinstance(v, str) else v)
for v in p
]
return p
params = Params()
try:
# When applying wildcards to params, the return type need not be
# a string, so the check is disabled.
self._apply_wildcards(
params,
self.params,
wildcards,
concretize=concretize_param,
check_return_type=False,
omit_callable=omit_callable,
allow_unpack=False,
no_flattening=True,
apply_default_remote=False,
aux_params={
"input": input.plainstrings(),
"resources": resources,
"output": output.plainstrings(),
"threads": resources._cores,
},
incomplete_checkpoint_func=lambda e: "<incomplete checkpoint>",
)
except WildcardError as e:
raise WildcardError(
"Wildcards in params cannot be "
"determined from output files. Note that you have "
"to use a function to deactivate automatic wildcard expansion "
"in params strings, e.g., `lambda wildcards: '{test}'`. Also "
"see https://snakemake.readthedocs.io/en/stable/snakefiles/"
"rules.html#non-file-parameters-for-rules:",
str(e),
rule=self,
)
return params
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def expand_output(self, wildcards):
output = OutputFiles(o.apply_wildcards(wildcards) for o in self.output)
output._take_names(self.output._get_names())
mapping = {f: f_ for f, f_ in zip(output, self.output)}
for f in output:
f.check()
# Note that we do not need to check for duplicate file names after
# expansion as all output patterns have contain all wildcards anyway.
return output, mapping
|
def expand_output(self, wildcards):
output = OutputFiles(o.apply_wildcards(wildcards) for o in self.output)
output.take_names(self.output.get_names())
mapping = {f: f_ for f, f_ in zip(output, self.output)}
for f in output:
f.check()
# Note that we do not need to check for duplicate file names after
# expansion as all output patterns have contain all wildcards anyway.
return output, mapping
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def input(self):
return self.rule.input._stripped_constraints()
|
def input(self):
return self.rule.input.stripped_constraints()
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def params(self):
return self.rule.params._clone()
|
def params(self):
return self.rule.params.clone()
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def __init__(
self,
input,
output,
params,
wildcards,
threads,
resources,
log,
config,
rulename,
bench_iteration,
scriptdir=None,
):
# convert input and output to plain strings as some remote objects cannot
# be pickled
self.input = input._plainstrings()
self.output = output._plainstrings()
self.params = params
self.wildcards = wildcards
self.threads = threads
self.resources = resources
self.log = log._plainstrings()
self.config = config
self.rule = rulename
self.bench_iteration = bench_iteration
self.scriptdir = scriptdir
|
def __init__(
self,
input,
output,
params,
wildcards,
threads,
resources,
log,
config,
rulename,
bench_iteration,
scriptdir=None,
):
# convert input and output to plain strings as some remote objects cannot
# be pickled
self.input = input.plainstrings()
self.output = output.plainstrings()
self.params = params
self.wildcards = wildcards
self.threads = threads
self.resources = resources
self.log = log.plainstrings()
self.config = config
self.rule = rulename
self.bench_iteration = bench_iteration
self.scriptdir = scriptdir
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def script(
path,
basedir,
input,
output,
params,
wildcards,
threads,
resources,
log,
config,
rulename,
conda_env,
singularity_img,
singularity_args,
bench_record,
jobid,
bench_iteration,
cleanup_scripts,
shadow_dir,
):
"""
Load a script from the given basedir + path and execute it.
Supports Python 3 and R.
"""
f = None
try:
path, source, language = get_source(path, basedir)
if language == "python":
wrapper_path = path[7:] if path.startswith("file://") else path
snakemake = Snakemake(
input,
output,
params,
wildcards,
threads,
resources,
log,
config,
rulename,
bench_iteration,
os.path.dirname(wrapper_path),
)
snakemake = pickle.dumps(snakemake)
# Obtain search path for current snakemake module.
# The module is needed for unpickling in the script.
# We append it at the end (as a fallback).
searchpath = SNAKEMAKE_SEARCHPATH
if singularity_img is not None:
searchpath = singularity.SNAKEMAKE_MOUNTPOINT
searchpath = '"{}"'.format(searchpath)
# For local scripts, add their location to the path in case they use path-based imports
if path.startswith("file://"):
searchpath += ', "{}"'.format(os.path.dirname(path[7:]))
preamble = textwrap.dedent(
"""
######## Snakemake header ########
import sys; sys.path.extend([{searchpath}]); import pickle; snakemake = pickle.loads({snakemake}); from snakemake.logging import logger; logger.printshellcmds = {printshellcmds}; __real_file__ = __file__; __file__ = {file_override};
######## Original script #########
"""
).format(
searchpath=escape_backslash(searchpath),
snakemake=snakemake,
printshellcmds=logger.printshellcmds,
file_override=repr(os.path.realpath(wrapper_path)),
)
elif language == "r" or language == "rmarkdown":
preamble = textwrap.dedent(
"""
######## Snakemake header ########
library(methods)
Snakemake <- setClass(
"Snakemake",
slots = c(
input = "list",
output = "list",
params = "list",
wildcards = "list",
threads = "numeric",
log = "list",
resources = "list",
config = "list",
rule = "character",
bench_iteration = "numeric",
scriptdir = "character",
source = "function"
)
)
snakemake <- Snakemake(
input = {},
output = {},
params = {},
wildcards = {},
threads = {},
log = {},
resources = {},
config = {},
rule = {},
bench_iteration = {},
scriptdir = {},
source = function(...){{
wd <- getwd()
setwd(snakemake@scriptdir)
source(...)
setwd(wd)
}}
)
######## Original script #########
"""
).format(
REncoder.encode_namedlist(input),
REncoder.encode_namedlist(output),
REncoder.encode_namedlist(params),
REncoder.encode_namedlist(wildcards),
threads,
REncoder.encode_namedlist(log),
REncoder.encode_dict(
{
name: value
for name, value in resources.items()
if name != "_cores" and name != "_nodes"
}
),
REncoder.encode_dict(config),
REncoder.encode_value(rulename),
REncoder.encode_numeric(bench_iteration),
REncoder.encode_value(
os.path.dirname(path[7:])
if path.startswith("file://")
else os.path.dirname(path)
),
)
elif language == "julia":
preamble = textwrap.dedent(
"""
######## Snakemake header ########
struct Snakemake
input::Dict
output::Dict
params::Dict
wildcards::Dict
threads::Int64
log::Dict
resources::Dict
config::Dict
rule::String
bench_iteration
scriptdir::String
#source::Any
end
snakemake = Snakemake(
{}, #input::Dict
{}, #output::Dict
{}, #params::Dict
{}, #wildcards::Dict
{}, #threads::Int64
{}, #log::Dict
{}, #resources::Dict
{}, #config::Dict
{}, #rule::String
{}, #bench_iteration::Int64
{}, #scriptdir::String
#, #source::Any
)
######## Original script #########
""".format(
JuliaEncoder.encode_namedlist(input),
JuliaEncoder.encode_namedlist(output),
JuliaEncoder.encode_namedlist(params),
JuliaEncoder.encode_namedlist(wildcards),
JuliaEncoder.encode_value(threads),
JuliaEncoder.encode_namedlist(log),
JuliaEncoder.encode_dict(
{
name: value
for name, value in resources.items()
if name != "_cores" and name != "_nodes"
}
),
JuliaEncoder.encode_dict(config),
JuliaEncoder.encode_value(rulename),
JuliaEncoder.encode_value(bench_iteration),
JuliaEncoder.encode_value(
os.path.dirname(path[7:])
if path.startswith("file://")
else os.path.dirname(path)
),
).replace("'", '"')
)
else:
raise ValueError(
"Unsupported script: Expecting either Python (.py), R (.R), RMarkdown (.Rmd) or Julia (.jl) script."
)
dir = ".snakemake/scripts"
os.makedirs(dir, exist_ok=True)
with tempfile.NamedTemporaryFile(
suffix="." + os.path.basename(path), dir=dir, delete=False
) as f:
if not language == "rmarkdown":
f.write(preamble.encode())
f.write(source)
else:
# Insert Snakemake object after the RMarkdown header
code = source.decode()
pos = next(islice(re.finditer(r"---\n", code), 1, 2)).start() + 3
f.write(str.encode(code[:pos]))
preamble = textwrap.dedent(
"""
```{r, echo=FALSE, message=FALSE, warning=FALSE}
%s
```
"""
% preamble
)
f.write(preamble.encode())
f.write(str.encode(code[pos:]))
if language == "python":
py_exec = sys.executable
if conda_env is not None:
py = os.path.join(conda_env, "bin", "python")
if os.path.exists(py):
out = subprocess.check_output(
[py, "--version"],
stderr=subprocess.STDOUT,
universal_newlines=True,
)
ver = tuple(
map(int, PY_VER_RE.match(out).group("ver_min").split("."))
)
if ver >= MIN_PY_VERSION:
# Python version is new enough, make use of environment
# to execute script
py_exec = "python"
else:
logger.warning(
"Conda environment defines Python "
"version < {0}.{1}. Using Python of the "
"master process to execute "
"script. Note that this cannot be avoided, "
"because the script uses data structures from "
"Snakemake which are Python >={0}.{1} "
"only.".format(*MIN_PY_VERSION)
)
if singularity_img is not None:
# use python from image
py_exec = "python"
# use the same Python as the running process or the one from the environment
shell("{py_exec} {f.name:q}", bench_record=bench_record)
elif language == "r":
if conda_env is not None and "R_LIBS" in os.environ:
logger.warning(
"R script job uses conda environment but "
"R_LIBS environment variable is set. This "
"is likely not intended, as R_LIBS can "
"interfere with R packages deployed via "
"conda. Consider running `unset R_LIBS` or "
"remove it entirely before executing "
"Snakemake."
)
shell("Rscript --vanilla {f.name:q}", bench_record=bench_record)
elif language == "rmarkdown":
if len(output) != 1:
raise WorkflowError(
"RMarkdown scripts (.Rmd) may only have a single output file."
)
out = os.path.abspath(output[0])
shell(
'Rscript --vanilla -e \'rmarkdown::render("{f.name}", output_file="{out}", quiet=TRUE, knit_root_dir = "{workdir}", params = list(rmd="{f.name}"))\'',
bench_record=bench_record,
workdir=os.getcwd(),
)
elif language == "julia":
shell("julia {f.name:q}", bench_record=bench_record)
except URLError as e:
raise WorkflowError(e)
finally:
if f and cleanup_scripts:
os.remove(f.name)
elif f:
logger.warning("Not cleaning up {} upon request.".format(f.name))
|
def script(
path,
basedir,
input,
output,
params,
wildcards,
threads,
resources,
log,
config,
rulename,
conda_env,
singularity_img,
singularity_args,
bench_record,
jobid,
bench_iteration,
cleanup_scripts,
shadow_dir,
):
"""
Load a script from the given basedir + path and execute it.
Supports Python 3 and R.
"""
f = None
try:
path, source, language = get_source(path, basedir)
if language == "python":
wrapper_path = path[7:] if path.startswith("file://") else path
snakemake = Snakemake(
input,
output,
params,
wildcards,
threads,
resources,
log,
config,
rulename,
bench_iteration,
os.path.dirname(wrapper_path),
)
snakemake = pickle.dumps(snakemake)
# Obtain search path for current snakemake module.
# The module is needed for unpickling in the script.
# We append it at the end (as a fallback).
searchpath = SNAKEMAKE_SEARCHPATH
if singularity_img is not None:
searchpath = singularity.SNAKEMAKE_MOUNTPOINT
searchpath = '"{}"'.format(searchpath)
# For local scripts, add their location to the path in case they use path-based imports
if path.startswith("file://"):
searchpath += ', "{}"'.format(os.path.dirname(path[7:]))
preamble = textwrap.dedent(
"""
######## Snakemake header ########
import sys; sys.path.extend([{searchpath}]); import pickle; snakemake = pickle.loads({snakemake}); from snakemake.logging import logger; logger.printshellcmds = {printshellcmds}; __real_file__ = __file__; __file__ = {file_override};
######## Original script #########
"""
).format(
searchpath=escape_backslash(searchpath),
snakemake=snakemake,
printshellcmds=logger.printshellcmds,
file_override=repr(os.path.realpath(wrapper_path)),
)
elif language == "r" or language == "rmarkdown":
preamble = textwrap.dedent(
"""
######## Snakemake header ########
library(methods)
Snakemake <- setClass(
"Snakemake",
slots = c(
input = "list",
output = "list",
params = "list",
wildcards = "list",
threads = "numeric",
log = "list",
resources = "list",
config = "list",
rule = "character",
bench_iteration = "numeric",
scriptdir = "character",
source = "function"
)
)
snakemake <- Snakemake(
input = {},
output = {},
params = {},
wildcards = {},
threads = {},
log = {},
resources = {},
config = {},
rule = {},
bench_iteration = {},
scriptdir = {},
source = function(...){{
wd <- getwd()
setwd(snakemake@scriptdir)
source(...)
setwd(wd)
}}
)
######## Original script #########
"""
).format(
REncoder.encode_namedlist(input),
REncoder.encode_namedlist(output),
REncoder.encode_namedlist(params),
REncoder.encode_namedlist(wildcards),
threads,
REncoder.encode_namedlist(log),
REncoder.encode_namedlist(
{
name: value
for name, value in resources.items()
if name != "_cores" and name != "_nodes"
}
),
REncoder.encode_dict(config),
REncoder.encode_value(rulename),
REncoder.encode_numeric(bench_iteration),
REncoder.encode_value(
os.path.dirname(path[7:])
if path.startswith("file://")
else os.path.dirname(path)
),
)
elif language == "julia":
preamble = textwrap.dedent(
"""
######## Snakemake header ########
struct Snakemake
input::Dict
output::Dict
params::Dict
wildcards::Dict
threads::Int64
log::Dict
resources::Dict
config::Dict
rule::String
bench_iteration
scriptdir::String
#source::Any
end
snakemake = Snakemake(
{}, #input::Dict
{}, #output::Dict
{}, #params::Dict
{}, #wildcards::Dict
{}, #threads::Int64
{}, #log::Dict
{}, #resources::Dict
{}, #config::Dict
{}, #rule::String
{}, #bench_iteration::Int64
{}, #scriptdir::String
#, #source::Any
)
######## Original script #########
""".format(
JuliaEncoder.encode_namedlist(input),
JuliaEncoder.encode_namedlist(output),
JuliaEncoder.encode_namedlist(params),
JuliaEncoder.encode_namedlist(wildcards),
JuliaEncoder.encode_value(threads),
JuliaEncoder.encode_namedlist(log),
JuliaEncoder.encode_namedlist(
{
name: value
for name, value in resources.items()
if name != "_cores" and name != "_nodes"
}
),
JuliaEncoder.encode_dict(config),
JuliaEncoder.encode_value(rulename),
JuliaEncoder.encode_value(bench_iteration),
JuliaEncoder.encode_value(
os.path.dirname(path[7:])
if path.startswith("file://")
else os.path.dirname(path)
),
).replace("'", '"')
)
else:
raise ValueError(
"Unsupported script: Expecting either Python (.py), R (.R), RMarkdown (.Rmd) or Julia (.jl) script."
)
dir = ".snakemake/scripts"
os.makedirs(dir, exist_ok=True)
with tempfile.NamedTemporaryFile(
suffix="." + os.path.basename(path), dir=dir, delete=False
) as f:
if not language == "rmarkdown":
f.write(preamble.encode())
f.write(source)
else:
# Insert Snakemake object after the RMarkdown header
code = source.decode()
pos = next(islice(re.finditer(r"---\n", code), 1, 2)).start() + 3
f.write(str.encode(code[:pos]))
preamble = textwrap.dedent(
"""
```{r, echo=FALSE, message=FALSE, warning=FALSE}
%s
```
"""
% preamble
)
f.write(preamble.encode())
f.write(str.encode(code[pos:]))
if language == "python":
py_exec = sys.executable
if conda_env is not None:
py = os.path.join(conda_env, "bin", "python")
if os.path.exists(py):
out = subprocess.check_output(
[py, "--version"],
stderr=subprocess.STDOUT,
universal_newlines=True,
)
ver = tuple(
map(int, PY_VER_RE.match(out).group("ver_min").split("."))
)
if ver >= MIN_PY_VERSION:
# Python version is new enough, make use of environment
# to execute script
py_exec = "python"
else:
logger.warning(
"Conda environment defines Python "
"version < {0}.{1}. Using Python of the "
"master process to execute "
"script. Note that this cannot be avoided, "
"because the script uses data structures from "
"Snakemake which are Python >={0}.{1} "
"only.".format(*MIN_PY_VERSION)
)
if singularity_img is not None:
# use python from image
py_exec = "python"
# use the same Python as the running process or the one from the environment
shell("{py_exec} {f.name:q}", bench_record=bench_record)
elif language == "r":
if conda_env is not None and "R_LIBS" in os.environ:
logger.warning(
"R script job uses conda environment but "
"R_LIBS environment variable is set. This "
"is likely not intended, as R_LIBS can "
"interfere with R packages deployed via "
"conda. Consider running `unset R_LIBS` or "
"remove it entirely before executing "
"Snakemake."
)
shell("Rscript --vanilla {f.name:q}", bench_record=bench_record)
elif language == "rmarkdown":
if len(output) != 1:
raise WorkflowError(
"RMarkdown scripts (.Rmd) may only have a single output file."
)
out = os.path.abspath(output[0])
shell(
'Rscript --vanilla -e \'rmarkdown::render("{f.name}", output_file="{out}", quiet=TRUE, knit_root_dir = "{workdir}", params = list(rmd="{f.name}"))\'',
bench_record=bench_record,
workdir=os.getcwd(),
)
elif language == "julia":
shell("julia {f.name:q}", bench_record=bench_record)
except URLError as e:
raise WorkflowError(e)
finally:
if f and cleanup_scripts:
os.remove(f.name)
elif f:
logger.warning("Not cleaning up {} upon request.".format(f.name))
|
https://github.com/snakemake/snakemake/issues/143
|
snakemake --report
Building DAG of jobs...
Creating report...
Adding foo.rst (0 MB).
Could not detect mimetype for foo.rst, assuming text/plain.
Traceback (most recent call last):
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/__init__.py", line 618, in snakemake
batch=batch,
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/workflow.py", line 617, in execute
auto_report(dag, report)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 552, in auto_report
FileRecord(f, job, report_obj.caption, env, category)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/report/__init__.py", line 326, in __init__
self.params = logging.format_dict(job.params)
File "/mnt/work/Qc.py375.venv/lib/python3.7/site-packages/snakemake/logging.py", line 384, in format_dict
for name, value in dict.items()
TypeError: 'str' object is not callable
|
TypeError
|
def __init__(
self, *args, keep_local=False, stay_on_remote=False, is_default=False, **kwargs
):
super(RemoteProvider, self).__init__(
*args,
keep_local=keep_local,
stay_on_remote=stay_on_remote,
is_default=is_default,
**kwargs,
)
self._as = AzureStorageHelper(*args, **kwargs)
|
def __init__(self, *args, stay_on_remote=False, **kwargs):
super(RemoteProvider, self).__init__(*args, stay_on_remote=stay_on_remote, **kwargs)
self._as = AzureStorageHelper(*args, **kwargs)
|
https://github.com/snakemake/snakemake/issues/30
|
snakemake --tibanna --default-remote-prefix=tibanna/run_1
Traceback (most recent call last):
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/snakemake/__init__.py", line 421, in snakemake
keep_local=True, is_default=True
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/snakemake/remote/S3.py", line 41, in __init__
self._s3c = S3Helper(*args, **kwargs) # _private variable by convention
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/snakemake/remote/S3.py", line 167, in __init__
self.s3 = boto3.resource("s3", **kwargs)
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/boto3/__init__.py", line 100, in resource
return _get_default_session().resource(*args, **kwargs)
TypeError: resource() got an unexpected keyword argument 'keep_local'
|
TypeError
|
def __init__(
self, *args, keep_local=False, stay_on_remote=False, is_default=False, **kwargs
):
super(RemoteProvider, self).__init__(
*args,
keep_local=keep_local,
stay_on_remote=stay_on_remote,
is_default=is_default,
**kwargs,
) # in addition to methods provided by AbstractRemoteProvider, we add these in
self._s3c = S3Helper(*args, **kwargs) # _private variable by convention
|
def __init__(
self, *args, stay_on_remote=False, **kwargs
): # this method is evaluated when instantiating this class
super(RemoteProvider, self).__init__(
*args, stay_on_remote=stay_on_remote, **kwargs
) # in addition to methods provided by AbstractRemoteProvider, we add these in
self._s3c = S3Helper(*args, **kwargs) # _private variable by convention
|
https://github.com/snakemake/snakemake/issues/30
|
snakemake --tibanna --default-remote-prefix=tibanna/run_1
Traceback (most recent call last):
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/snakemake/__init__.py", line 421, in snakemake
keep_local=True, is_default=True
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/snakemake/remote/S3.py", line 41, in __init__
self._s3c = S3Helper(*args, **kwargs) # _private variable by convention
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/snakemake/remote/S3.py", line 167, in __init__
self.s3 = boto3.resource("s3", **kwargs)
File "/tmp/tmp.1BY3tqvG9v/venv/lib/python3.6/site-packages/boto3/__init__.py", line 100, in resource
return _get_default_session().resource(*args, **kwargs)
TypeError: resource() got an unexpected keyword argument 'keep_local'
|
TypeError
|
def createuser(email, password, superuser, no_password, no_input, force_update):
"Create a new user."
if not no_input:
if not email:
email = _get_email()
if not (password or no_password):
password = _get_password()
if superuser is None:
superuser = _get_superuser()
if superuser is None:
superuser = False
if not email:
raise click.ClickException("Invalid or missing email address.")
# TODO(mattrobenolt): Accept password over stdin?
if not no_password and not password:
raise click.ClickException("No password set and --no-password not passed.")
from sentry import roles
from sentry.models import User
from django.conf import settings
fields = dict(
email=email,
username=email,
is_superuser=superuser,
is_staff=superuser,
is_active=True,
)
verb = None
try:
user = User.objects.get(username=email)
except User.DoesNotExist:
user = None
if user is not None:
if force_update:
user.update(**fields)
verb = "updated"
else:
click.echo(f"User: {email} exists, use --force-update to force")
sys.exit(3)
else:
user = User.objects.create(**fields)
verb = "created"
# TODO(dcramer): kill this when we improve flows
if settings.SENTRY_SINGLE_ORGANIZATION:
from sentry.models import (
Organization,
OrganizationMember,
OrganizationMemberTeam,
Team,
)
org = Organization.get_default()
if superuser:
role = roles.get_top_dog().id
else:
role = org.default_role
member = OrganizationMember.objects.create(
organization=org, user=user, role=role
)
# if we've only got a single team let's go ahead and give
# access to that team as its likely the desired outcome
teams = list(Team.objects.filter(organization=org)[0:2])
if len(teams) == 1:
OrganizationMemberTeam.objects.create(
team=teams[0], organizationmember=member
)
click.echo(f"Added to organization: {org.slug}")
if password:
user.set_password(password)
user.save()
click.echo(f"User {verb}: {email}")
|
def createuser(email, password, superuser, no_password, no_input, force_update):
"Create a new user."
if not no_input:
if not email:
email = _get_email()
if not (password or no_password):
password = _get_password()
if superuser is None:
superuser = _get_superuser()
if superuser is None:
superuser = False
if not email:
raise click.ClickException("Invalid or missing email address.")
# TODO(mattrobenolt): Accept password over stdin?
if not no_password and not password:
raise click.ClickException("No password set and --no-password not passed.")
from sentry import roles
from sentry.models import User
from django.conf import settings
user = User(
email=email,
username=email,
is_superuser=superuser,
is_staff=superuser,
is_active=True,
)
if password:
user.set_password(password)
if User.objects.filter(username=email).exists():
if force_update:
user.save(force_update=force_update)
click.echo(f"User updated: {email}")
else:
click.echo(f"User: {email} exists, use --force-update to force")
sys.exit(3)
else:
user.save()
click.echo(f"User created: {email}")
# TODO(dcramer): kill this when we improve flows
if settings.SENTRY_SINGLE_ORGANIZATION:
from sentry.models import (
Organization,
OrganizationMember,
OrganizationMemberTeam,
Team,
)
org = Organization.get_default()
if superuser:
role = roles.get_top_dog().id
else:
role = org.default_role
member = OrganizationMember.objects.create(
organization=org, user=user, role=role
)
# if we've only got a single team let's go ahead and give
# access to that team as its likely the desired outcome
teams = list(Team.objects.filter(organization=org)[0:2])
if len(teams) == 1:
OrganizationMemberTeam.objects.create(
team=teams[0], organizationmember=member
)
click.echo(f"Added to organization: {org.slug}")
|
https://github.com/getsentry/sentry/issues/19139
|
$ sudo docker-compose run --rm web sentry --version
sentry, version 10.1.0.dev0 (0bf9ffa08ff2)
$ sudo docker-compose run --rm web sentry createuser --email testuser@test.com --password pass1 --no-superuser --no-input --force-update
21:32:17 [WARNING] sentry.utils.geo: settings.GEOIP_PATH_MMDB not configured.
21:32:21 [INFO] sentry.plugins.github: apps-not-configured
User created: testuser@test.com
Added to organization: sentry
$ sudo docker-compose run --rm web sentry createuser --email testuser@test.com --password pass2 --no-superuser --no-input --force-update
21:33:46 [WARNING] sentry.utils.geo: settings.GEOIP_PATH_MMDB not configured.
21:33:49 [INFO] sentry.plugins.github: apps-not-configured
Traceback (most recent call last):
File "/usr/local/bin/sentry", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/site-packages/sentry/runner/__init__.py", line 166, in main
cli(prog_name=get_prog(), obj={}, max_content_width=100)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/decorators.py", line 17, in new_func
return f(get_current_context(), *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/runner/decorators.py", line 30, in inner
return ctx.invoke(f, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/sentry/runner/commands/createuser.py", line 83, in createuser
user.save(force_update=force_update)
File "/usr/local/lib/python2.7/site-packages/sentry/models/user.py", line 141, in save
return super(User, self).save(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/django/contrib/auth/base_user.py", line 80, in save
super(AbstractBaseUser, self).save(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/django/db/models/base.py", line 808, in save
force_update=force_update, update_fields=update_fields)
File "/usr/local/lib/python2.7/site-packages/django/db/models/base.py", line 838, in save_base
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
File "/usr/local/lib/python2.7/site-packages/django/db/models/base.py", line 896, in _save_table
raise ValueError("Cannot force an update in save() with no primary key.")
ValueError: Cannot force an update in save() with no primary key.
|
ValueError
|
def put(self, request):
# TODO(dcramer): this should validate options before saving them
for k, v in six.iteritems(request.data):
if v and isinstance(v, six.string_types):
v = v.strip()
try:
option = options.lookup_key(k)
except options.UnknownOption:
# TODO(dcramer): unify API errors
return Response(
{"error": "unknown_option", "errorDetail": {"option": k}}, status=400
)
try:
if not (option.flags & options.FLAG_ALLOW_EMPTY) and not v:
options.delete(k)
else:
options.set(k, v)
except (TypeError, AssertionError) as e:
# TODO(chadwhitacre): Use a custom exception for the
# immutability case, especially since asserts disappear with
# `python -O`.
return Response(
{
"error": "invalid_type"
if type(e) is TypeError
else "immutable_option",
"errorDetail": {"option": k, "message": six.text_type(e)},
},
status=400,
)
# TODO(dcramer): this has nothing to do with configuring options and
# should not be set here
options.set("sentry:version-configured", sentry.get_version())
return Response(status=200)
|
def put(self, request):
# TODO(dcramer): this should validate options before saving them
for k, v in six.iteritems(request.data):
if v and isinstance(v, six.string_types):
v = v.strip()
try:
option = options.lookup_key(k)
except options.UnknownOption:
# TODO(dcramer): unify API errors
return Response(
{"error": "unknown_option", "errorDetail": {"option": k}}, status=400
)
try:
if not (option.flags & options.FLAG_ALLOW_EMPTY) and not v:
options.delete(k)
else:
options.set(k, v)
except TypeError as e:
return Response(
{
"error": "invalid_type",
"errorDetail": {"option": k, "message": six.text_type(e)},
},
status=400,
)
# TODO(dcramer): this has nothing to do with configuring options and
# should not be set here
options.set("sentry:version-configured", sentry.get_version())
return Response(status=200)
|
https://github.com/getsentry/sentry/issues/21159
|
Oct 6 07:18:49 jsentry sentry[4128]: 10.100.33.5 - - [06/Oct/2020:04:18:49 +0000] "GET /api/0/internal/options/ HTTP/1.1" 200 20407 "https://sentry.findmykids.org/manage/settings/" "Mozilla/5.0 (X11; FreeBSD amd64; rv:76.0) Gecko/20100101 Firefox/76.0"
Oct 6 07:19:09 jsentry sentry[4128]: Traceback (most recent call last):
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/base.py", line 134, in handle_exception
Oct 6 07:19:09 jsentry sentry[4128]: response = super(Endpoint, self).handle_exception(exc)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/djangorestframework-3.6.4-py2.7.egg/rest_framework/views.py", line 449, in handle_exception
Oct 6 07:19:09 jsentry sentry[4128]: self.raise_uncaught_exception(exc)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/base.py", line 247, in dispatch
Oct 6 07:19:09 jsentry sentry[4128]: response = handler(request, *args, **kwargs)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/api/endpoints/system_options.py", line 74, in put
Oct 6 07:19:09 jsentry sentry[4128]: options.set(k, v)
Oct 6 07:19:09 jsentry sentry[4128]: File "/usr/local/lib/python2.7/site-packages/sentry-20.8.0-py2.7.egg/sentry/options/manager.py", line 83, in set
Oct 6 07:19:09 jsentry sentry[4128]: "%r cannot be changed at runtime because it is configured on disk" % key
Oct 6 07:19:09 jsentry sentry[4128]: AssertionError: u'system.url-prefix' cannot be changed at runtime because it is configured on disk
Oct 6 07:19:09 jsentry sentry[4128]: 10.100.33.5 - - [06/Oct/2020:04:19:09 +0000] "PUT /api/0/internal/options/ HTTP/1.1" 500 746 "https://sentry.findmykids.org/manage/settings/" "Mozilla/5.0 (X11; FreeBSD amd64; rv:76.0) Gecko/20100101 Firefox/76.0"
|
AssertionError
|
def wrap_index(typingctx, idx, size):
"""
Calculate index value "idx" relative to a size "size" value as
(idx % size), where "size" is known to be positive.
Note that we use the mod(%) operation here instead of
(idx < 0 ? idx + size : idx) because we may have situations
where idx > size due to the way indices are calculated
during slice/range analysis.
"""
unified_ty = typingctx.unify_types(idx, size)
# Mixing signed and unsigned ints will unify to double which is
# no good for indexing. If the unified type is not an integer
# then just use int64 as the common index type. This does have
# some overflow potential if the unsigned value is greater than
# 2**63.
if not isinstance(unified_ty, types.Integer):
unified_ty = types.int64
def codegen(context, builder, sig, args):
ll_unified_ty = context.get_data_type(unified_ty)
idx = builder.sext(args[0], ll_unified_ty)
size = builder.sext(args[1], ll_unified_ty)
neg_size = builder.neg(size)
zero = llvmlite.ir.Constant(ll_unified_ty, 0)
idx_negative = builder.icmp_signed("<", idx, zero)
pos_oversize = builder.icmp_signed(">=", idx, size)
neg_oversize = builder.icmp_signed("<=", idx, neg_size)
pos_res = builder.select(pos_oversize, size, idx)
neg_res = builder.select(neg_oversize, zero, builder.add(idx, size))
mod = builder.select(idx_negative, neg_res, pos_res)
return mod
return signature(unified_ty, idx, size), codegen
|
def wrap_index(typingctx, idx, size):
"""
Calculate index value "idx" relative to a size "size" value as
(idx % size), where "size" is known to be positive.
Note that we use the mod(%) operation here instead of
(idx < 0 ? idx + size : idx) because we may have situations
where idx > size due to the way indices are calculated
during slice/range analysis.
"""
unified_ty = typingctx.unify_types(idx, size)
if not unified_ty:
raise ValueError("Argument types for wrap_index must match")
def codegen(context, builder, sig, args):
ll_unified_ty = context.get_data_type(unified_ty)
idx = builder.sext(args[0], ll_unified_ty)
size = builder.sext(args[1], ll_unified_ty)
neg_size = builder.neg(size)
zero = llvmlite.ir.Constant(ll_unified_ty, 0)
idx_negative = builder.icmp_signed("<", idx, zero)
pos_oversize = builder.icmp_signed(">=", idx, size)
neg_oversize = builder.icmp_signed("<=", idx, neg_size)
pos_res = builder.select(pos_oversize, size, idx)
neg_res = builder.select(neg_oversize, zero, builder.add(idx, size))
mod = builder.select(idx_negative, neg_res, pos_res)
return mod
return signature(unified_ty, idx, size), codegen
|
https://github.com/numba/numba/issues/6774
|
Traceback (most recent call last):
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/errors.py", line 744, in new_error_context
yield
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/lowering.py", line 230, in lower_block
self.lower_inst(inst)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/lowering.py", line 443, in lower_inst
func(self, inst)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/parfors/parfor_lowering.py", line 259, in _lower_parfor_parallel
exp_name_to_tuple_var) = _create_gufunc_for_parfor_body(
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/parfors/parfor_lowering.py", line 1361, in _create_gufunc_for_parfor_body
kernel_func = compiler.compile_ir(
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 665, in compile_ir
return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 362, in compile_ir
return self._compile_ir()
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 421, in _compile_ir
return self._compile_core()
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 394, in _compile_core
raise e
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 385, in _compile_core
pm.run(self.state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 339, in run
raise patched_exception
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 330, in run
self._runPass(idx, pass_inst, state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 289, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 262, in check
mangled = func(compiler_state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/typed_passes.py", line 449, in run_pass
NativeLowering().run_pass(state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/typed_passes.py", line 373, in run_pass
lower.lower()
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/lowering.py", line 174, in lower
self.library.add_ir_module(self.module)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/codegen.py", line 648, in add_ir_module
ll_module = ll.parse_assembly(ir)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/llvmlite/binding/module.py", line 25, in parse_assembly
raise RuntimeError("LLVM IR parsing error\n{0}".format(errmsg))
RuntimeError: Failed in nopython mode pipeline (step: nopython mode backend)
LLVM IR parsing error
<string>:447:18: error: invalid cast opcode for cast from 'i64' to 'double'
%".364" = sext i64 %".362" to double
^
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/auderson/miniconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3437, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-77-81f1eb0ce800>", line 11, in <module>
test(np.random.randn(10), 2)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/dispatcher.py", line 433, in _compile_for_args
raise e
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/dispatcher.py", line 366, in _compile_for_args
return self.compile(tuple(argtypes))
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/dispatcher.py", line 857, in compile
cres = self._compiler.compile(args, return_type)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/dispatcher.py", line 104, in _compile_core
cres = compiler.compile_extra(self.targetdescr.typing_context,
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 602, in compile_extra
return pipeline.compile_extra(func)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 352, in compile_extra
return self._compile_bytecode()
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 414, in _compile_bytecode
return self._compile_core()
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 394, in _compile_core
raise e
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler.py", line 385, in _compile_core
pm.run(self.state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 339, in run
raise patched_exception
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 330, in run
self._runPass(idx, pass_inst, state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 289, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/compiler_machinery.py", line 262, in check
mangled = func(compiler_state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/typed_passes.py", line 449, in run_pass
NativeLowering().run_pass(state)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/typed_passes.py", line 373, in run_pass
lower.lower()
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/lowering.py", line 136, in lower
self.lower_normal_function(self.fndesc)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/lowering.py", line 190, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/lowering.py", line 216, in lower_function_body
self.lower_block(block)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/lowering.py", line 230, in lower_block
self.lower_inst(inst)
File "/home/auderson/miniconda3/lib/python3.8/contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "/home/auderson/miniconda3/lib/python3.8/site-packages/numba/core/errors.py", line 751, in new_error_context
raise newerr.with_traceback(tb)
numba.core.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython mode backend)
LLVM IR parsing error
<string>:447:18: error: invalid cast opcode for cast from 'i64' to 'double'
%".364" = sext i64 %".362" to double
^
File "<ipython-input-77-81f1eb0ce800>", line 6:
def test(x, d):
<source elided>
result = np.empty(n)
for i in prange(n - d + 1):
^
During: lowering "id=215[LoopNest(index_variable = parfor_index.19055, range = (0, $40binary_add.16, 1))]{48: <ir.Block at <ipython-input-77-81f1eb0ce800> (6)>}Var(parfor_index.19055, <ipython-input-77-81f1eb0ce800>:6)" at <ipython-input-77-81f1eb0ce800> (6)
|
RuntimeError
|
def _match_array_expr(self, instr, expr, target_name):
"""
Find whether the given assignment (*instr*) of an expression (*expr*)
to variable *target_name* is an array expression.
"""
# We've matched a subexpression assignment to an
# array variable. Now see if the expression is an
# array expression.
expr_op = expr.op
array_assigns = self.array_assigns
if (expr_op in ("unary", "binop")) and (
expr.fn in npydecl.supported_array_operators
):
# It is an array operator that maps to a ufunc.
# check that all args have internal types
if all(self.typemap[var.name].is_internal for var in expr.list_vars()):
array_assigns[target_name] = instr
elif (expr_op == "call") and (expr.func.name in self.typemap):
# It could be a match for a known ufunc call.
func_type = self.typemap[expr.func.name]
if isinstance(func_type, types.Function):
func_key = func_type.typing_key
if _is_ufunc(func_key):
# If so, check whether an explicit output is passed.
if not self._has_explicit_output(expr, func_key):
# If not, match it as a (sub)expression.
array_assigns[target_name] = instr
|
def _match_array_expr(self, instr, expr, target_name):
"""
Find whether the given assignment (*instr*) of an expression (*expr*)
to variable *target_name* is an array expression.
"""
# We've matched a subexpression assignment to an
# array variable. Now see if the expression is an
# array expression.
expr_op = expr.op
array_assigns = self.array_assigns
if (expr_op in ("unary", "binop")) and (
expr.fn in npydecl.supported_array_operators
):
# It is an array operator that maps to a ufunc.
array_assigns[target_name] = instr
elif (expr_op == "call") and (expr.func.name in self.typemap):
# It could be a match for a known ufunc call.
func_type = self.typemap[expr.func.name]
if isinstance(func_type, types.Function):
func_key = func_type.typing_key
if _is_ufunc(func_key):
# If so, check whether an explicit output is passed.
if not self._has_explicit_output(expr, func_key):
# If not, match it as a (sub)expression.
array_assigns[target_name] = instr
|
https://github.com/numba/numba/issues/5157
|
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython mode backend)
Failed in nopython mode pipeline (step: nopython frontend)
No conversion from array(int64, 1d, C) to int64 for '$8return_value.3', defined at None
File "array_expr_issue.py", line 68:
def foo(x, y):
return Dummy(x) + Dummy(y)
^
[1] During: typing of assignment at /Users/siu/dev/numba-git/array_expr_issue.py (68)
File "array_expr_issue.py", line 68:
def foo(x, y):
return Dummy(x) + Dummy(y)
^
[1] During: lowering "$14binary_add.6 = arrayexpr(expr=(<built-in function add>, [Var($6call_function.2, array_expr_issue.py:68), Var($12call_function.5, array_expr_issue.py:68)]), ty=array(int64, 1d, C))" at /Users/siu/dev/numba-git/array_expr_issue.py (68)
|
numba.errors.TypingError
|
def _create_empty_module(self, name):
ir_module = lc.Module(name)
ir_module.triple = CUDA_TRIPLE[utils.MACHINE_BITS]
if self._data_layout:
ir_module.data_layout = self._data_layout
nvvm.add_ir_version(ir_module)
return ir_module
|
def _create_empty_module(self, name):
ir_module = lc.Module(name)
ir_module.triple = CUDA_TRIPLE[utils.MACHINE_BITS]
if self._data_layout:
ir_module.data_layout = self._data_layout
return ir_module
|
https://github.com/numba/numba/issues/6719
|
Traceback (most recent call last):
File "repro.py", line 6, in <module>
cuda.compile_ptx_for_current_device(f, [], device=True, debug=True)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 124, in compile_ptx_for_current_device
return compile_ptx(pyfunc, args, debug=-debug, device=device,
File "/home/gmarkall/numbadev/numba/numba/core/compiler_lock.py", line 35, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 114, in compile_ptx
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **options)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 669, in llvm_to_ptx
ptx = cu.compile(**opts)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 265, in compile
self._try_error(err, 'Failed to compile\n')
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 283, in _try_error
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 173, in check_error
raise exc
numba.cuda.cudadrv.error.NvvmError: Failed to compile
DBG version 1.0 incompatible with current version 3.0
<unnamed>: error: incompatible IR detected. Possible mix of compiler/IR from different releases.
NVVM_ERROR_IR_VERSION_MISMATCH
|
numba.cuda.cudadrv.error.NvvmError
|
def compile_cuda(pyfunc, return_type, args, debug=False, inline=False):
from .descriptor import cuda_target
typingctx = cuda_target.typingctx
targetctx = cuda_target.targetctx
flags = compiler.Flags()
# Do not compile (generate native code), just lower (to LLVM)
flags.set("no_compile")
flags.set("no_cpython_wrapper")
flags.set("no_cfunc_wrapper")
if debug:
flags.set("debuginfo")
if inline:
flags.set("forceinline")
# Run compilation pipeline
cres = compiler.compile_extra(
typingctx=typingctx,
targetctx=targetctx,
func=pyfunc,
args=args,
return_type=return_type,
flags=flags,
locals={},
)
library = cres.library
library.finalize()
return cres
|
def compile_cuda(pyfunc, return_type, args, debug=False, inline=False):
# First compilation will trigger the initialization of the CUDA backend.
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
# TODO handle debug flag
flags = compiler.Flags()
# Do not compile (generate native code), just lower (to LLVM)
flags.set("no_compile")
flags.set("no_cpython_wrapper")
flags.set("no_cfunc_wrapper")
if debug:
flags.set("debuginfo")
if inline:
flags.set("forceinline")
# Run compilation pipeline
cres = compiler.compile_extra(
typingctx=typingctx,
targetctx=targetctx,
func=pyfunc,
args=args,
return_type=return_type,
flags=flags,
locals={},
)
library = cres.library
library.finalize()
return cres
|
https://github.com/numba/numba/issues/6719
|
Traceback (most recent call last):
File "repro.py", line 6, in <module>
cuda.compile_ptx_for_current_device(f, [], device=True, debug=True)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 124, in compile_ptx_for_current_device
return compile_ptx(pyfunc, args, debug=-debug, device=device,
File "/home/gmarkall/numbadev/numba/numba/core/compiler_lock.py", line 35, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 114, in compile_ptx
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **options)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 669, in llvm_to_ptx
ptx = cu.compile(**opts)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 265, in compile
self._try_error(err, 'Failed to compile\n')
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 283, in _try_error
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 173, in check_error
raise exc
numba.cuda.cudadrv.error.NvvmError: Failed to compile
DBG version 1.0 incompatible with current version 3.0
<unnamed>: error: incompatible IR detected. Possible mix of compiler/IR from different releases.
NVVM_ERROR_IR_VERSION_MISMATCH
|
numba.cuda.cudadrv.error.NvvmError
|
def compile_device_template(pyfunc, debug=False, inline=False, opt=True):
"""Create a DeviceFunctionTemplate object and register the object to
the CUDA typing context.
"""
from .descriptor import cuda_target
dft = DeviceFunctionTemplate(pyfunc, debug=debug, inline=inline, opt=opt)
class device_function_template(AbstractTemplate):
key = dft
def generic(self, args, kws):
assert not kws
return dft.compile(args).signature
def get_template_info(cls):
basepath = os.path.dirname(os.path.dirname(numba.__file__))
code, firstlineno = inspect.getsourcelines(pyfunc)
path = inspect.getsourcefile(pyfunc)
sig = str(utils.pysignature(pyfunc))
info = {
"kind": "overload",
"name": getattr(cls.key, "__name__", "unknown"),
"sig": sig,
"filename": utils.safe_relpath(path, start=basepath),
"lines": (firstlineno, firstlineno + len(code) - 1),
"docstring": pyfunc.__doc__,
}
return info
typingctx = cuda_target.typingctx
typingctx.insert_user_function(dft, device_function_template)
return dft
|
def compile_device_template(pyfunc, debug=False, inline=False, opt=True):
"""Create a DeviceFunctionTemplate object and register the object to
the CUDA typing context.
"""
from .descriptor import CUDATargetDesc
dft = DeviceFunctionTemplate(pyfunc, debug=debug, inline=inline, opt=opt)
class device_function_template(AbstractTemplate):
key = dft
def generic(self, args, kws):
assert not kws
return dft.compile(args).signature
def get_template_info(cls):
basepath = os.path.dirname(os.path.dirname(numba.__file__))
code, firstlineno = inspect.getsourcelines(pyfunc)
path = inspect.getsourcefile(pyfunc)
sig = str(utils.pysignature(pyfunc))
info = {
"kind": "overload",
"name": getattr(cls.key, "__name__", "unknown"),
"sig": sig,
"filename": utils.safe_relpath(path, start=basepath),
"lines": (firstlineno, firstlineno + len(code) - 1),
"docstring": pyfunc.__doc__,
}
return info
typingctx = CUDATargetDesc.typingctx
typingctx.insert_user_function(dft, device_function_template)
return dft
|
https://github.com/numba/numba/issues/6719
|
Traceback (most recent call last):
File "repro.py", line 6, in <module>
cuda.compile_ptx_for_current_device(f, [], device=True, debug=True)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 124, in compile_ptx_for_current_device
return compile_ptx(pyfunc, args, debug=-debug, device=device,
File "/home/gmarkall/numbadev/numba/numba/core/compiler_lock.py", line 35, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 114, in compile_ptx
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **options)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 669, in llvm_to_ptx
ptx = cu.compile(**opts)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 265, in compile
self._try_error(err, 'Failed to compile\n')
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 283, in _try_error
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 173, in check_error
raise exc
numba.cuda.cudadrv.error.NvvmError: Failed to compile
DBG version 1.0 incompatible with current version 3.0
<unnamed>: error: incompatible IR detected. Possible mix of compiler/IR from different releases.
NVVM_ERROR_IR_VERSION_MISMATCH
|
numba.cuda.cudadrv.error.NvvmError
|
def declare_device_function(name, restype, argtypes):
from .descriptor import cuda_target
typingctx = cuda_target.typingctx
targetctx = cuda_target.targetctx
sig = typing.signature(restype, *argtypes)
extfn = ExternFunction(name, sig)
class device_function_template(ConcreteTemplate):
key = extfn
cases = [sig]
fndesc = funcdesc.ExternalFunctionDescriptor(
name=name, restype=restype, argtypes=argtypes
)
typingctx.insert_user_function(extfn, device_function_template)
targetctx.insert_user_function(extfn, fndesc)
return extfn
|
def declare_device_function(name, restype, argtypes):
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
sig = typing.signature(restype, *argtypes)
extfn = ExternFunction(name, sig)
class device_function_template(ConcreteTemplate):
key = extfn
cases = [sig]
fndesc = funcdesc.ExternalFunctionDescriptor(
name=name, restype=restype, argtypes=argtypes
)
typingctx.insert_user_function(extfn, device_function_template)
targetctx.insert_user_function(extfn, fndesc)
return extfn
|
https://github.com/numba/numba/issues/6719
|
Traceback (most recent call last):
File "repro.py", line 6, in <module>
cuda.compile_ptx_for_current_device(f, [], device=True, debug=True)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 124, in compile_ptx_for_current_device
return compile_ptx(pyfunc, args, debug=-debug, device=device,
File "/home/gmarkall/numbadev/numba/numba/core/compiler_lock.py", line 35, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 114, in compile_ptx
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **options)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 669, in llvm_to_ptx
ptx = cu.compile(**opts)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 265, in compile
self._try_error(err, 'Failed to compile\n')
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 283, in _try_error
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 173, in check_error
raise exc
numba.cuda.cudadrv.error.NvvmError: Failed to compile
DBG version 1.0 incompatible with current version 3.0
<unnamed>: error: incompatible IR detected. Possible mix of compiler/IR from different releases.
NVVM_ERROR_IR_VERSION_MISMATCH
|
numba.cuda.cudadrv.error.NvvmError
|
def __init__(self, py_func, sigs, targetoptions):
self.py_func = py_func
self.sigs = []
self.link = targetoptions.pop(
"link",
(),
)
self._can_compile = True
# Specializations for given sets of argument types
self.specializations = {}
# A mapping of signatures to compile results
self.overloads = collections.OrderedDict()
self.targetoptions = targetoptions
# defensive copy
self.targetoptions["extensions"] = list(self.targetoptions.get("extensions", []))
from .descriptor import cuda_target
self.typingctx = cuda_target.typingctx
self._tm = default_type_manager
pysig = utils.pysignature(py_func)
arg_count = len(pysig.parameters)
argnames = tuple(pysig.parameters)
default_values = self.py_func.__defaults__ or ()
defargs = tuple(OmittedArg(val) for val in default_values)
can_fallback = False # CUDA cannot fallback to object mode
try:
lastarg = list(pysig.parameters.values())[-1]
except IndexError:
has_stararg = False
else:
has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL
exact_match_required = False
_dispatcher.Dispatcher.__init__(
self,
self._tm.get_pointer(),
arg_count,
self._fold_args,
argnames,
defargs,
can_fallback,
has_stararg,
exact_match_required,
)
if sigs:
if len(sigs) > 1:
raise TypeError("Only one signature supported at present")
self.compile(sigs[0])
self._can_compile = False
|
def __init__(self, py_func, sigs, targetoptions):
self.py_func = py_func
self.sigs = []
self.link = targetoptions.pop(
"link",
(),
)
self._can_compile = True
# Specializations for given sets of argument types
self.specializations = {}
# A mapping of signatures to compile results
self.overloads = collections.OrderedDict()
self.targetoptions = targetoptions
# defensive copy
self.targetoptions["extensions"] = list(self.targetoptions.get("extensions", []))
from .descriptor import CUDATargetDesc
self.typingctx = CUDATargetDesc.typingctx
self._tm = default_type_manager
pysig = utils.pysignature(py_func)
arg_count = len(pysig.parameters)
argnames = tuple(pysig.parameters)
default_values = self.py_func.__defaults__ or ()
defargs = tuple(OmittedArg(val) for val in default_values)
can_fallback = False # CUDA cannot fallback to object mode
try:
lastarg = list(pysig.parameters.values())[-1]
except IndexError:
has_stararg = False
else:
has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL
exact_match_required = False
_dispatcher.Dispatcher.__init__(
self,
self._tm.get_pointer(),
arg_count,
self._fold_args,
argnames,
defargs,
can_fallback,
has_stararg,
exact_match_required,
)
if sigs:
if len(sigs) > 1:
raise TypeError("Only one signature supported at present")
self.compile(sigs[0])
self._can_compile = False
|
https://github.com/numba/numba/issues/6719
|
Traceback (most recent call last):
File "repro.py", line 6, in <module>
cuda.compile_ptx_for_current_device(f, [], device=True, debug=True)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 124, in compile_ptx_for_current_device
return compile_ptx(pyfunc, args, debug=-debug, device=device,
File "/home/gmarkall/numbadev/numba/numba/core/compiler_lock.py", line 35, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 114, in compile_ptx
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **options)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 669, in llvm_to_ptx
ptx = cu.compile(**opts)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 265, in compile
self._try_error(err, 'Failed to compile\n')
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 283, in _try_error
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 173, in check_error
raise exc
numba.cuda.cudadrv.error.NvvmError: Failed to compile
DBG version 1.0 incompatible with current version 3.0
<unnamed>: error: incompatible IR detected. Possible mix of compiler/IR from different releases.
NVVM_ERROR_IR_VERSION_MISMATCH
|
numba.cuda.cudadrv.error.NvvmError
|
def set_cuda_kernel(lfunc):
from llvmlite.llvmpy.core import MetaData, MetaDataString, Constant, Type
mod = lfunc.module
ops = lfunc, MetaDataString.get(mod, "kernel"), Constant.int(Type.int(), 1)
md = MetaData.get(mod, ops)
nmd = mod.get_or_insert_named_metadata("nvvm.annotations")
nmd.add(md)
|
def set_cuda_kernel(lfunc):
from llvmlite.llvmpy.core import MetaData, MetaDataString, Constant, Type
m = lfunc.module
ops = lfunc, MetaDataString.get(m, "kernel"), Constant.int(Type.int(), 1)
md = MetaData.get(m, ops)
nmd = m.get_or_insert_named_metadata("nvvm.annotations")
nmd.add(md)
# Set NVVM IR version
i32 = ir.IntType(32)
if NVVM().is_nvvm70:
# NVVM IR 1.6, DWARF 3.0
ir_versions = [i32(1), i32(6), i32(3), i32(0)]
else:
# NVVM IR 1.1, DWARF 2.0
ir_versions = [i32(1), i32(2), i32(2), i32(0)]
md_ver = m.add_metadata(ir_versions)
m.add_named_metadata("nvvmir.version", md_ver)
|
https://github.com/numba/numba/issues/6719
|
Traceback (most recent call last):
File "repro.py", line 6, in <module>
cuda.compile_ptx_for_current_device(f, [], device=True, debug=True)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 124, in compile_ptx_for_current_device
return compile_ptx(pyfunc, args, debug=-debug, device=device,
File "/home/gmarkall/numbadev/numba/numba/core/compiler_lock.py", line 35, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/gmarkall/numbadev/numba/numba/cuda/compiler.py", line 114, in compile_ptx
ptx = nvvm.llvm_to_ptx(llvmir, opt=opt, arch=arch, **options)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 669, in llvm_to_ptx
ptx = cu.compile(**opts)
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 265, in compile
self._try_error(err, 'Failed to compile\n')
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 283, in _try_error
self.driver.check_error(err, "%s\n%s" % (msg, self.get_log()))
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/nvvm.py", line 173, in check_error
raise exc
numba.cuda.cudadrv.error.NvvmError: Failed to compile
DBG version 1.0 incompatible with current version 3.0
<unnamed>: error: incompatible IR detected. Possible mix of compiler/IR from different releases.
NVVM_ERROR_IR_VERSION_MISMATCH
|
numba.cuda.cudadrv.error.NvvmError
|
def run_pass(self, state):
# Determine whether to even attempt this pass... if there's no
# `literal_unroll as a global or as a freevar then just skip.
found = False
func_ir = state.func_ir
for blk in func_ir.blocks.values():
for asgn in blk.find_insts(ir.Assign):
if isinstance(asgn.value, (ir.Global, ir.FreeVar)):
if asgn.value.value is literal_unroll:
found = True
break
if found:
break
if not found:
return False
# run as subpipeline
from numba.core.compiler_machinery import PassManager
from numba.core.typed_passes import PartialTypeInference
pm = PassManager("literal_unroll_subpipeline")
# get types where possible to help with list->tuple change
pm.add_pass(PartialTypeInference, "performs partial type inference")
# make const lists tuples
pm.add_pass(TransformLiteralUnrollConstListToTuple, "switch const list for tuples")
# recompute partial typemap following IR change
pm.add_pass(PartialTypeInference, "performs partial type inference")
# canonicalise loops
pm.add_pass(IterLoopCanonicalization, "switch iter loops for range driven loops")
# rewrite consts
pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
# do the unroll
pm.add_pass(MixedContainerUnroller, "performs mixed container unroll")
# rewrite dynamic getitem to static getitem as it's possible some more
# getitems will now be statically resolvable
pm.add_pass(GenericRewrites, "Generic Rewrites")
pm.finalize()
pm.run(state)
return True
|
def run_pass(self, state):
# Determine whether to even attempt this pass... if there's no
# `literal_unroll as a global or as a freevar then just skip.
found = False
func_ir = state.func_ir
for blk in func_ir.blocks.values():
for asgn in blk.find_insts(ir.Assign):
if isinstance(asgn.value, (ir.Global, ir.FreeVar)):
if asgn.value.value is literal_unroll:
found = True
break
if found:
break
if not found:
return False
# run as subpipeline
from numba.core.compiler_machinery import PassManager
from numba.core.typed_passes import PartialTypeInference
pm = PassManager("literal_unroll_subpipeline")
# get types where possible to help with list->tuple change
pm.add_pass(PartialTypeInference, "performs partial type inference")
# make const lists tuples
pm.add_pass(TransformLiteralUnrollConstListToTuple, "switch const list for tuples")
# recompute partial typemap following IR change
pm.add_pass(PartialTypeInference, "performs partial type inference")
# canonicalise loops
pm.add_pass(IterLoopCanonicalization, "switch iter loops for range driven loops")
# rewrite consts
pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
# do the unroll
pm.add_pass(MixedContainerUnroller, "performs mixed container unroll")
pm.finalize()
pm.run(state)
return True
|
https://github.com/numba/numba/issues/6634
|
Using matplotlib backend: Qt5Agg
Populating the interactive namespace from numpy and matplotlib
0.26156099390229226
0.4919898206995005
0.363624558244539
---------------------------------------------------------------------------
TypingError Traceback (most recent call last)
<ipython-input-1-56c6c55bddcf> in <module>
26
27 recarriterator2(rec)
---> 28 recarriterator(rec)
~\anaconda3\lib\site-packages\numba\core\dispatcher.py in _compile_for_args(self, *args, **kws)
413 e.patch_message(msg)
414
--> 415 error_rewrite(e, 'typing')
416 except errors.UnsupportedError as e:
417 # Something unsupported is present in the user code, add help info
~\anaconda3\lib\site-packages\numba\core\dispatcher.py in error_rewrite(e, issue_type)
356 raise e
357 else:
--> 358 reraise(type(e), e, None)
359
360 argtypes = []
~\anaconda3\lib\site-packages\numba\core\utils.py in reraise(tp, value, tb)
78 value = tp()
79 if value.__traceback__ is not tb:
---> 80 raise value.with_traceback(tb)
81 raise value
82
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
No implementation of function Function(<built-in function getitem>) found for signature:
>>> getitem(unaligned array(Record(a[type=float64;offset=0],b[type=float64;offset=8],c[type=float64;offset=16];24;False), 1d, C), unicode_type)
There are 22 candidate implementations:
- Of which 20 did not match due to:
Overload of function 'getitem': File: <numerous>: Line N/A.
With argument(s): '(unaligned array(Record(a[type=float64;offset=0],b[type=float64;offset=8],c[type=float64;offset=16];24;False), 1d, C), unicode_type)':
No match.
- Of which 2 did not match due to:
Overload in function 'GetItemBuffer.generic': File: numba\core\typing\arraydecl.py: Line 162.
With argument(s): '(unaligned array(Record(a[type=float64;offset=0],b[type=float64;offset=8],c[type=float64;offset=16];24;False), 1d, C), unicode_type)':
Rejected as the implementation raised a specific error:
TypeError: unsupported array index type unicode_type in [unicode_type]
raised from C:\Users\leopo\anaconda3\lib\site-packages\numba\core\typing\arraydecl.py:68
During: typing of intrinsic-call at <ipython-input-1-56c6c55bddcf> (8)
File "<ipython-input-1-56c6c55bddcf>", line 8:
def recarriterator(rec):
<source elided>
for o in 'a','b','c':
print(rec[o][0])
^
|
TypingError
|
def np_asfarray(a, dtype=np.float64):
# convert numba dtype types into NumPy dtype
if isinstance(dtype, types.Type):
dtype = as_dtype(dtype)
if not np.issubdtype(dtype, np.inexact):
dx = types.float64
else:
dx = dtype
def impl(a, dtype=np.float64):
return np.asarray(a, dx)
return impl
|
def np_asfarray(a, dtype=np.float64):
dtype = as_dtype(dtype)
if not np.issubdtype(dtype, np.inexact):
dx = types.float64
else:
dx = dtype
def impl(a, dtype=np.float64):
return np.asarray(a, dx)
return impl
|
https://github.com/numba/numba/issues/6449
|
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
No implementation of function Function(<function asfarray at 0x7ff2ebd55ef0>) found for signature:
>>> asfarray(array(float64, 1d, C))
There are 2 candidate implementations:
- Of which 2 did not match due to:
Overload in function 'np_asfarray': File: numba/np/arraymath.py: Line 4089.
With argument(s): '(array(float64, 1d, C))':
Rejected as the implementation raised a specific error:
NotImplementedError: <class 'numpy.float64'> cannot be represented as a Numpy dtype
Traceback (most recent call last):
File "<path>/numba/core/typing/templates.py", line 677, in _get_impl
impl, args = self._impl_cache[cache_key]
KeyError: (<numba.core.typing.context.Context object at 0x7ff2dccf0050>, (array(float64, 1d, C),), ())
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<path>/numba/core/types/functions.py", line 298, in get_call_type
sig = temp.apply(nolitargs, nolitkws)
File "<path>/numba/core/typing/templates.py", line 345, in apply
sig = generic(args, kws)
File "<path>/numba/core/typing/templates.py", line 597, in generic
disp, new_args = self._get_impl(args, kws)
File "<path>/numba/core/typing/templates.py", line 679, in _get_impl
impl, args = self._build_impl(cache_key, args, kws)
File "<path>/numba/core/typing/templates.py", line 710, in _build_impl
ovf_result = self._overload_func(*args, **kws)
File "<path>/numba/np/arraymath.py", line 4091, in np_asfarray
dtype = as_dtype(dtype)
File "<path>/numba/np/numpy_support.py", line 152, in as_dtype
% (nbtype,))
NotImplementedError: <class 'numpy.float64'> cannot be represented as a Numpy dtype
raised from <path>/numba/np/numpy_support.py:152
During: resolving callee type: Function(<function asfarray at 0x7ff2ebd55ef0>)
During: typing of call at test_farray.py (6)
File "test_farray.py", line 6:
def foo():
np.asfarray(np.zeros(4,))
^
|
NotImplementedError
|
def _store_object(self, obj):
self._wr = _PickleableWeakRef(obj)
|
def _store_object(self, obj):
self._wr = weakref.ref(obj)
|
https://github.com/numba/numba/issues/6251
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
c:\users\wiese\repos\ga\numba\numba\core\caching.py in IndexDataCacheFile.save(self, key, data)
482 try:
483 # If key already exists, we will overwrite the file
--> 484 data_name = overloads[key]
485 except KeyError:
486 # Find an available name for the data file
KeyError: ((type(CPUDispatcher(<function mul_self at 0x0000025918224C10>)), int64), ('x86_64-pc-windows-msvc', 'broadwell', '+64bit,+adx,-aes,+avx,+avx2,-avx512bf16,-avx512bitalg,-avx512bw,-avx512cd,-avx512dq,-avx512er,-avx512f,-avx512ifma,-avx512pf,-avx512vbmi,-avx512vbmi2,-avx512vl,-avx512vnni,-avx512vpopcntdq,+bmi,+bmi2,-cldemote,-clflushopt,-clwb,-clzero,+cmov,+cx16,+cx8,-enqcmd,+f16c,+fma,-fma4,+fsgsbase,+fxsr,-gfni,+invpcid,-lwp,+lzcnt,+mmx,+movbe,-movdir64b,-movdiri,-mpx,-mwaitx,+pclmul,-pconfig,-pku,+popcnt,-prefetchwt1,+prfchw,-ptwrite,-rdpid,+rdrnd,+rdseed,-rtm,+sahf,-sgx,-sha,-shstk,+sse,+sse2,+sse3,+sse4.1,+sse4.2,-sse4a,+ssse3,-tbm,-vaes,-vpclmulqdq,-waitpkg,-wbnoinvd,-xop,+xsave,-xsavec,+xsaveopt,-xsaves'), ('95d9e4857cb9755b29e175e557e615d8467821eb42c66a521b4270d86f6b8c06', 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'))
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-7-74c10e900da8> in <module>
7 @numba.njit(cache=True)
8 def compose_self_apply(f, x):
9 return f(f(x))
---> 11 compose_self_apply(mul_self, 2)
c:\users\wiese\repos\ga\numba\numba\core\dispatcher.py in _DispatcherBase._compile_for_args(self, *args, **kws)
431 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
432 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 433 raise e
c:\users\wiese\repos\ga\numba\numba\core\dispatcher.py in _DispatcherBase._compile_for_args(self, *args, **kws)
364 argtypes.append(self.typeof_pyval(a))
365 try:
--> 366 return self.compile(tuple(argtypes))
367 except errors.ForceLiteralArg as e:
368 # Received request for compiler re-entry with the list of arguments
369 # indicated by e.requested_args.
370 # First, check if any of these args are already Literal-ized
371 already_lit_pos = [i for i in e.requested_args
372 if isinstance(args[i], types.Literal)]
c:\users\wiese\repos\ga\numba\numba\core\compiler_lock.py in _CompilerLock.__call__.<locals>._acquire_compile_lock(*args, **kwargs)
29 @functools.wraps(func)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
c:\users\wiese\repos\ga\numba\numba\core\dispatcher.py in Dispatcher.compile(self, sig)
822 raise e.bind_fold_arguments(folded)
823 self.add_overload(cres)
--> 824 self._cache.save_overload(sig, cres)
825 return cres.entry_point
c:\users\wiese\repos\ga\numba\numba\core\caching.py in Cache.save_overload(self, sig, data)
664 """
665 Save the data for the given signature in the cache.
666 """
667 with self._guard_against_spurious_io_errors():
--> 668 self._save_overload(sig, data)
c:\users\wiese\repos\ga\numba\numba\core\caching.py in Cache._save_overload(self, sig, data)
676 key = self._index_key(sig, _get_codegen(data))
677 data = self._impl.reduce(data)
--> 678 self._cache_file.save(key, data)
c:\users\wiese\repos\ga\numba\numba\core\caching.py in IndexDataCacheFile.save(self, key, data)
491 break
492 overloads[key] = data_name
--> 493 self._save_index(overloads)
494 self._save_data(data_name, data)
c:\users\wiese\repos\ga\numba\numba\core\caching.py in IndexDataCacheFile._save_index(self, overloads)
537 def _save_index(self, overloads):
538 data = self._source_stamp, overloads
--> 539 data = self._dump(data)
540 with self._open_for_write(self._index_path) as f:
541 pickle.dump(self._version, f, protocol=-1)
c:\users\wiese\repos\ga\numba\numba\core\caching.py in IndexDataCacheFile._dump(self, obj)
566 def _dump(self, obj):
--> 567 return pickle.dumps(obj, protocol=-1)
TypeError: cannot pickle 'weakref' object
|
KeyError
|
def _analyze_op_build_tuple(self, scope, equiv_set, expr):
# For the moment, we can't do anything with tuples that
# contain arrays, compared to array dimensions. Return
# None to say we won't track this tuple if a part of it
# is an array.
for x in expr.items:
if isinstance(x, ir.Var) and isinstance(
self.typemap[x.name], types.ArrayCompatible
):
return None
consts = []
for var in expr.items:
x = guard(find_const, self.func_ir, var)
if x is not None:
consts.append(x)
else:
break
else:
out = tuple([ir.Const(x, expr.loc) for x in consts])
return out, [], ir.Const(tuple(consts), expr.loc)
# default return for non-const
return tuple(expr.items), []
|
def _analyze_op_build_tuple(self, scope, equiv_set, expr):
consts = []
for var in expr.items:
x = guard(find_const, self.func_ir, var)
if x is not None:
consts.append(x)
else:
break
else:
out = tuple([ir.Const(x, expr.loc) for x in consts])
return out, [], ir.Const(tuple(consts), expr.loc)
# default return for non-const
return tuple(expr.items), []
|
https://github.com/numba/numba/issues/6399
|
In [1]: @numba.njit(parallel=True)
...: def f(A):
...: S = (A, A)
...: return S[0].sum()
...:
In [2]: f(np.ones((3, 3)))
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-2-c76d1478a056> in <module>
----> 1 f(np.ones((3, 3)))
~/dev/numba/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
431 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
432 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 433 raise e
434
435 def inspect_llvm(self, signature=None):
~/dev/numba/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
364 argtypes.append(self.typeof_pyval(a))
365 try:
--> 366 return self.compile(tuple(argtypes))
367 except errors.ForceLiteralArg as e:
368 # Received request for compiler re-entry with the list of arguments
~/dev/numba/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/dev/numba/numba/core/dispatcher.py in compile(self, sig)
855 self._cache_misses[sig] += 1
856 try:
--> 857 cres = self._compiler.compile(args, return_type)
858 except errors.ForceLiteralArg as e:
859 def folded(args, kws):
~/dev/numba/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
~/dev/numba/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
~/dev/numba/numba/core/dispatcher.py in _compile_core(self, args, return_type)
102
103 impl = self._get_implementation(args, {})
--> 104 cres = compiler.compile_extra(self.targetdescr.typing_context,
105 self.targetdescr.target_context,
106 impl,
~/dev/numba/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
600 pipeline = pipeline_class(typingctx, targetctx, library,
601 args, return_type, flags, locals)
--> 602 return pipeline.compile_extra(func)
603
604
~/dev/numba/numba/core/compiler.py in compile_extra(self, func)
350 self.state.lifted = ()
351 self.state.lifted_from = None
--> 352 return self._compile_bytecode()
353
354 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/dev/numba/numba/core/compiler.py in _compile_bytecode(self)
412 """
413 assert self.state.func_ir is None
--> 414 return self._compile_core()
415
416 def _compile_ir(self):
~/dev/numba/numba/core/compiler.py in _compile_core(self)
392 self.state.status.fail_reason = e
393 if is_final_pipeline:
--> 394 raise e
395 else:
396 raise CompilerError("All available pipelines exhausted")
~/dev/numba/numba/core/compiler.py in _compile_core(self)
383 res = None
384 try:
--> 385 pm.run(self.state)
386 if self.state.cr is not None:
387 break
~/dev/numba/numba/core/compiler_machinery.py in run(self, state)
337 (self.pipeline_name, pass_desc)
338 patched_exception = self._patch_error(msg, e)
--> 339 raise patched_exception
340
341 def dependency_analysis(self):
~/dev/numba/numba/core/compiler_machinery.py in run(self, state)
328 pass_inst = _pass_registry.get(pss).pass_inst
329 if isinstance(pass_inst, CompilerPass):
--> 330 self._runPass(idx, pass_inst, state)
331 else:
332 raise BaseException("Legacy pass in use")
~/dev/numba/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/dev/numba/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
287 mutated |= check(pss.run_initialization, internal_state)
288 with SimpleTimer() as pass_time:
--> 289 mutated |= check(pss.run_pass, internal_state)
290 with SimpleTimer() as finalize_time:
291 mutated |= check(pss.run_finalizer, internal_state)
~/dev/numba/numba/core/compiler_machinery.py in check(func, compiler_state)
260
261 def check(func, compiler_state):
--> 262 mangled = func(compiler_state)
263 if mangled not in (True, False):
264 msg = ("CompilerPass implementations should return True/False. "
~/dev/numba/numba/core/typed_passes.py in run_pass(self, state)
288 state.flags,
289 state.parfor_diagnostics)
--> 290 parfor_pass.run()
291
292 remove_dels(state.func_ir.blocks)
~/dev/numba/numba/parfors/parfor.py in run(self)
2715 """run parfor conversion pass: replace Numpy calls
2716 with Parfors when possible and optimize the IR."""
-> 2717 self._pre_run()
2718 # run stencil translation to parfor
2719 if self.options.stencil:
~/dev/numba/numba/parfors/parfor.py in _pre_run(self)
2707 def _pre_run(self):
2708 # run array analysis, a pre-requisite for parfor translation
-> 2709 self.array_analysis.run(self.func_ir.blocks)
2710 # NOTE: Prepare _max_label. See #6102
2711 ir_utils._max_label = max(ir_utils._max_label,
~/dev/numba/numba/parfors/array_analysis.py in run(self, blocks, equiv_set)
1118 topo_order = find_topo_order(blocks, cfg=cfg)
1119 # Traverse blocks in topological order
-> 1120 self._run_on_blocks(topo_order, blocks, cfg, init_equiv_set)
1121
1122 if config.DEBUG_ARRAY_OPT >= 1:
~/dev/numba/numba/parfors/array_analysis.py in _run_on_blocks(self, topo_order, blocks, cfg, init_equiv_set)
1138 block = blocks[label]
1139 scope = block.scope
-> 1140 pending_transforms = self._determine_transform(
1141 cfg, block, label, scope, init_equiv_set
1142 )
~/dev/numba/numba/parfors/array_analysis.py in _determine_transform(self, cfg, block, label, scope, init_equiv_set)
1219 )
1220 self.calltypes[inst] = orig_calltype
-> 1221 pre, post = self._analyze_inst(
1222 label, scope, equiv_set, inst, redefined
1223 )
~/dev/numba/numba/parfors/array_analysis.py in _analyze_inst(self, label, scope, equiv_set, inst, redefined)
1311 )
1312 ):
-> 1313 (shape, post) = self._gen_shape_call(
1314 equiv_set, lhs, typ.ndim, shape
1315 )
~/dev/numba/numba/parfors/array_analysis.py in _gen_shape_call(self, equiv_set, var, ndims, shape)
2973 for i in range(ndims):
2974 skip = False
-> 2975 if shape and shape[i]:
2976 if isinstance(shape[i], ir.Var):
2977 typ = self.typemap[shape[i].name]
IndexError: Failed in nopython mode pipeline (step: convert to parfors)
tuple index out of range
|
IndexError
|
def np_cross(a, b):
if not type_can_asarray(a) or not type_can_asarray(b):
raise TypingError("Inputs must be array-like.")
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3):
raise ValueError(
(
"Incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)"
)
)
if a_.shape[-1] == 3 or b_.shape[-1] == 3:
return _cross_impl(a_, b_)
else:
raise ValueError(
(
"Dimensions for both inputs is 2.\n"
"Please replace your numpy.cross(a, b) call with "
"a call to `cross2d(a, b)` from `numba.np.extensions`."
)
)
return impl
|
def np_cross(a, b):
if not type_can_asarray(a) or not type_can_asarray(b):
raise TypingError("Inputs must be array-like.")
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3):
raise ValueError(
(
"Incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)"
)
)
if a_.shape[-1] == 3 or b_.shape[-1] == 3:
return _cross_impl(a_, b_)
else:
raise ValueError(
(
"Dimensions for both inputs is 2.\n"
"Please replace your numpy.cross(a, b) call with "
"numba.numpy_extensions.cross2d(a, b)."
)
)
return impl
|
https://github.com/numba/numba/issues/6276
|
is_convex(polygon)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-18-dc4c8f71d7e1> in <module>
----> 1 is_convex(polygon)
~/.local/lib/python3.6/site-packages/numba/np/arraymath.py in impl()
4380 else:
4381 raise ValueError((
-> 4382 "Dimensions for both inputs is 2.\n"
4383 "Please replace your numpy.cross(a, b) call with "
4384 "numba.numpy_extensions.cross2d(a, b)."
ValueError: Dimensions for both inputs is 2.
Please replace your numpy.cross(a, b) call with numba.numpy_extensions.cross2d(a, b).
|
ValueError
|
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3):
raise ValueError(
("Incompatible dimensions for cross product\n(dimension must be 2 or 3)")
)
if a_.shape[-1] == 3 or b_.shape[-1] == 3:
return _cross_impl(a_, b_)
else:
raise ValueError(
(
"Dimensions for both inputs is 2.\n"
"Please replace your numpy.cross(a, b) call with "
"a call to `cross2d(a, b)` from `numba.np.extensions`."
)
)
|
def impl(a, b):
a_ = np.asarray(a)
b_ = np.asarray(b)
if a_.shape[-1] not in (2, 3) or b_.shape[-1] not in (2, 3):
raise ValueError(
("Incompatible dimensions for cross product\n(dimension must be 2 or 3)")
)
if a_.shape[-1] == 3 or b_.shape[-1] == 3:
return _cross_impl(a_, b_)
else:
raise ValueError(
(
"Dimensions for both inputs is 2.\n"
"Please replace your numpy.cross(a, b) call with "
"numba.numpy_extensions.cross2d(a, b)."
)
)
|
https://github.com/numba/numba/issues/6276
|
is_convex(polygon)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-18-dc4c8f71d7e1> in <module>
----> 1 is_convex(polygon)
~/.local/lib/python3.6/site-packages/numba/np/arraymath.py in impl()
4380 else:
4381 raise ValueError((
-> 4382 "Dimensions for both inputs is 2.\n"
4383 "Please replace your numpy.cross(a, b) call with "
4384 "numba.numpy_extensions.cross2d(a, b)."
ValueError: Dimensions for both inputs is 2.
Please replace your numpy.cross(a, b) call with numba.numpy_extensions.cross2d(a, b).
|
ValueError
|
def unify(self, typingctx, other):
if isinstance(other, List):
dtype = typingctx.unify_pairs(self.dtype, other.dtype)
reflected = self.reflected or other.reflected
if dtype is not None:
siv = self.initial_value
oiv = other.initial_value
if siv is not None and oiv is not None:
use = siv
if siv is None:
use = oiv
return List(dtype, reflected, use)
else:
return List(dtype, reflected)
|
def unify(self, typingctx, other):
if isinstance(other, List):
dtype = typingctx.unify_pairs(self.dtype, other.dtype)
reflected = self.reflected or other.reflected
if dtype is not None:
siv = self.initial_value
oiv = other.initial_value
if siv is not None and oiv is not None:
use = siv
if siv is None:
use = oiv
return List(dtype, reflected, use.initial_value)
else:
return List(dtype, reflected)
|
https://github.com/numba/numba/issues/6082
|
In [7]: numba.__version__
Out[7]: '0.51.0dev0+497.g4aceb2727'
In [8]: numba.njit(lambda: [[1, 2, 3, 4, 5], [1, 2, 3, 4, 6]])()
---------------------------------------------------------------------------
TypingError Traceback (most recent call last)
<ipython-input-8-7fdc3277fbc9> in <module>
----> 1 numba.njit(lambda: [[1, 2, 3, 4, 5], [1, 2, 3, 4, 6]])()
~/dev/numba/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
413 e.patch_message(msg)
414
--> 415 error_rewrite(e, 'typing')
416 except errors.UnsupportedError as e:
417 # Something unsupported is present in the user code, add help info
~/dev/numba/numba/core/dispatcher.py in error_rewrite(e, issue_type)
356 raise e
357 else:
--> 358 reraise(type(e), e, None)
359
360 argtypes = []
~/dev/numba/numba/core/utils.py in reraise(tp, value, tb)
78 value = tp()
79 if value.__traceback__ is not tb:
---> 80 raise value.with_traceback(tb)
81 raise value
82
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Internal error at <numba.core.typeinfer.BuildListConstraint object at 0x7fd342d54f70>.
'list' object has no attribute 'initial_value'
During: typing of <class 'numba.core.types.containers.List'> at <ipython-input-8-7fdc3277fbc9> (1)
Enable logging at debug level for details.
File "<ipython-input-8-7fdc3277fbc9>", line 1:
numba.njit(lambda: [[1, 2, 3, 4, 5], [1, 2, 3, 4, 6]])()
^
|
TypingError
|
def _reduce_function(func, globs):
"""
Reduce a Python function and its globals to picklable components.
If there are cell variables (i.e. references to a closure), their
values will be frozen.
"""
if func.__closure__:
cells = [cell.cell_contents for cell in func.__closure__]
else:
cells = None
return _reduce_code(func.__code__), globs, func.__name__, cells, func.__defaults__
|
def _reduce_function(func, globs):
"""
Reduce a Python function and its globals to picklable components.
If there are cell variables (i.e. references to a closure), their
values will be frozen.
"""
if func.__closure__:
cells = [cell.cell_contents for cell in func.__closure__]
else:
cells = None
return _reduce_code(func.__code__), globs, func.__name__, cells
|
https://github.com/numba/numba/issues/4888
|
[2, 3, 4]
2
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
TypeError: not enough arguments: expected 2, got 1
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "smpe_paper/numba_test.py", line 18, in <module>
print(p.map(add2, [1, 2, 3]))
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 268, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
TypeError: not enough arguments: expected 2, got 1
(smpe-paper) root@5b798e37487d:/smpe-paper# python smpe_paper/numba_test.py
[2, 3, 4]
2
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
TypeError: not enough arguments: expected 2, got 1
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "smpe_paper/numba_test.py", line 18, in <module>
print(p.map(add2, [1, 2, 3])) # TypeError
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 268, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
TypeError: not enough arguments: expected 2, got 1
|
TypeError
|
def _rebuild_function(code_reduced, globals, name, cell_values, defaults):
"""
Rebuild a function from its _reduce_function() results.
"""
if cell_values:
cells = tuple(_dummy_closure(v).__closure__[0] for v in cell_values)
else:
cells = ()
code = _rebuild_code(*code_reduced)
modname = globals["__name__"]
try:
_rebuild_module(modname)
except ImportError:
# If the module can't be found, avoid passing it (it would produce
# errors when lowering).
del globals["__name__"]
return FunctionType(code, globals, name, defaults, cells)
|
def _rebuild_function(code_reduced, globals, name, cell_values):
"""
Rebuild a function from its _reduce_function() results.
"""
if cell_values:
cells = tuple(_dummy_closure(v).__closure__[0] for v in cell_values)
else:
cells = ()
code = _rebuild_code(*code_reduced)
modname = globals["__name__"]
try:
_rebuild_module(modname)
except ImportError:
# If the module can't be found, avoid passing it (it would produce
# errors when lowering).
del globals["__name__"]
return FunctionType(code, globals, name, (), cells)
|
https://github.com/numba/numba/issues/4888
|
[2, 3, 4]
2
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
TypeError: not enough arguments: expected 2, got 1
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "smpe_paper/numba_test.py", line 18, in <module>
print(p.map(add2, [1, 2, 3]))
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 268, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
TypeError: not enough arguments: expected 2, got 1
(smpe-paper) root@5b798e37487d:/smpe-paper# python smpe_paper/numba_test.py
[2, 3, 4]
2
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
TypeError: not enough arguments: expected 2, got 1
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "smpe_paper/numba_test.py", line 18, in <module>
print(p.map(add2, [1, 2, 3])) # TypeError
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 268, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/root/.pyenv/versions/3.7.3/lib/python3.7/multiprocessing/pool.py", line 657, in get
raise self._value
TypeError: not enough arguments: expected 2, got 1
|
TypeError
|
def generic(self, args, kws):
"""
Type the overloaded function by compiling the appropriate
implementation for the given args.
"""
disp, new_args = self._get_impl(args, kws)
if disp is None:
return
# Compile and type it for the given types
disp_type = types.Dispatcher(disp)
# Store the compiled overload for use in the lowering phase if there's
# no inlining required (else functions are being compiled which will
# never be used as they are inlined)
if not self._inline.is_never_inline:
# need to run the compiler front end up to type inference to compute
# a signature
from numba.core import typed_passes, compiler
from numba.core.inline_closurecall import InlineWorker
fcomp = disp._compiler
flags = compiler.Flags()
# Updating these causes problems?!
# fcomp.targetdescr.options.parse_as_flags(flags,
# fcomp.targetoptions)
# flags = fcomp._customize_flags(flags)
# spoof a compiler pipline like the one that will be in use
tyctx = fcomp.targetdescr.typing_context
tgctx = fcomp.targetdescr.target_context
compiler_inst = fcomp.pipeline_class(
tyctx,
tgctx,
None,
None,
None,
flags,
None,
)
inline_worker = InlineWorker(
tyctx,
tgctx,
fcomp.locals,
compiler_inst,
flags,
None,
)
# If the inlinee contains something to trigger literal arg dispatch
# then the pipeline call will unconditionally fail due to a raised
# ForceLiteralArg exception. Therefore `resolve` is run first, as
# type resolution must occur at some point, this will hit any
# `literally` calls and because it's going via the dispatcher will
# handle them correctly i.e. ForceLiteralArg propagates. This having
# the desired effect of ensuring the pipeline call is only made in
# situations that will succeed. For context see #5887.
resolve = disp_type.dispatcher.get_call_template
template, pysig, folded_args, kws = resolve(new_args, kws)
ir = inline_worker.run_untyped_passes(disp_type.dispatcher.py_func)
typemap, return_type, calltypes = typed_passes.type_inference_stage(
self.context, ir, folded_args, None
)
sig = Signature(return_type, folded_args, None)
# this stores a load of info for the cost model function if supplied
# it by default is None
self._inline_overloads[sig.args] = {"folded_args": folded_args}
# this stores the compiled overloads, if there's no compiled
# overload available i.e. function is always inlined, the key still
# needs to exist for type resolution
# NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
# the inliner has failed to inline this entry corretly.
impl_init = _EmptyImplementationEntry("always inlined")
self._compiled_overloads[sig.args] = impl_init
if not self._inline.is_always_inline:
# this branch is here because a user has supplied a function to
# determine whether to inline or not. As a result both compiled
# function and inliner info needed, delaying the computation of
# this leads to an internal state mess at present. TODO: Fix!
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
# store the inliner information, it's used later in the cost
# model function call
iinfo = _inline_info(ir, typemap, calltypes, sig)
self._inline_overloads[sig.args] = {"folded_args": folded_args, "iinfo": iinfo}
else:
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
return sig
|
def generic(self, args, kws):
"""
Type the overloaded function by compiling the appropriate
implementation for the given args.
"""
disp, new_args = self._get_impl(args, kws)
if disp is None:
return
# Compile and type it for the given types
disp_type = types.Dispatcher(disp)
# Store the compiled overload for use in the lowering phase if there's
# no inlining required (else functions are being compiled which will
# never be used as they are inlined)
if not self._inline.is_never_inline:
# need to run the compiler front end up to type inference to compute
# a signature
from numba.core import typed_passes, compiler
from numba.core.inline_closurecall import InlineWorker
fcomp = disp._compiler
flags = compiler.Flags()
# Updating these causes problems?!
# fcomp.targetdescr.options.parse_as_flags(flags,
# fcomp.targetoptions)
# flags = fcomp._customize_flags(flags)
# spoof a compiler pipline like the one that will be in use
tyctx = fcomp.targetdescr.typing_context
tgctx = fcomp.targetdescr.target_context
compiler_inst = fcomp.pipeline_class(
tyctx,
tgctx,
None,
None,
None,
flags,
None,
)
inline_worker = InlineWorker(
tyctx,
tgctx,
fcomp.locals,
compiler_inst,
flags,
None,
)
ir = inline_worker.run_untyped_passes(disp_type.dispatcher.py_func)
resolve = disp_type.dispatcher.get_call_template
template, pysig, folded_args, kws = resolve(new_args, kws)
typemap, return_type, calltypes = typed_passes.type_inference_stage(
self.context, ir, folded_args, None
)
sig = Signature(return_type, folded_args, None)
# this stores a load of info for the cost model function if supplied
# it by default is None
self._inline_overloads[sig.args] = {"folded_args": folded_args}
# this stores the compiled overloads, if there's no compiled
# overload available i.e. function is always inlined, the key still
# needs to exist for type resolution
# NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
# the inliner has failed to inline this entry corretly.
impl_init = _EmptyImplementationEntry("always inlined")
self._compiled_overloads[sig.args] = impl_init
if not self._inline.is_always_inline:
# this branch is here because a user has supplied a function to
# determine whether to inline or not. As a result both compiled
# function and inliner info needed, delaying the computation of
# this leads to an internal state mess at present. TODO: Fix!
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
# store the inliner information, it's used later in the cost
# model function call
iinfo = _inline_info(ir, typemap, calltypes, sig)
self._inline_overloads[sig.args] = {"folded_args": folded_args, "iinfo": iinfo}
else:
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
return sig
|
https://github.com/numba/numba/issues/5887
|
DEBUG: dtype= unicode_type
Traceback (most recent call last):
File "test_inline_literally.py", line 36, in <module>
res = test_impl(data, dtype)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\numba\core\dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\numba\core\dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\numba\core\utils.py", line 79, in reraise
raise value.with_traceback(tb)
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Internal error at <numba.core.typeinfer.CallConstraint object at 0x0000023CF5AA24F0>.
'NoneType' object is not callable
[1] During: resolving callee type: Function(<function foo at 0x0000023CF24E5280>)
[2] During: typing of call at test_inline_literally.py (32)
Enable logging at debug level for details.
File "test_inline_literally.py", line 32:
def test_impl(arr, dtype):
return foo(arr, dtype)
^
|
numba.core.errors.TypingError
|
def define_nopython_pipeline(state, name="nopython"):
"""Returns an nopython mode pipeline based PassManager"""
# compose pipeline from untyped, typed and lowering parts
dpb = DefaultPassBuilder
pm = PassManager(name)
untyped_passes = dpb.define_untyped_pipeline(state)
pm.passes.extend(untyped_passes.passes)
typed_passes = dpb.define_typed_pipeline(state)
pm.passes.extend(typed_passes.passes)
lowering_passes = dpb.define_nopython_lowering_pipeline(state)
pm.passes.extend(lowering_passes.passes)
pm.finalize()
return pm
|
def define_nopython_pipeline(state, name="nopython"):
"""Returns an nopython mode pipeline based PassManager"""
pm = PassManager(name)
if state.func_ir is None:
pm.add_pass(TranslateByteCode, "analyzing bytecode")
pm.add_pass(FixupArgs, "fix up args")
pm.add_pass(IRProcessing, "processing IR")
pm.add_pass(WithLifting, "Handle with contexts")
# pre typing
if not state.flags.no_rewrites:
pm.add_pass(RewriteSemanticConstants, "rewrite semantic constants")
pm.add_pass(DeadBranchPrune, "dead branch pruning")
pm.add_pass(GenericRewrites, "nopython rewrites")
pm.add_pass(InlineClosureLikes, "inline calls to locally defined closures")
# convert any remaining closures into functions
pm.add_pass(MakeFunctionToJitFunction, "convert make_function into JIT functions")
# inline functions that have been determined as inlinable and rerun
# branch pruning, this needs to be run after closures are inlined as
# the IR repr of a closure masks call sites if an inlinable is called
# inside a closure
pm.add_pass(InlineInlinables, "inline inlinable functions")
if not state.flags.no_rewrites:
pm.add_pass(DeadBranchPrune, "dead branch pruning")
pm.add_pass(FindLiterallyCalls, "find literally calls")
pm.add_pass(LiteralUnroll, "handles literal_unroll")
if state.flags.enable_ssa:
pm.add_pass(ReconstructSSA, "ssa")
# typing
pm.add_pass(NopythonTypeInference, "nopython frontend")
pm.add_pass(AnnotateTypes, "annotate types")
# strip phis
pm.add_pass(PreLowerStripPhis, "remove phis nodes")
# optimisation
pm.add_pass(InlineOverloads, "inline overloaded functions")
if state.flags.auto_parallel.enabled:
pm.add_pass(PreParforPass, "Preprocessing for parfors")
if not state.flags.no_rewrites:
pm.add_pass(NopythonRewrites, "nopython rewrites")
if state.flags.auto_parallel.enabled:
pm.add_pass(ParforPass, "convert to parfors")
# legalise
pm.add_pass(IRLegalization, "ensure IR is legal prior to lowering")
# lower
pm.add_pass(NoPythonBackend, "nopython mode backend")
pm.add_pass(DumpParforDiagnostics, "dump parfor diagnostics")
pm.finalize()
return pm
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def run(self):
"""Run inline closure call pass."""
# Analysis relies on ir.Del presence, strip out later
pp = postproc.PostProcessor(self.func_ir)
pp.run(True)
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
call_name = guard(find_callname, self.func_ir, expr)
func_def = guard(get_definition, self.func_ir, expr.func)
if guard(
self._inline_reduction, work_list, block, i, expr, call_name
):
modified = True
break # because block structure changed
if guard(self._inline_closure, work_list, block, i, func_def):
modified = True
break # because block structure changed
if guard(self._inline_stencil, instr, call_name, func_def):
modified = True
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir.blocks)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(
_inline_arraycall,
self.func_ir,
cfg,
visited,
loops[k],
self.swapped,
self.parallel_options.comprehension,
self.typed,
):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
# clean up now dead/unreachable blocks, e.g. unconditionally raising
# an exception in an inlined function would render some parts of the
# inliner unreachable
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
for dead in cfg.dead_nodes():
del self.func_ir.blocks[dead]
# run dead code elimination
dead_code_elimination(self.func_ir)
# do label renaming
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
# inlining done, strip dels
remove_dels(self.func_ir.blocks)
debug_print("END")
|
def run(self):
"""Run inline closure call pass."""
# Analysis relies on ir.Del presence, strip out later
pp = postproc.PostProcessor(self.func_ir)
pp.run(True)
modified = False
work_list = list(self.func_ir.blocks.items())
debug_print = _make_debug_print("InlineClosureCallPass")
debug_print("START")
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
lhs = instr.target
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
call_name = guard(find_callname, self.func_ir, expr)
func_def = guard(get_definition, self.func_ir, expr.func)
if guard(
self._inline_reduction, work_list, block, i, expr, call_name
):
modified = True
break # because block structure changed
if guard(self._inline_closure, work_list, block, i, func_def):
modified = True
break # because block structure changed
if guard(self._inline_stencil, instr, call_name, func_def):
modified = True
if enable_inline_arraycall:
# Identify loop structure
if modified:
# Need to do some cleanups if closure inlining kicked in
merge_adjacent_blocks(self.func_ir.blocks)
cfg = compute_cfg_from_blocks(self.func_ir.blocks)
debug_print("start inline arraycall")
_debug_dump(cfg)
loops = cfg.loops()
sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
visited = []
# We go over all loops, bigger loops first (outer first)
for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
visited.append(k)
if guard(
_inline_arraycall,
self.func_ir,
cfg,
visited,
loops[k],
self.swapped,
self.parallel_options.comprehension,
self.typed,
):
modified = True
if modified:
_fix_nested_array(self.func_ir)
if modified:
# run dead code elimination
dead_code_elimination(self.func_ir)
# do label renaming
self.func_ir.blocks = rename_labels(self.func_ir.blocks)
# inlining done, strip dels
remove_dels(self.func_ir.blocks)
debug_print("END")
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def __str__(self):
if self.exc_class is None:
return "<static> raise"
elif self.exc_args is None:
return "<static> raise %s" % (self.exc_class,)
else:
return "<static> raise %s(%s)" % (
self.exc_class,
", ".join(map(repr, self.exc_args)),
)
|
def __str__(self):
if self.exc_class is None:
return "raise"
elif self.exc_args is None:
return "raise %s" % (self.exc_class,)
else:
return "raise %s(%s)" % (self.exc_class, ", ".join(map(repr, self.exc_args)))
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def run_pass(self, state):
"""Run inlining of overloads"""
if self._DEBUG:
print("before overload inline".center(80, "-"))
print(state.func_id.unique_name)
print(state.func_ir.dump())
print("".center(80, "-"))
from numba.core.inline_closurecall import InlineWorker, callee_ir_validator
inline_worker = InlineWorker(
state.typingctx,
state.targetctx,
state.locals,
state.pipeline,
state.flags,
callee_ir_validator,
state.typemap,
state.calltypes,
)
modified = False
work_list = list(state.func_ir.blocks.items())
# use a work list, look for call sites via `ir.Expr.op == call` and
# then pass these to `self._do_work` to make decisions about inlining.
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
expr = instr.value
if isinstance(expr, ir.Expr):
if expr.op == "call":
workfn = self._do_work_call
elif expr.op == "getattr":
workfn = self._do_work_getattr
else:
continue
if guard(workfn, state, work_list, block, i, expr, inline_worker):
modified = True
break # because block structure changed
if self._DEBUG:
print("after overload inline".center(80, "-"))
print(state.func_id.unique_name)
print(state.func_ir.dump())
print("".center(80, "-"))
if modified:
# Remove dead blocks, this is safe as it relies on the CFG only.
cfg = compute_cfg_from_blocks(state.func_ir.blocks)
for dead in cfg.dead_nodes():
del state.func_ir.blocks[dead]
# clean up blocks
dead_code_elimination(state.func_ir, typemap=state.type_annotation.typemap)
# clean up unconditional branches that appear due to inlined
# functions introducing blocks
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
if self._DEBUG:
print("after overload inline DCE".center(80, "-"))
print(state.func_id.unique_name)
print(state.func_ir.dump())
print("".center(80, "-"))
return True
|
def run_pass(self, state):
"""Run inlining of overloads"""
if self._DEBUG:
print("before overload inline".center(80, "-"))
print(state.func_ir.dump())
print("".center(80, "-"))
modified = False
work_list = list(state.func_ir.blocks.items())
# use a work list, look for call sites via `ir.Expr.op == call` and
# then pass these to `self._do_work` to make decisions about inlining.
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
expr = instr.value
if isinstance(expr, ir.Expr):
if expr.op == "call":
workfn = self._do_work_call
elif expr.op == "getattr":
workfn = self._do_work_getattr
else:
continue
if guard(workfn, state, work_list, block, i, expr):
modified = True
break # because block structure changed
if self._DEBUG:
print("after overload inline".center(80, "-"))
print(state.func_ir.dump())
print("".center(80, "-"))
if modified:
# clean up blocks
dead_code_elimination(state.func_ir, typemap=state.type_annotation.typemap)
# clean up unconditional branches that appear due to inlined
# functions introducing blocks
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
if self._DEBUG:
print("after overload inline DCE".center(80, "-"))
print(state.func_ir.dump())
print("".center(80, "-"))
return True
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def _do_work_getattr(self, state, work_list, block, i, expr, inline_worker):
recv_type = state.type_annotation.typemap[expr.value.name]
recv_type = types.unliteral(recv_type)
matched = state.typingctx.find_matching_getattr_template(
recv_type,
expr.attr,
)
if not matched:
return False
template = matched["template"]
if getattr(template, "is_method", False):
# The attribute template is representing a method.
# Don't inline the getattr.
return False
inline_type = getattr(template, "_inline", None)
if inline_type is None:
# inline not defined
return False
sig = typing.signature(matched["return_type"], recv_type)
arg_typs = sig.args
if not inline_type.is_never_inline:
try:
impl = template._overload_func(recv_type)
if impl is None:
raise Exception # abort for this template
except Exception:
return False
else:
return False
is_method = False
return self._run_inliner(
state,
inline_type,
sig,
template,
arg_typs,
expr,
i,
impl,
block,
work_list,
is_method,
inline_worker,
)
|
def _do_work_getattr(self, state, work_list, block, i, expr):
recv_type = state.type_annotation.typemap[expr.value.name]
recv_type = types.unliteral(recv_type)
matched = state.typingctx.find_matching_getattr_template(
recv_type,
expr.attr,
)
if not matched:
return False
template = matched["template"]
if getattr(template, "is_method", False):
# The attribute template is representing a method.
# Don't inline the getattr.
return False
inline_type = getattr(template, "_inline", None)
if inline_type is None:
# inline not defined
return False
sig = typing.signature(matched["return_type"], recv_type)
arg_typs = sig.args
if not inline_type.is_never_inline:
try:
impl = template._overload_func(recv_type)
if impl is None:
raise Exception # abort for this template
except Exception:
return False
else:
return False
is_method = False
return self._run_inliner(
state,
inline_type,
sig,
template,
arg_typs,
expr,
i,
impl,
block,
work_list,
is_method,
)
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def _do_work_call(self, state, work_list, block, i, expr, inline_worker):
# try and get a definition for the call, this isn't always possible as
# it might be a eval(str)/part generated awaiting update etc. (parfors)
to_inline = None
try:
to_inline = state.func_ir.get_definition(expr.func)
except Exception:
return False
# do not handle closure inlining here, another pass deals with that.
if getattr(to_inline, "op", False) == "make_function":
return False
# check this is a known and typed function
try:
func_ty = state.type_annotation.typemap[expr.func.name]
except KeyError:
# e.g. Calls to CUDA Intrinsic have no mapped type so KeyError
return False
if not hasattr(func_ty, "get_call_type"):
return False
sig = state.type_annotation.calltypes[expr]
is_method = False
# search the templates for this overload looking for "inline"
if getattr(func_ty, "template", None) is not None:
# @overload_method
is_method = True
templates = [func_ty.template]
arg_typs = (func_ty.template.this,) + sig.args
else:
# @overload case
templates = getattr(func_ty, "templates", None)
arg_typs = sig.args
if templates is None:
return False
impl = None
for template in templates:
inline_type = getattr(template, "_inline", None)
if inline_type is None:
# inline not defined
continue
if not inline_type.is_never_inline:
try:
impl = template._overload_func(*arg_typs)
if impl is None:
raise Exception # abort for this template
break
except Exception:
continue
else:
return False
# at this point we know we maybe want to inline something and there's
# definitely something that could be inlined.
return self._run_inliner(
state,
inline_type,
sig,
template,
arg_typs,
expr,
i,
impl,
block,
work_list,
is_method,
inline_worker,
)
|
def _do_work_call(self, state, work_list, block, i, expr):
# try and get a definition for the call, this isn't always possible as
# it might be a eval(str)/part generated awaiting update etc. (parfors)
to_inline = None
try:
to_inline = state.func_ir.get_definition(expr.func)
except Exception:
return False
# do not handle closure inlining here, another pass deals with that.
if getattr(to_inline, "op", False) == "make_function":
return False
# check this is a known and typed function
try:
func_ty = state.type_annotation.typemap[expr.func.name]
except KeyError:
# e.g. Calls to CUDA Intrinsic have no mapped type so KeyError
return False
if not hasattr(func_ty, "get_call_type"):
return False
sig = state.type_annotation.calltypes[expr]
is_method = False
# search the templates for this overload looking for "inline"
if getattr(func_ty, "template", None) is not None:
# @overload_method
is_method = True
templates = [func_ty.template]
arg_typs = (func_ty.template.this,) + sig.args
else:
# @overload case
templates = getattr(func_ty, "templates", None)
arg_typs = sig.args
if templates is None:
return False
impl = None
for template in templates:
inline_type = getattr(template, "_inline", None)
if inline_type is None:
# inline not defined
continue
if not inline_type.is_never_inline:
try:
impl = template._overload_func(*arg_typs)
if impl is None:
raise Exception # abort for this template
break
except Exception:
continue
else:
return False
# at this point we know we maybe want to inline something and there's
# definitely something that could be inlined.
return self._run_inliner(
state,
inline_type,
sig,
template,
arg_typs,
expr,
i,
impl,
block,
work_list,
is_method,
)
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def _run_inliner(
self,
state,
inline_type,
sig,
template,
arg_typs,
expr,
i,
impl,
block,
work_list,
is_method,
inline_worker,
):
do_inline = True
if not inline_type.is_always_inline:
from numba.core.typing.templates import _inline_info
caller_inline_info = _inline_info(
state.func_ir,
state.type_annotation.typemap,
state.type_annotation.calltypes,
sig,
)
# must be a cost-model function, run the function
iinfo = template._inline_overloads[arg_typs]["iinfo"]
if inline_type.has_cost_model:
do_inline = inline_type.value(expr, caller_inline_info, iinfo)
else:
assert "unreachable"
if do_inline:
if is_method:
if not self._add_method_self_arg(state, expr):
return False
arg_typs = template._inline_overloads[arg_typs]["folded_args"]
iinfo = template._inline_overloads[arg_typs]["iinfo"]
freevars = iinfo.func_ir.func_id.func.__code__.co_freevars
_, _, _, new_blocks = inline_worker.inline_ir(
state.func_ir, block, i, iinfo.func_ir, freevars, arg_typs=arg_typs
)
if work_list is not None:
for blk in new_blocks:
work_list.append(blk)
return True
else:
return False
|
def _run_inliner(
self,
state,
inline_type,
sig,
template,
arg_typs,
expr,
i,
impl,
block,
work_list,
is_method,
):
from numba.core.inline_closurecall import inline_closure_call, callee_ir_validator
do_inline = True
if not inline_type.is_always_inline:
from numba.core.typing.templates import _inline_info
caller_inline_info = _inline_info(
state.func_ir,
state.type_annotation.typemap,
state.type_annotation.calltypes,
sig,
)
# must be a cost-model function, run the function
iinfo = template._inline_overloads[arg_typs]["iinfo"]
if inline_type.has_cost_model:
do_inline = inline_type.value(expr, caller_inline_info, iinfo)
else:
assert "unreachable"
if do_inline:
if is_method:
if not self._add_method_self_arg(state, expr):
return False
arg_typs = template._inline_overloads[arg_typs]["folded_args"]
# pass is typed so use the callee globals
inline_closure_call(
state.func_ir,
impl.__globals__,
block,
i,
impl,
typingctx=state.typingctx,
arg_typs=arg_typs,
typemap=state.type_annotation.typemap,
calltypes=state.type_annotation.calltypes,
work_list=work_list,
replace_freevars=False,
callee_validator=callee_ir_validator,
)
return True
else:
return False
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def generic(self, args, kws):
"""
Type the overloaded function by compiling the appropriate
implementation for the given args.
"""
disp, new_args = self._get_impl(args, kws)
if disp is None:
return
# Compile and type it for the given types
disp_type = types.Dispatcher(disp)
# Store the compiled overload for use in the lowering phase if there's
# no inlining required (else functions are being compiled which will
# never be used as they are inlined)
if not self._inline.is_never_inline:
# need to run the compiler front end up to type inference to compute
# a signature
from numba.core import typed_passes, compiler
from numba.core.inline_closurecall import InlineWorker
fcomp = disp._compiler
flags = compiler.Flags()
# Updating these causes problems?!
# fcomp.targetdescr.options.parse_as_flags(flags,
# fcomp.targetoptions)
# flags = fcomp._customize_flags(flags)
# spoof a compiler pipline like the one that will be in use
tyctx = fcomp.targetdescr.typing_context
tgctx = fcomp.targetdescr.target_context
compiler_inst = fcomp.pipeline_class(
tyctx,
tgctx,
None,
None,
None,
flags,
None,
)
inline_worker = InlineWorker(
tyctx,
tgctx,
fcomp.locals,
compiler_inst,
flags,
None,
)
ir = inline_worker.run_untyped_passes(disp_type.dispatcher.py_func)
resolve = disp_type.dispatcher.get_call_template
template, pysig, folded_args, kws = resolve(new_args, kws)
typemap, return_type, calltypes = typed_passes.type_inference_stage(
self.context, ir, folded_args, None
)
sig = Signature(return_type, folded_args, None)
# this stores a load of info for the cost model function if supplied
# it by default is None
self._inline_overloads[sig.args] = {"folded_args": folded_args}
# this stores the compiled overloads, if there's no compiled
# overload available i.e. function is always inlined, the key still
# needs to exist for type resolution
# NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
# the inliner has failed to inline this entry corretly.
impl_init = _EmptyImplementationEntry("always inlined")
self._compiled_overloads[sig.args] = impl_init
if not self._inline.is_always_inline:
# this branch is here because a user has supplied a function to
# determine whether to inline or not. As a result both compiled
# function and inliner info needed, delaying the computation of
# this leads to an internal state mess at present. TODO: Fix!
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
# store the inliner information, it's used later in the cost
# model function call
iinfo = _inline_info(ir, typemap, calltypes, sig)
self._inline_overloads[sig.args] = {"folded_args": folded_args, "iinfo": iinfo}
else:
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
return sig
|
def generic(self, args, kws):
"""
Type the overloaded function by compiling the appropriate
implementation for the given args.
"""
disp, new_args = self._get_impl(args, kws)
if disp is None:
return
# Compile and type it for the given types
disp_type = types.Dispatcher(disp)
# Store the compiled overload for use in the lowering phase if there's
# no inlining required (else functions are being compiled which will
# never be used as they are inlined)
if not self._inline.is_never_inline:
# need to run the compiler front end up to type inference to compute
# a signature
from numba.core import typed_passes, compiler
ir = compiler.run_frontend(disp_type.dispatcher.py_func)
resolve = disp_type.dispatcher.get_call_template
template, pysig, folded_args, kws = resolve(new_args, kws)
typemap, return_type, calltypes = typed_passes.type_inference_stage(
self.context, ir, folded_args, None
)
sig = Signature(return_type, folded_args, None)
# this stores a load of info for the cost model function if supplied
# it by default is None
self._inline_overloads[sig.args] = {"folded_args": folded_args}
# this stores the compiled overloads, if there's no compiled
# overload available i.e. function is always inlined, the key still
# needs to exist for type resolution
# NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
# the inliner has failed to inline this entry corretly.
impl_init = _EmptyImplementationEntry("always inlined")
self._compiled_overloads[sig.args] = impl_init
if not self._inline.is_always_inline:
# this branch is here because a user has supplied a function to
# determine whether to inline or not. As a result both compiled
# function and inliner info needed, delaying the computation of
# this leads to an internal state mess at present. TODO: Fix!
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
# store the inliner information, it's used later in the cost
# model function call
iinfo = _inline_info(ir, typemap, calltypes, sig)
self._inline_overloads[sig.args] = {
"folded_args": folded_args,
"iinfo": iinfo,
}
else:
sig = disp_type.get_call_type(self.context, new_args, kws)
self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
return sig
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def run_pass(self, state):
"""Run inlining of inlinables"""
if self._DEBUG:
print("before inline".center(80, "-"))
print(state.func_ir.dump())
print("".center(80, "-"))
from numba.core.inline_closurecall import InlineWorker, callee_ir_validator
inline_worker = InlineWorker(
state.typingctx,
state.targetctx,
state.locals,
state.pipeline,
state.flags,
validator=callee_ir_validator,
)
modified = False
# use a work list, look for call sites via `ir.Expr.op == call` and
# then pass these to `self._do_work` to make decisions about inlining.
work_list = list(state.func_ir.blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
if guard(
self._do_work, state, work_list, block, i, expr, inline_worker
):
modified = True
break # because block structure changed
if modified:
# clean up unconditional branches that appear due to inlined
# functions introducing blocks
cfg = compute_cfg_from_blocks(state.func_ir.blocks)
for dead in cfg.dead_nodes():
del state.func_ir.blocks[dead]
post_proc = postproc.PostProcessor(state.func_ir)
post_proc.run()
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
if self._DEBUG:
print("after inline".center(80, "-"))
print(state.func_ir.dump())
print("".center(80, "-"))
return True
|
def run_pass(self, state):
"""Run inlining of inlinables"""
if self._DEBUG:
print("before inline".center(80, "-"))
print(state.func_ir.dump())
print("".center(80, "-"))
modified = False
# use a work list, look for call sites via `ir.Expr.op == call` and
# then pass these to `self._do_work` to make decisions about inlining.
work_list = list(state.func_ir.blocks.items())
while work_list:
label, block = work_list.pop()
for i, instr in enumerate(block.body):
if isinstance(instr, ir.Assign):
expr = instr.value
if isinstance(expr, ir.Expr) and expr.op == "call":
if guard(self._do_work, state, work_list, block, i, expr):
modified = True
break # because block structure changed
if modified:
# clean up unconditional branches that appear due to inlined
# functions introducing blocks
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
if self._DEBUG:
print("after inline".center(80, "-"))
print(state.func_ir.dump())
print("".center(80, "-"))
return True
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def _do_work(self, state, work_list, block, i, expr, inline_worker):
from numba.core.compiler import run_frontend
from numba.core.cpu import InlineOptions
# try and get a definition for the call, this isn't always possible as
# it might be a eval(str)/part generated awaiting update etc. (parfors)
to_inline = None
try:
to_inline = state.func_ir.get_definition(expr.func)
except Exception:
if self._DEBUG:
print("Cannot find definition for %s" % expr.func)
return False
# do not handle closure inlining here, another pass deals with that.
if getattr(to_inline, "op", False) == "make_function":
return False
# see if the definition is a "getattr", in which case walk the IR to
# try and find the python function via the module from which it's
# imported, this should all be encoded in the IR.
if getattr(to_inline, "op", False) == "getattr":
val = resolve_func_from_module(state.func_ir, to_inline)
else:
# This is likely a freevar or global
#
# NOTE: getattr 'value' on a call may fail if it's an ir.Expr as
# getattr is overloaded to look in _kws.
try:
val = getattr(to_inline, "value", False)
except Exception:
raise GuardException
# if something was found...
if val:
# check it's dispatcher-like, the targetoptions attr holds the
# kwargs supplied in the jit decorator and is where 'inline' will
# be if it is present.
topt = getattr(val, "targetoptions", False)
if topt:
inline_type = topt.get("inline", None)
# has 'inline' been specified?
if inline_type is not None:
inline_opt = InlineOptions(inline_type)
# Could this be inlinable?
if not inline_opt.is_never_inline:
# yes, it could be inlinable
do_inline = True
pyfunc = val.py_func
# Has it got an associated cost model?
if inline_opt.has_cost_model:
# yes, it has a cost model, use it to determine
# whether to do the inline
py_func_ir = run_frontend(pyfunc)
do_inline = inline_type(expr, state.func_ir, py_func_ir)
# if do_inline is True then inline!
if do_inline:
_, _, _, new_blocks = inline_worker.inline_function(
state.func_ir,
block,
i,
pyfunc,
)
if work_list is not None:
for blk in new_blocks:
work_list.append(blk)
return True
return False
|
def _do_work(self, state, work_list, block, i, expr):
from numba.core.inline_closurecall import inline_closure_call, callee_ir_validator
from numba.core.compiler import run_frontend
from numba.core.cpu import InlineOptions
# try and get a definition for the call, this isn't always possible as
# it might be a eval(str)/part generated awaiting update etc. (parfors)
to_inline = None
try:
to_inline = state.func_ir.get_definition(expr.func)
except Exception:
if self._DEBUG:
print("Cannot find definition for %s" % expr.func)
return False
# do not handle closure inlining here, another pass deals with that.
if getattr(to_inline, "op", False) == "make_function":
return False
# see if the definition is a "getattr", in which case walk the IR to
# try and find the python function via the module from which it's
# imported, this should all be encoded in the IR.
if getattr(to_inline, "op", False) == "getattr":
val = resolve_func_from_module(state.func_ir, to_inline)
else:
# This is likely a freevar or global
#
# NOTE: getattr 'value' on a call may fail if it's an ir.Expr as
# getattr is overloaded to look in _kws.
try:
val = getattr(to_inline, "value", False)
except Exception:
raise GuardException
# if something was found...
if val:
# check it's dispatcher-like, the targetoptions attr holds the
# kwargs supplied in the jit decorator and is where 'inline' will
# be if it is present.
topt = getattr(val, "targetoptions", False)
if topt:
inline_type = topt.get("inline", None)
# has 'inline' been specified?
if inline_type is not None:
inline_opt = InlineOptions(inline_type)
# Could this be inlinable?
if not inline_opt.is_never_inline:
# yes, it could be inlinable
do_inline = True
pyfunc = val.py_func
# Has it got an associated cost model?
if inline_opt.has_cost_model:
# yes, it has a cost model, use it to determine
# whether to do the inline
py_func_ir = run_frontend(pyfunc)
do_inline = inline_type(expr, state.func_ir, py_func_ir)
# if do_inline is True then inline!
if do_inline:
inline_closure_call(
state.func_ir,
pyfunc.__globals__,
block,
i,
pyfunc,
work_list=work_list,
callee_validator=callee_ir_validator,
)
return True
return False
|
https://github.com/numba/numba/issues/5476
|
Traceback (most recent call last):
File "test_inline.py", line 14, in <module>
print(fn())
File "numba\numba\dispatcher.py", line 420, in _compile_for_args
raise e
File "numba\numba\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "numba\numba\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "numba\numba\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "numba\numba\dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "numba\numba\compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "numba\numba\compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "numba\numba\compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "numba\numba\compiler.py", line 373, in _compile_core
raise e
File "numba\numba\compiler.py", line 364, in _compile_core
pm.run(self.state)
File "numba\numba\compiler_machinery.py", line 347, in run
raise patched_exception
File "numba\numba\compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "numba\numba\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "numba\numba\compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "numba\numba\compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "numba\numba\untyped_passes.py", line 312, in run_pass
state.func_ir.blocks = simplify_CFG(state.func_ir.blocks)
File "numba\numba\ir_utils.py", line 1237, in simplify_CFG
return rename_labels(blocks)
File "numba\numba\ir_utils.py", line 1188, in rename_labels
topo_order.remove(return_label)
ValueError: Failed in nopython mode pipeline (step: inline inlinable functions)
list.remove(x): x not in list
|
ValueError
|
def parfor_defs(parfor, use_set=None, def_set=None):
"""list variables written in this parfor by recursively
calling compute_use_defs() on body and combining block defs.
"""
if use_set is None:
use_set = set()
if def_set is None:
def_set = set()
blocks = wrap_parfor_blocks(parfor)
uses, defs = compute_use_defs(blocks)
cfg = compute_cfg_from_blocks(blocks)
last_label = max(blocks.keys())
unwrap_parfor_blocks(parfor)
# Conservatively, only add defs for blocks that are definitely executed
# Go through blocks in order, as if they are statements of the block that
# includes the parfor, and update uses/defs.
# no need for topo order of ir_utils
topo_order = cfg.topo_order()
# blocks that dominate last block are definitely executed
definitely_executed = cfg.dominators()[last_label]
# except loop bodies that might not execute
for loop in cfg.loops().values():
definitely_executed -= loop.body
for label in topo_order:
if label in definitely_executed:
# see compute_use_defs() in analysis.py
# variables defined in the block that includes the parfor are not
# uses of that block (are not potentially live in the beginning of
# the block)
use_set.update(uses[label] - def_set)
def_set.update(defs[label])
else:
use_set.update(uses[label] - def_set)
# treat loop variables and size variables as use
loop_vars = {l.start.name for l in parfor.loop_nests if isinstance(l.start, ir.Var)}
loop_vars |= {l.stop.name for l in parfor.loop_nests if isinstance(l.stop, ir.Var)}
loop_vars |= {l.step.name for l in parfor.loop_nests if isinstance(l.step, ir.Var)}
use_set.update(loop_vars - def_set)
use_set |= get_parfor_pattern_vars(parfor)
return analysis._use_defs_result(usemap=use_set, defmap=def_set)
|
def parfor_defs(parfor, use_set=None, def_set=None):
"""list variables written in this parfor by recursively
calling compute_use_defs() on body and combining block defs.
"""
if use_set is None:
use_set = set()
if def_set is None:
def_set = set()
blocks = wrap_parfor_blocks(parfor)
uses, defs = compute_use_defs(blocks)
cfg = compute_cfg_from_blocks(blocks)
last_label = max(blocks.keys())
unwrap_parfor_blocks(parfor)
# Conservatively, only add defs for blocks that are definitely executed
# Go through blocks in order, as if they are statements of the block that
# includes the parfor, and update uses/defs.
# no need for topo order of ir_utils
topo_order = cfg.topo_order()
# blocks that dominate last block are definitely executed
definitely_executed = cfg.dominators()[last_label]
# except loop bodies that might not execute
for loop in cfg.loops().values():
definitely_executed -= loop.body
for label in topo_order:
if label in definitely_executed:
# see compute_use_defs() in analysis.py
# variables defined in the block that includes the parfor are not
# uses of that block (are not potentially live in the beginning of
# the block)
use_set.update(uses[label] - def_set)
def_set.update(defs[label])
else:
use_set.update(uses[label] - def_set)
# treat loop variables and size variables as use
loop_vars = {l.start.name for l in parfor.loop_nests if isinstance(l.start, ir.Var)}
loop_vars |= {l.stop.name for l in parfor.loop_nests if isinstance(l.stop, ir.Var)}
loop_vars |= {l.step.name for l in parfor.loop_nests if isinstance(l.step, ir.Var)}
use_set.update(loop_vars)
use_set |= get_parfor_pattern_vars(parfor)
return analysis._use_defs_result(usemap=use_set, defmap=def_set)
|
https://github.com/numba/numba/issues/5597
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
./numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
800 try:
--> 801 yield
802 except NumbaError as e:
./numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
./numba/core/lowering.py in lower_inst(self, inst)
479 if isinstance(inst, _class):
--> 480 func(self, inst)
481 return
./numba/parfors/parfor_lowering.py in _lower_parfor_parallel(lowerer, parfor)
239 lowerer, parfor, typemap, typingctx, targetctx, flags, {},
--> 240 bool(alias_map), index_var_typ, parfor.races)
241 finally:
./numba/parfors/parfor_lowering.py in _create_gufunc_for_parfor_body(lowerer, parfor, typemap, typingctx, targetctx, flags, locals, has_aliases, index_var_typ, races)
933 warnings.warn(NumbaParallelSafetyWarning(msg, loc))
--> 934 replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes)
935
./numba/parfors/parfor_lowering.py in replace_var_with_array(vars, loop_body, typemap, calltypes)
1363 def replace_var_with_array(vars, loop_body, typemap, calltypes):
-> 1364 replace_var_with_array_internal(vars, loop_body, typemap, calltypes)
1365 for v in vars:
./numba/parfors/parfor_lowering.py in replace_var_with_array_internal(vars, loop_body, typemap, calltypes)
1360 for label, block in loop_body.items():
-> 1361 block.body = replace_var_with_array_in_block(vars, block, typemap, calltypes)
1362
./numba/parfors/parfor_lowering.py in replace_var_with_array_in_block(vars, block, typemap, calltypes)
1346
-> 1347 setitem_node = ir.SetItem(inst.target, const_var, inst.value, inst.loc)
1348 calltypes[setitem_node] = signature(
./numba/core/ir.py in __init__(self, target, index, value, loc)
590 assert isinstance(index, Var)
--> 591 assert isinstance(value, Var)
592 assert isinstance(loc, Loc)
AssertionError:
|
AssertionError
|
def lower_assign(self, ty, inst):
value = inst.value
# In nopython mode, closure vars are frozen like globals
if isinstance(value, (ir.Const, ir.Global, ir.FreeVar)):
res = self.context.get_constant_generic(self.builder, ty, value.value)
self.incref(ty, res)
return res
elif isinstance(value, ir.Expr):
return self.lower_expr(ty, value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
oty = self.typeof(value.name)
res = self.context.cast(self.builder, val, oty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Arg):
# Cast from the argument type to the local variable type
# (note the "arg.FOO" convention as used in typeinfer)
argty = self.typeof("arg." + value.name)
if isinstance(argty, types.Omitted):
pyval = argty.value
tyctx = self.context.typing_context
valty = tyctx.resolve_value_type_prefer_literal(pyval)
# use the type of the constant value
const = self.context.get_constant_generic(
self.builder,
valty,
pyval,
)
# cast it to the variable type
res = self.context.cast(self.builder, const, valty, ty)
else:
val = self.fnargs[value.index]
res = self.context.cast(self.builder, val, argty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Yield):
res = self.lower_yield(ty, value)
self.incref(ty, res)
return res
raise NotImplementedError(type(value), value)
|
def lower_assign(self, ty, inst):
value = inst.value
# In nopython mode, closure vars are frozen like globals
if isinstance(value, (ir.Const, ir.Global, ir.FreeVar)):
res = self.context.get_constant_generic(self.builder, ty, value.value)
self.incref(ty, res)
return res
elif isinstance(value, ir.Expr):
return self.lower_expr(ty, value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
oty = self.typeof(value.name)
res = self.context.cast(self.builder, val, oty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Arg):
# Cast from the argument type to the local variable type
# (note the "arg.FOO" convention as used in typeinfer)
argty = self.typeof("arg." + value.name)
if isinstance(argty, types.Omitted):
pyval = argty.value
# use the type of the constant value
valty = self.context.typing_context.resolve_value_type(pyval)
const = self.context.get_constant_generic(self.builder, valty, pyval)
# cast it to the variable type
res = self.context.cast(self.builder, const, valty, ty)
else:
val = self.fnargs[value.index]
res = self.context.cast(self.builder, val, argty, ty)
self.incref(ty, res)
return res
elif isinstance(value, ir.Yield):
res = self.lower_yield(ty, value)
self.incref(ty, res)
return res
raise NotImplementedError(type(value), value)
|
https://github.com/numba/numba/issues/5471
|
Traceback (most recent call last):
File "./test.py", line 24, in <module>
f()
File ".../numba/core/dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File ".../numba/core/dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File ".../numba/core/utils.py", line 79, in reraise
raise value.with_traceback(tb)
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function my_func at 0x7ff222b79f70>) with argument(s) of type(s): ()
* parameterized
In definition 0:
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function inner at 0x7ff23f0ab700>) with argument(s) of type(s): (unicode_type)
* parameterized
In definition 0:
All templates rejected with literals.
In definition 1:
All templates rejected without literals.
This error is usually caused by passing an argument of a type that is unsupported by the named function.
|
numba.core.errors.TypingError
|
def __call__(self, typeinfer):
with new_error_context("typing of argument at {0}", self.loc):
typevars = typeinfer.typevars
src = typevars[self.src]
if not src.defined:
return
ty = src.getone()
if isinstance(ty, types.Omitted):
ty = typeinfer.context.resolve_value_type_prefer_literal(
ty.value,
)
if not ty.is_precise():
raise TypingError("non-precise type {}".format(ty))
typeinfer.add_type(self.dst, ty, loc=self.loc)
|
def __call__(self, typeinfer):
with new_error_context("typing of argument at {0}", self.loc):
typevars = typeinfer.typevars
src = typevars[self.src]
if not src.defined:
return
ty = src.getone()
if isinstance(ty, types.Omitted):
ty = typeinfer.context.resolve_value_type(ty.value)
if not ty.is_precise():
raise TypingError("non-precise type {}".format(ty))
typeinfer.add_type(self.dst, ty, loc=self.loc)
|
https://github.com/numba/numba/issues/5471
|
Traceback (most recent call last):
File "./test.py", line 24, in <module>
f()
File ".../numba/core/dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File ".../numba/core/dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File ".../numba/core/utils.py", line 79, in reraise
raise value.with_traceback(tb)
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function my_func at 0x7ff222b79f70>) with argument(s) of type(s): ()
* parameterized
In definition 0:
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function inner at 0x7ff23f0ab700>) with argument(s) of type(s): (unicode_type)
* parameterized
In definition 0:
All templates rejected with literals.
In definition 1:
All templates rejected without literals.
This error is usually caused by passing an argument of a type that is unsupported by the named function.
|
numba.core.errors.TypingError
|
def __init__(self, value):
self._value = value
super(Omitted, self).__init__("omitted(default=%r)" % (value,))
|
def __init__(self, value):
self.value = value
super(Omitted, self).__init__("omitted(default=%r)" % (value,))
|
https://github.com/numba/numba/issues/5471
|
Traceback (most recent call last):
File "./test.py", line 24, in <module>
f()
File ".../numba/core/dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File ".../numba/core/dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File ".../numba/core/utils.py", line 79, in reraise
raise value.with_traceback(tb)
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function my_func at 0x7ff222b79f70>) with argument(s) of type(s): ()
* parameterized
In definition 0:
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function inner at 0x7ff23f0ab700>) with argument(s) of type(s): (unicode_type)
* parameterized
In definition 0:
All templates rejected with literals.
In definition 1:
All templates rejected without literals.
This error is usually caused by passing an argument of a type that is unsupported by the named function.
|
numba.core.errors.TypingError
|
def key(self):
return type(self._value), id(self._value)
|
def key(self):
return type(self.value), id(self.value)
|
https://github.com/numba/numba/issues/5471
|
Traceback (most recent call last):
File "./test.py", line 24, in <module>
f()
File ".../numba/core/dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File ".../numba/core/dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File ".../numba/core/utils.py", line 79, in reraise
raise value.with_traceback(tb)
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function my_func at 0x7ff222b79f70>) with argument(s) of type(s): ()
* parameterized
In definition 0:
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function inner at 0x7ff23f0ab700>) with argument(s) of type(s): (unicode_type)
* parameterized
In definition 0:
All templates rejected with literals.
In definition 1:
All templates rejected without literals.
This error is usually caused by passing an argument of a type that is unsupported by the named function.
|
numba.core.errors.TypingError
|
def generic(self, args, kws):
assert not kws
if len(args) == 1:
# One-argument type() -> return the __class__
# Avoid literal types
arg = types.unliteral(args[0])
classty = self.context.resolve_getattr(arg, "__class__")
if classty is not None:
return signature(classty, *args)
|
def generic(self, args, kws):
assert not kws
if len(args) == 1:
# One-argument type() -> return the __class__
classty = self.context.resolve_getattr(args[0], "__class__")
if classty is not None:
return signature(classty, *args)
|
https://github.com/numba/numba/issues/5471
|
Traceback (most recent call last):
File "./test.py", line 24, in <module>
f()
File ".../numba/core/dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File ".../numba/core/dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File ".../numba/core/utils.py", line 79, in reraise
raise value.with_traceback(tb)
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function my_func at 0x7ff222b79f70>) with argument(s) of type(s): ()
* parameterized
In definition 0:
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function inner at 0x7ff23f0ab700>) with argument(s) of type(s): (unicode_type)
* parameterized
In definition 0:
All templates rejected with literals.
In definition 1:
All templates rejected without literals.
This error is usually caused by passing an argument of a type that is unsupported by the named function.
|
numba.core.errors.TypingError
|
def unified_function_type(numba_types, require_precise=True):
"""Returns a unified Numba function type if possible.
Parameters
----------
numba_types : tuple
Numba type instances.
require_precise : bool
If True, the returned Numba function type must be precise.
Returns
-------
typ : {numba.core.types.Type, None}
A unified Numba function type. Or ``None`` when the Numba types
cannot be unified, e.g. when the ``numba_types`` contains at
least two different Numba function type instances.
If ``numba_types`` contains a Numba dispatcher type, the unified
Numba function type will be an imprecise ``UndefinedFunctionType``
instance, or None when ``require_precise=True`` is specified.
Specifying ``require_precise=False`` enables unifying imprecise
Numba dispatcher instances when used in tuples or if-then branches
when the precise Numba function cannot be determined on the first
occurrence that is not a call expression.
"""
from numba.core.errors import NumbaExperimentalFeatureWarning
if not (
numba_types
and isinstance(numba_types[0], (types.Dispatcher, types.FunctionType))
):
return
warnings.warn(
"First-class function type feature is experimental",
category=NumbaExperimentalFeatureWarning,
)
mnargs, mxargs = None, None
dispatchers = set()
function = None
undefined_function = None
for t in numba_types:
if isinstance(t, types.Dispatcher):
mnargs1, mxargs1 = get_nargs_range(t.dispatcher.py_func)
if mnargs is None:
mnargs, mxargs = mnargs1, mxargs1
elif not (mnargs, mxargs) == (mnargs1, mxargs1):
return
dispatchers.add(t.dispatcher)
t = t.dispatcher.get_function_type()
if t is None:
continue
if isinstance(t, types.FunctionType):
if mnargs is None:
mnargs = mxargs = t.nargs
elif not (mnargs == mxargs == t.nargs):
return
if isinstance(t, types.UndefinedFunctionType):
if undefined_function is None:
undefined_function = t
else:
# Refuse to unify using function type
return
dispatchers.update(t.dispatchers)
else:
if function is None:
function = t
else:
assert function == t
else:
return
if require_precise and (function is None or undefined_function is not None):
return
if function is not None:
if undefined_function is not None:
assert function.nargs == undefined_function.nargs
function = undefined_function
elif undefined_function is not None:
undefined_function.dispatchers.update(dispatchers)
function = undefined_function
else:
function = types.UndefinedFunctionType(mnargs, dispatchers)
return function
|
def unified_function_type(numba_types, require_precise=True):
"""Returns a unified Numba function type if possible.
Parameters
----------
numba_types : tuple
Numba type instances.
require_precise : bool
If True, the returned Numba function type must be precise.
Returns
-------
typ : {numba.core.types.Type, None}
A unified Numba function type. Or ``None`` when the Numba types
cannot be unified, e.g. when the ``numba_types`` contains at
least two different Numba function type instances.
If ``numba_types`` contains a Numba dispatcher type, the unified
Numba function type will be an imprecise ``UndefinedFunctionType``
instance, or None when ``require_precise=True`` is specified.
Specifying ``require_precise=False`` enables unifying imprecise
Numba dispatcher instances when used in tuples or if-then branches
when the precise Numba function cannot be determined on the first
occurrence that is not a call expression.
"""
from numba.core.errors import NumbaExperimentalFeatureWarning
if not (
numba_types
and isinstance(numba_types[0], (types.Dispatcher, types.FunctionType))
):
return
warnings.warn(
"First-class function type feature is experimental",
category=NumbaExperimentalFeatureWarning,
)
mnargs, mxargs = None, None
dispatchers = set()
function = None
undefined_function = None
for t in numba_types:
if isinstance(t, types.Dispatcher):
mnargs1, mxargs1 = get_nargs_range(t.dispatcher.py_func)
if mnargs is None:
mnargs, mxargs = mnargs1, mxargs1
elif not (mnargs, mxargs) == (mnargs1, mxargs1):
return
dispatchers.add(t.dispatcher)
t = t.dispatcher.get_function_type()
if t is None:
continue
if isinstance(t, types.FunctionType):
if mnargs is None:
mnargs = mxargs = t.nargs
elif not (mnargs == mxargs == t.nargs):
return numba_types
if isinstance(t, types.UndefinedFunctionType):
if undefined_function is None:
undefined_function = t
else:
# Refuse to unify using function type
return
dispatchers.update(t.dispatchers)
else:
if function is None:
function = t
else:
assert function == t
else:
return
if require_precise and (function is None or undefined_function is not None):
return
if function is not None:
if undefined_function is not None:
assert function.nargs == undefined_function.nargs
function = undefined_function
elif undefined_function is not None:
undefined_function.dispatchers.update(dispatchers)
function = undefined_function
else:
function = types.UndefinedFunctionType(mnargs, dispatchers)
return function
|
https://github.com/numba/numba/issues/5685
|
Traceback (most recent call last):
File "/home/lucio/mypyprojects/numba/numba/core/errors.py", line 726, in new_error_context
yield
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 267, in lower_block
self.lower_inst(inst)
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 364, in lower_inst
val = self.lower_assign(ty, inst)
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 538, in lower_assign
return self.lower_expr(ty, value)
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 1194, in lower_expr
self.loadvar(expr.index.name)))
File "/home/lucio/mypyprojects/numba/numba/core/base.py", line 1155, in __call__
res = self._imp(self._context, builder, self._sig, args, loc=loc)
File "/home/lucio/mypyprojects/numba/numba/core/base.py", line 1185, in wrapper
return fn(*args, **kwargs)
File "/home/lucio/mypyprojects/numba/numba/cpython/tupleobj.py", line 254, in getitem_typed
sig.return_type) == sig.return_type
File "/home/lucio/mypyprojects/numba/numba/core/typing/context.py", line 630, in unify_types
unified = self.unify_pairs(unified, tp)
File "/home/lucio/mypyprojects/numba/numba/core/typing/context.py", line 649, in unify_pairs
unified = first.unify(self, second)
File "/home/lucio/mypyprojects/numba/numba/core/types/containers.py", line 320, in unify
print(Tuple(unified))
File "/home/lucio/mypyprojects/numba/numba/core/types/abstract.py", line 65, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "/home/lucio/mypyprojects/numba/numba/core/types/containers.py", line 283, in __new__
return UniTuple(dtype=t, count=len(types))
File "/home/lucio/mypyprojects/numba/numba/core/types/abstract.py", line 66, in __call__
return cls._intern(inst)
File "/home/lucio/mypyprojects/numba/numba/core/types/abstract.py", line 50, in _intern
orig = _typecache.get(wr)
File "/home/lucio/mypyprojects/numba/numba/core/types/abstract.py", line 118, in __hash__
return hash(self.key)
TypeError: unhashable type: 'list'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/lucio/.PyCharm2018.3/config/scratches/numba unification problem.py", line 37, in <module>
baz(np.zeros((5, 10)), funcs)
File "/home/lucio/mypyprojects/numba/numba/core/dispatcher.py", line 420, in _compile_for_args
raise e
File "/home/lucio/mypyprojects/numba/numba/core/dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "/home/lucio/mypyprojects/numba/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/lucio/mypyprojects/numba/numba/core/dispatcher.py", line 794, in compile
cres = self._compiler.compile(args, return_type)
File "/home/lucio/mypyprojects/numba/numba/core/dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "/home/lucio/mypyprojects/numba/numba/core/dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "/home/lucio/mypyprojects/numba/numba/core/dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "/home/lucio/mypyprojects/numba/numba/core/compiler.py", line 568, in compile_extra
return pipeline.compile_extra(func)
File "/home/lucio/mypyprojects/numba/numba/core/compiler.py", line 339, in compile_extra
return self._compile_bytecode()
File "/home/lucio/mypyprojects/numba/numba/core/compiler.py", line 401, in _compile_bytecode
return self._compile_core()
File "/home/lucio/mypyprojects/numba/numba/core/compiler.py", line 381, in _compile_core
raise e
File "/home/lucio/mypyprojects/numba/numba/core/compiler.py", line 372, in _compile_core
pm.run(self.state)
File "/home/lucio/mypyprojects/numba/numba/core/compiler_machinery.py", line 341, in run
raise patched_exception
File "/home/lucio/mypyprojects/numba/numba/core/compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "/home/lucio/mypyprojects/numba/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/lucio/mypyprojects/numba/numba/core/compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "/home/lucio/mypyprojects/numba/numba/core/compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "/home/lucio/mypyprojects/numba/numba/core/typed_passes.py", line 442, in run_pass
NativeLowering().run_pass(state)
File "/home/lucio/mypyprojects/numba/numba/core/typed_passes.py", line 370, in run_pass
lower.lower()
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 179, in lower
self.lower_normal_function(self.fndesc)
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 227, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 253, in lower_function_body
self.lower_block(block)
File "/home/lucio/mypyprojects/numba/numba/core/lowering.py", line 267, in lower_block
self.lower_inst(inst)
File "/home/lucio/anaconda3/envs/numbaenv/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/home/lucio/mypyprojects/numba/numba/core/errors.py", line 733, in new_error_context
reraise(type(newerr), newerr, tb)
File "/home/lucio/mypyprojects/numba/numba/core/utils.py", line 81, in reraise
raise value
numba.core.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
unhashable type: 'list'
File "numba unification problem.py", line 30:
def baz(a, funcs):
<source elided>
idx = 0
for i in literal_unroll(funcs):
^
[1] During: lowering "i.33 = typed_getitem(value=$8.3, dtype=Tuple(type(CPUDispatcher(<function foo1 at 0x7f11868ccea0>)), type(CPUDispatcher(<function bar1 at 0x7f11740aaae8>))), index=$phi18.1)" at /home/lucio/.PyCharm2018.3/config/scratches/numba unification problem.py (30)
|
TypeError
|
def unified_function_type(numba_types, require_precise=True):
"""Returns a unified Numba function type if possible.
Parameters
----------
numba_types : tuple
Numba type instances.
require_precise : bool
If True, the returned Numba function type must be precise.
Returns
-------
typ : {numba.core.types.Type, None}
A unified Numba function type. Or ``None`` when the Numba types
cannot be unified, e.g. when the ``numba_types`` contains at
least two different Numba function type instances.
If ``numba_types`` contains a Numba dispatcher type, the unified
Numba function type will be an imprecise ``UndefinedFunctionType``
instance, or None when ``require_precise=True`` is specified.
Specifying ``require_precise=False`` enables unifying imprecise
Numba dispatcher instances when used in tuples or if-then branches
when the precise Numba function cannot be determined on the first
occurrence that is not a call expression.
"""
from numba.core.errors import NumbaExperimentalFeatureWarning
if not (
numba_types
and isinstance(numba_types[0], (types.Dispatcher, types.FunctionType))
):
return
warnings.warn(
"First-class function type feature is experimental",
category=NumbaExperimentalFeatureWarning,
)
mnargs, mxargs = None, None
dispatchers = set()
function = None
undefined_function = None
for t in numba_types:
if isinstance(t, types.Dispatcher):
mnargs1, mxargs1 = get_nargs_range(t.dispatcher.py_func)
if mnargs is None:
mnargs, mxargs = mnargs1, mxargs1
elif not (mnargs, mxargs) == (mnargs1, mxargs1):
return
dispatchers.add(t.dispatcher)
t = t.dispatcher.get_function_type()
if t is None:
continue
if isinstance(t, types.FunctionType):
if mnargs is None:
mnargs = mxargs = t.nargs
elif not (mnargs == mxargs == t.nargs):
return numba_types
if isinstance(t, types.UndefinedFunctionType):
if undefined_function is None:
undefined_function = t
dispatchers.update(t.dispatchers)
else:
if function is None:
function = t
else:
assert function == t
else:
return
if require_precise and (function is None or undefined_function is not None):
return
if function is not None:
if undefined_function is not None:
assert function.nargs == undefined_function.nargs
function = undefined_function
elif undefined_function is not None:
undefined_function.dispatchers.update(dispatchers)
function = undefined_function
else:
function = types.UndefinedFunctionType(mnargs, dispatchers)
return function
|
def unified_function_type(numba_types, require_precise=True):
"""Returns a unified Numba function type if possible.
Parameters
----------
numba_types : tuple
Numba type instances.
require_precise : bool
If True, the returned Numba function type must be precise.
Returns
-------
typ : {numba.core.types.Type, None}
A unified Numba function type. Or ``None`` when the Numba types
cannot be unified, e.g. when the ``numba_types`` contains at
least two different Numba function type instances.
If ``numba_types`` contains a Numba dispatcher type, the unified
Numba function type will be an imprecise ``UndefinedFunctionType``
instance, or None when ``require_precise=True`` is specified.
Specifying ``require_precise=False`` enables unifying imprecise
Numba dispatcher instances when used in tuples or if-then branches
when the precise Numba function cannot be determined on the first
occurrence that is not a call expression.
"""
from numba.core.errors import NumbaExperimentalFeatureWarning
if not (
numba_types
and isinstance(numba_types[0], (types.Dispatcher, types.FunctionType))
):
return
warnings.warn(
"First-class function type feature is experimental",
category=NumbaExperimentalFeatureWarning,
)
mnargs, mxargs = None, None
dispatchers = set()
function = None
undefined_function = None
for t in numba_types:
if isinstance(t, types.Dispatcher):
mnargs1, mxargs1 = get_nargs_range(t.dispatcher.py_func)
if mnargs is None:
mnargs, mxargs = mnargs1, mxargs1
elif not (mnargs, mxargs) == (mnargs1, mxargs1):
return
dispatchers.add(t.dispatcher)
t = t.dispatcher.get_function_type()
if t is None:
continue
if isinstance(t, types.FunctionType):
if mnargs is None:
mnargs = mxargs = t.nargs
elif not (mnargs == mxargs == t.nargs):
return numba_types
if isinstance(t, types.UndefinedFunctionType):
if undefined_function is None:
undefined_function = t
elif 0:
assert undefined_function == t
dispatchers.update(t.dispatchers)
else:
if function is None:
function = t
else:
assert function == t
else:
return
if require_precise and (function is None or undefined_function is not None):
return
if function is not None:
if undefined_function is not None:
assert function.nargs == undefined_function.nargs
function = undefined_function
elif undefined_function is not None:
undefined_function.dispatchers.update(dispatchers)
function = undefined_function
else:
function = types.UndefinedFunctionType(mnargs, dispatchers)
return function
|
https://github.com/numba/numba/issues/5615
|
0.50.0dev0+128.g82c7d37ad
<path>/numba/core/utils.py:508: NumbaExperimentalFeatureWarning: First-class function type feature is experimental
warnings.warn("First-class function type feature is experimental",
gi196.py:22: NumbaExperimentalFeatureWarning: First-class function type feature is experimental
for t in literal_unroll(fcs):
gi196.py:22: NumbaExperimentalFeatureWarning: First-class function type feature is experimental
for t in literal_unroll(fcs):
gi196.py:23: NumbaExperimentalFeatureWarning: First-class function type feature is experimental
i, j = t
<path>/numba/core/utils.py:508: NumbaExperimentalFeatureWarning: First-class function type feature is experimental
warnings.warn("First-class function type feature is experimental",
Traceback (most recent call last):
File "<path>/numba/core/errors.py", line 726, in new_error_context
yield
File "<path>/numba/core/lowering.py", line 267, in lower_block
self.lower_inst(inst)
File "<path>/numba/core/lowering.py", line 364, in lower_inst
val = self.lower_assign(ty, inst)
File "<path>/numba/core/lowering.py", line 538, in lower_assign
return self.lower_expr(ty, value)
File "<path>/numba/core/lowering.py", line 1193, in lower_expr
return impl(self.builder, (self.loadvar(expr.value.name),
File "<path>/numba/core/base.py", line 1155, in __call__
res = self._imp(self._context, builder, self._sig, args, loc=loc)
File "<path>/numba/core/base.py", line 1185, in wrapper
return fn(*args, **kwargs)
File "<path>/numba/cpython/tupleobj.py", line 253, in getitem_typed
DOCAST = context.typing_context.unify_types(sig.args[0][i],
File "<path>/numba/core/typing/context.py", line 630, in unify_types
unified = self.unify_pairs(unified, tp)
File "<path>/numba/core/typing/context.py", line 649, in unify_pairs
unified = first.unify(self, second)
File "<path>/numba/core/types/containers.py", line 319, in unify
return Tuple(unified)
File "<path>/numba/core/types/abstract.py", line 65, in __call__
inst = type.__call__(cls, *args, **kwargs)
File "<path>/numba/core/types/containers.py", line 280, in __new__
t = utils.unified_function_type(types, require_precise=True)
File "<path>/numba/core/utils.py", line 536, in unified_function_type
assert undefined_function == t
AssertionError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "gi196.py", line 28, in <module>
print(bar(tup))
File "<path>/numba/core/dispatcher.py", line 420, in _compile_for_args
raise e
File "<path>/numba/core/dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "<path>/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/core/dispatcher.py", line 794, in compile
cres = self._compiler.compile(args, return_type)
File "<path>/numba/core/dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "<path>/numba/core/dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "<path>/numba/core/dispatcher.py", line 104, in _compile_core
cres = compiler.compile_extra(self.targetdescr.typing_context,
File "<path>/numba/core/compiler.py", line 568, in compile_extra
return pipeline.compile_extra(func)
File "<path>/numba/core/compiler.py", line 339, in compile_extra
return self._compile_bytecode()
File "<path>/numba/core/compiler.py", line 401, in _compile_bytecode
return self._compile_core()
File "<path>/numba/core/compiler.py", line 381, in _compile_core
raise e
File "<path>/numba/core/compiler.py", line 372, in _compile_core
pm.run(self.state)
File "<path>/numba/core/compiler_machinery.py", line 341, in run
raise patched_exception
File "<path>/numba/core/compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "<path>/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "<path>/numba/core/compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "<path>/numba/core/compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "<path>/numba/core/typed_passes.py", line 442, in run_pass
NativeLowering().run_pass(state)
File "<path>/numba/core/typed_passes.py", line 370, in run_pass
lower.lower()
File "<path>/numba/core/lowering.py", line 179, in lower
self.lower_normal_function(self.fndesc)
File "<path>/numba/core/lowering.py", line 227, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "<path>/numba/core/lowering.py", line 253, in lower_function_body
self.lower_block(block)
File "<path>/numba/core/lowering.py", line 267, in lower_block
self.lower_inst(inst)
File "<env>/lib/python3.8/contextlib.py", line 131, in __exit__
self.gen.throw(type, value, traceback)
File "<path>/numba/core/errors.py", line 733, in new_error_context
reraise(type(newerr), newerr, tb)
File "<path>/numba/core/utils.py", line 80, in reraise
raise value
numba.core.errors.LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
File "gi196.py", line 22:
def bar(fcs):
<source elided>
a = 10
for t in literal_unroll(fcs):
^
[1] During: lowering "t.25 = typed_getitem(value=$14call_function.4, dtype=Tuple(type(CPUDispatcher(<function foo1 at 0x7f7d000f5ca0>)), type(CPUDispatcher(<function foo2 at 0x7f7ceeec6d30>))), index=$phi20.1)" at gi196.py (22)
|
numba.core.errors.LoweringError
|
def _legalize_parameter_names(var_list):
"""
Legalize names in the variable list for use as a Python function's
parameter names.
"""
var_map = OrderedDict()
for var in var_list:
old_name = var.name
new_name = var.scope.redefine(old_name, loc=var.loc).name
new_name = new_name.replace("$", "_").replace(".", "_")
# Caller should ensure the names are unique
if new_name in var_map:
raise AssertionError(f"{new_name!r} not unique")
var_map[new_name] = var, old_name
var.name = new_name
param_names = list(var_map)
try:
yield param_names
finally:
# Make sure the old names are restored, to avoid confusing
# other parts of Numba (see issue #1466)
for var, old_name in var_map.values():
var.name = old_name
|
def _legalize_parameter_names(var_list):
"""
Legalize names in the variable list for use as a Python function's
parameter names.
"""
var_map = OrderedDict()
for var in var_list:
old_name = var.name
new_name = old_name.replace("$", "_").replace(".", "_")
# Caller should ensure the names are unique
assert new_name not in var_map
var_map[new_name] = var, old_name
var.name = new_name
param_names = list(var_map)
try:
yield param_names
finally:
# Make sure the old names are restored, to avoid confusing
# other parts of Numba (see issue #1466)
for var, old_name in var_map.values():
var.name = old_name
|
https://github.com/numba/numba/issues/5599
|
--------------------------------------------------------------------
AssertionError Traceback (most recent call last)
/anaconda3/lib/python3.7/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
800 try:
--> 801 yield
802 except NumbaError as e:
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_inst(self, inst)
363 ty = self.typeof(inst.target.name)
--> 364 val = self.lower_assign(ty, inst)
365 self.storevar(val, inst.target.name)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_assign(self, ty, inst)
537 elif isinstance(value, ir.Expr):
--> 538 return self.lower_expr(ty, value)
539
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_expr(self, resty, expr)
1257 elif expr.op in self.context.special_ops:
-> 1258 res = self.context.special_ops[expr.op](self, expr)
1259 return res
/anaconda3/lib/python3.7/site-packages/numba/np/ufunc/array_exprs.py in _lower_array_expr(lowerer, expr)
340
--> 341 with _legalize_parameter_names(expr_var_unique) as expr_params:
342
/anaconda3/lib/python3.7/contextlib.py in __enter__(self)
111 try:
--> 112 return next(self.gen)
113 except StopIteration:
/anaconda3/lib/python3.7/site-packages/numba/np/ufunc/array_exprs.py in _legalize_parameter_names(var_list)
312 # Caller should ensure the names are unique
--> 313 assert new_name not in var_map
314 var_map[new_name] = var, old_name
AssertionError:
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-5-10e1bfec8407> in <module>
13
14 x = 5
---> 15 f(x)
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in compile(self, sig)
792 self._cache_misses[sig] += 1
793 try:
--> 794 cres = self._compiler.compile(args, return_type)
795 except errors.ForceLiteralArg as e:
796 def folded(args, kws):
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_core(self, args, return_type)
107 args=args, return_type=return_type,
108 flags=flags, locals=self.locals,
--> 109 pipeline_class=self.pipeline_class)
110 # Check typing error if object mode is used
111 if cres.typing_error is not None and not flags.enable_pyobject:
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
566 pipeline = pipeline_class(typingctx, targetctx, library,
567 args, return_type, flags, locals)
--> 568 return pipeline.compile_extra(func)
569
570
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in compile_extra(self, func)
337 self.state.lifted = ()
338 self.state.lifted_from = None
--> 339 return self._compile_bytecode()
340
341 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in _compile_bytecode(self)
399 """
400 assert self.state.func_ir is None
--> 401 return self._compile_core()
402
403 def _compile_ir(self):
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in _compile_core(self)
379 self.state.status.fail_reason = e
380 if is_final_pipeline:
--> 381 raise e
382 else:
383 raise CompilerError("All available pipelines exhausted")
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in _compile_core(self)
370 res = None
371 try:
--> 372 pm.run(self.state)
373 if self.state.cr is not None:
374 break
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in run(self, state)
339 (self.pipeline_name, pass_desc)
340 patched_exception = self._patch_error(msg, e)
--> 341 raise patched_exception
342
343 def dependency_analysis(self):
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in run(self, state)
330 pass_inst = _pass_registry.get(pss).pass_inst
331 if isinstance(pass_inst, CompilerPass):
--> 332 self._runPass(idx, pass_inst, state)
333 else:
334 raise BaseException("Legacy pass in use")
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
289 mutated |= check(pss.run_initialization, internal_state)
290 with SimpleTimer() as pass_time:
--> 291 mutated |= check(pss.run_pass, internal_state)
292 with SimpleTimer() as finalize_time:
293 mutated |= check(pss.run_finalizer, internal_state)
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in check(func, compiler_state)
262
263 def check(func, compiler_state):
--> 264 mangled = func(compiler_state)
265 if mangled not in (True, False):
266 msg = ("CompilerPass implementations should return True/False. "
/anaconda3/lib/python3.7/site-packages/numba/core/typed_passes.py in run_pass(self, state)
440
441 # TODO: Pull this out into the pipeline
--> 442 NativeLowering().run_pass(state)
443 lowered = state['cr']
444 signature = typing.signature(state.return_type, *state.args)
/anaconda3/lib/python3.7/site-packages/numba/core/typed_passes.py in run_pass(self, state)
368 lower = lowering.Lower(targetctx, library, fndesc, interp,
369 metadata=metadata)
--> 370 lower.lower()
371 if not flags.no_cpython_wrapper:
372 lower.create_cpython_wrapper(flags.release_gil)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_normal_function(self, fndesc)
225 # Init argument values
226 self.extract_function_arguments()
--> 227 entry_block_tail = self.lower_function_body()
228
229 # Close tail of entry block
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_function_body(self)
251 bb = self.blkmap[offset]
252 self.builder.position_at_end(bb)
--> 253 self.lower_block(block)
254 self.post_lower()
255 return entry_block_tail
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_block(self, block)
265 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
269
/anaconda3/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
/anaconda3/lib/python3.7/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
806 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
807 tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
--> 808 reraise(type(newerr), newerr, tb)
809
810
/anaconda3/lib/python3.7/site-packages/numba/core/utils.py in reraise(tp, value, tb)
78 if value.__traceback__ is not tb:
79 raise value.with_traceback(tb)
---> 80 raise value
81
82
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
File "<ipython-input-5-10e1bfec8407>", line 10:
def f(x):
<source elided>
for _ in range(2):
val = arr * arr
^
[1] During: lowering "$30binary_multiply.4 = arrayexpr(expr=(<built-in function mul>, [Var(arr_2, <ipython-input-5-10e1bfec8407>:9), Var(arr.2, <ipython-input-5-10e1bfec8407>:9)]), ty=array(float64, 1d, C))" at <ipython-input-5-10e1bfec8407> (10)
|
AssertionError
|
def _lower_array_expr(lowerer, expr):
"""Lower an array expression built by RewriteArrayExprs."""
expr_name = "__numba_array_expr_%s" % (hex(hash(expr)).replace("-", "_"))
expr_filename = expr.loc.filename
expr_var_list = expr.list_vars()
# The expression may use a given variable several times, but we
# should only create one parameter for it.
expr_var_unique = sorted(set(expr_var_list), key=lambda var: var.name)
# Arguments are the names external to the new closure
expr_args = [var.name for var in expr_var_unique]
# 1. Create an AST tree from the array expression.
with _legalize_parameter_names(expr_var_unique) as expr_params:
ast_args = [ast.arg(param_name, None) for param_name in expr_params]
# Parse a stub function to ensure the AST is populated with
# reasonable defaults for the Python version.
ast_module = ast.parse(
"def {0}(): return".format(expr_name), expr_filename, "exec"
)
assert hasattr(ast_module, "body") and len(ast_module.body) == 1
ast_fn = ast_module.body[0]
ast_fn.args.args = ast_args
ast_fn.body[0].value, namespace = _arr_expr_to_ast(expr.expr)
ast.fix_missing_locations(ast_module)
# 2. Compile the AST module and extract the Python function.
code_obj = compile(ast_module, expr_filename, "exec")
exec(code_obj, namespace)
impl = namespace[expr_name]
# 3. Now compile a ufunc using the Python function as kernel.
context = lowerer.context
builder = lowerer.builder
outer_sig = expr.ty(*(lowerer.typeof(name) for name in expr_args))
inner_sig_args = []
for argty in outer_sig.args:
if isinstance(argty, types.Optional):
argty = argty.type
if isinstance(argty, types.Array):
inner_sig_args.append(argty.dtype)
else:
inner_sig_args.append(argty)
inner_sig = outer_sig.return_type.dtype(*inner_sig_args)
# Follow the Numpy error model. Note this also allows e.g. vectorizing
# division (issue #1223).
flags = compiler.Flags()
flags.set("error_model", "numpy")
cres = context.compile_subroutine(
builder, impl, inner_sig, flags=flags, caching=False
)
# Create kernel subclass calling our native function
from numba.np import npyimpl
class ExprKernel(npyimpl._Kernel):
def generate(self, *args):
arg_zip = zip(args, self.outer_sig.args, inner_sig.args)
cast_args = [self.cast(val, inty, outty) for val, inty, outty in arg_zip]
result = self.context.call_internal(
builder, cres.fndesc, inner_sig, cast_args
)
return self.cast(result, inner_sig.return_type, self.outer_sig.return_type)
args = [lowerer.loadvar(name) for name in expr_args]
return npyimpl.numpy_ufunc_kernel(
context, builder, outer_sig, args, ExprKernel, explicit_output=False
)
|
def _lower_array_expr(lowerer, expr):
"""Lower an array expression built by RewriteArrayExprs."""
expr_name = "__numba_array_expr_%s" % (hex(hash(expr)).replace("-", "_"))
expr_filename = expr.loc.filename
expr_var_list = expr.list_vars()
# The expression may use a given variable several times, but we
# should only create one parameter for it.
expr_var_unique = sorted(set(expr_var_list), key=lambda var: var.name)
# Arguments are the names external to the new closure
expr_args = [var.name for var in expr_var_unique]
# 1. Create an AST tree from the array expression.
with _legalize_parameter_names(expr_var_unique) as expr_params:
if hasattr(ast, "arg"):
# Should be Python 3.x
ast_args = [ast.arg(param_name, None) for param_name in expr_params]
else:
# Should be Python 2.x
ast_args = [ast.Name(param_name, ast.Param()) for param_name in expr_params]
# Parse a stub function to ensure the AST is populated with
# reasonable defaults for the Python version.
ast_module = ast.parse(
"def {0}(): return".format(expr_name), expr_filename, "exec"
)
assert hasattr(ast_module, "body") and len(ast_module.body) == 1
ast_fn = ast_module.body[0]
ast_fn.args.args = ast_args
ast_fn.body[0].value, namespace = _arr_expr_to_ast(expr.expr)
ast.fix_missing_locations(ast_module)
# 2. Compile the AST module and extract the Python function.
code_obj = compile(ast_module, expr_filename, "exec")
exec(code_obj, namespace)
impl = namespace[expr_name]
# 3. Now compile a ufunc using the Python function as kernel.
context = lowerer.context
builder = lowerer.builder
outer_sig = expr.ty(*(lowerer.typeof(name) for name in expr_args))
inner_sig_args = []
for argty in outer_sig.args:
if isinstance(argty, types.Optional):
argty = argty.type
if isinstance(argty, types.Array):
inner_sig_args.append(argty.dtype)
else:
inner_sig_args.append(argty)
inner_sig = outer_sig.return_type.dtype(*inner_sig_args)
# Follow the Numpy error model. Note this also allows e.g. vectorizing
# division (issue #1223).
flags = compiler.Flags()
flags.set("error_model", "numpy")
cres = context.compile_subroutine(
builder, impl, inner_sig, flags=flags, caching=False
)
# Create kernel subclass calling our native function
from numba.np import npyimpl
class ExprKernel(npyimpl._Kernel):
def generate(self, *args):
arg_zip = zip(args, self.outer_sig.args, inner_sig.args)
cast_args = [self.cast(val, inty, outty) for val, inty, outty in arg_zip]
result = self.context.call_internal(
builder, cres.fndesc, inner_sig, cast_args
)
return self.cast(result, inner_sig.return_type, self.outer_sig.return_type)
args = [lowerer.loadvar(name) for name in expr_args]
return npyimpl.numpy_ufunc_kernel(
context, builder, outer_sig, args, ExprKernel, explicit_output=False
)
|
https://github.com/numba/numba/issues/5599
|
--------------------------------------------------------------------
AssertionError Traceback (most recent call last)
/anaconda3/lib/python3.7/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
800 try:
--> 801 yield
802 except NumbaError as e:
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_inst(self, inst)
363 ty = self.typeof(inst.target.name)
--> 364 val = self.lower_assign(ty, inst)
365 self.storevar(val, inst.target.name)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_assign(self, ty, inst)
537 elif isinstance(value, ir.Expr):
--> 538 return self.lower_expr(ty, value)
539
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_expr(self, resty, expr)
1257 elif expr.op in self.context.special_ops:
-> 1258 res = self.context.special_ops[expr.op](self, expr)
1259 return res
/anaconda3/lib/python3.7/site-packages/numba/np/ufunc/array_exprs.py in _lower_array_expr(lowerer, expr)
340
--> 341 with _legalize_parameter_names(expr_var_unique) as expr_params:
342
/anaconda3/lib/python3.7/contextlib.py in __enter__(self)
111 try:
--> 112 return next(self.gen)
113 except StopIteration:
/anaconda3/lib/python3.7/site-packages/numba/np/ufunc/array_exprs.py in _legalize_parameter_names(var_list)
312 # Caller should ensure the names are unique
--> 313 assert new_name not in var_map
314 var_map[new_name] = var, old_name
AssertionError:
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-5-10e1bfec8407> in <module>
13
14 x = 5
---> 15 f(x)
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in compile(self, sig)
792 self._cache_misses[sig] += 1
793 try:
--> 794 cres = self._compiler.compile(args, return_type)
795 except errors.ForceLiteralArg as e:
796 def folded(args, kws):
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
/anaconda3/lib/python3.7/site-packages/numba/core/dispatcher.py in _compile_core(self, args, return_type)
107 args=args, return_type=return_type,
108 flags=flags, locals=self.locals,
--> 109 pipeline_class=self.pipeline_class)
110 # Check typing error if object mode is used
111 if cres.typing_error is not None and not flags.enable_pyobject:
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
566 pipeline = pipeline_class(typingctx, targetctx, library,
567 args, return_type, flags, locals)
--> 568 return pipeline.compile_extra(func)
569
570
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in compile_extra(self, func)
337 self.state.lifted = ()
338 self.state.lifted_from = None
--> 339 return self._compile_bytecode()
340
341 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in _compile_bytecode(self)
399 """
400 assert self.state.func_ir is None
--> 401 return self._compile_core()
402
403 def _compile_ir(self):
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in _compile_core(self)
379 self.state.status.fail_reason = e
380 if is_final_pipeline:
--> 381 raise e
382 else:
383 raise CompilerError("All available pipelines exhausted")
/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py in _compile_core(self)
370 res = None
371 try:
--> 372 pm.run(self.state)
373 if self.state.cr is not None:
374 break
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in run(self, state)
339 (self.pipeline_name, pass_desc)
340 patched_exception = self._patch_error(msg, e)
--> 341 raise patched_exception
342
343 def dependency_analysis(self):
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in run(self, state)
330 pass_inst = _pass_registry.get(pss).pass_inst
331 if isinstance(pass_inst, CompilerPass):
--> 332 self._runPass(idx, pass_inst, state)
333 else:
334 raise BaseException("Legacy pass in use")
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
289 mutated |= check(pss.run_initialization, internal_state)
290 with SimpleTimer() as pass_time:
--> 291 mutated |= check(pss.run_pass, internal_state)
292 with SimpleTimer() as finalize_time:
293 mutated |= check(pss.run_finalizer, internal_state)
/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py in check(func, compiler_state)
262
263 def check(func, compiler_state):
--> 264 mangled = func(compiler_state)
265 if mangled not in (True, False):
266 msg = ("CompilerPass implementations should return True/False. "
/anaconda3/lib/python3.7/site-packages/numba/core/typed_passes.py in run_pass(self, state)
440
441 # TODO: Pull this out into the pipeline
--> 442 NativeLowering().run_pass(state)
443 lowered = state['cr']
444 signature = typing.signature(state.return_type, *state.args)
/anaconda3/lib/python3.7/site-packages/numba/core/typed_passes.py in run_pass(self, state)
368 lower = lowering.Lower(targetctx, library, fndesc, interp,
369 metadata=metadata)
--> 370 lower.lower()
371 if not flags.no_cpython_wrapper:
372 lower.create_cpython_wrapper(flags.release_gil)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_normal_function(self, fndesc)
225 # Init argument values
226 self.extract_function_arguments()
--> 227 entry_block_tail = self.lower_function_body()
228
229 # Close tail of entry block
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_function_body(self)
251 bb = self.blkmap[offset]
252 self.builder.position_at_end(bb)
--> 253 self.lower_block(block)
254 self.post_lower()
255 return entry_block_tail
/anaconda3/lib/python3.7/site-packages/numba/core/lowering.py in lower_block(self, block)
265 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
269
/anaconda3/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
/anaconda3/lib/python3.7/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
806 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
807 tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
--> 808 reraise(type(newerr), newerr, tb)
809
810
/anaconda3/lib/python3.7/site-packages/numba/core/utils.py in reraise(tp, value, tb)
78 if value.__traceback__ is not tb:
79 raise value.with_traceback(tb)
---> 80 raise value
81
82
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
File "<ipython-input-5-10e1bfec8407>", line 10:
def f(x):
<source elided>
for _ in range(2):
val = arr * arr
^
[1] During: lowering "$30binary_multiply.4 = arrayexpr(expr=(<built-in function mul>, [Var(arr_2, <ipython-input-5-10e1bfec8407>:9), Var(arr.2, <ipython-input-5-10e1bfec8407>:9)]), ty=array(float64, 1d, C))" at <ipython-input-5-10e1bfec8407> (10)
|
AssertionError
|
def _arrayexpr_tree_to_ir(
func_ir,
typingctx,
typemap,
calltypes,
equiv_set,
init_block,
expr_out_var,
expr,
parfor_index_tuple_var,
all_parfor_indices,
avail_vars,
):
"""generate IR from array_expr's expr tree recursively. Assign output to
expr_out_var and returns the whole IR as a list of Assign nodes.
"""
el_typ = typemap[expr_out_var.name]
scope = expr_out_var.scope
loc = expr_out_var.loc
out_ir = []
if isinstance(expr, tuple):
op, arr_expr_args = expr
arg_vars = []
for arg in arr_expr_args:
arg_out_var = ir.Var(scope, mk_unique_var("$arg_out_var"), loc)
typemap[arg_out_var.name] = el_typ
out_ir += _arrayexpr_tree_to_ir(
func_ir,
typingctx,
typemap,
calltypes,
equiv_set,
init_block,
arg_out_var,
arg,
parfor_index_tuple_var,
all_parfor_indices,
avail_vars,
)
arg_vars.append(arg_out_var)
if op in npydecl.supported_array_operators:
el_typ1 = typemap[arg_vars[0].name]
if len(arg_vars) == 2:
el_typ2 = typemap[arg_vars[1].name]
func_typ = typingctx.resolve_function_type(op, (el_typ1, el_typ2), {})
ir_expr = ir.Expr.binop(op, arg_vars[0], arg_vars[1], loc)
if op == operator.truediv:
func_typ, ir_expr = _gen_np_divide(
arg_vars[0], arg_vars[1], out_ir, typemap
)
else:
func_typ = typingctx.resolve_function_type(op, (el_typ1,), {})
ir_expr = ir.Expr.unary(op, arg_vars[0], loc)
calltypes[ir_expr] = func_typ
el_typ = func_typ.return_type
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
for T in array_analysis.MAP_TYPES:
if isinstance(op, T):
# elif isinstance(op, (np.ufunc, DUFunc)):
# function calls are stored in variables which are not removed
# op is typing_key to the variables type
func_var_name = _find_func_var(typemap, op, avail_vars, loc=loc)
func_var = ir.Var(scope, mk_unique_var(func_var_name), loc)
typemap[func_var.name] = typemap[func_var_name]
func_var_def = copy.deepcopy(func_ir.get_definition(func_var_name))
if (
isinstance(func_var_def, ir.Expr)
and func_var_def.op == "getattr"
and func_var_def.attr == "sqrt"
):
g_math_var = ir.Var(scope, mk_unique_var("$math_g_var"), loc)
typemap[g_math_var.name] = types.misc.Module(math)
g_math = ir.Global("math", math, loc)
g_math_assign = ir.Assign(g_math, g_math_var, loc)
func_var_def = ir.Expr.getattr(g_math_var, "sqrt", loc)
out_ir.append(g_math_assign)
# out_ir.append(func_var_def)
ir_expr = ir.Expr.call(func_var, arg_vars, (), loc)
call_typ = typemap[func_var.name].get_call_type(
typingctx, tuple(typemap[a.name] for a in arg_vars), {}
)
calltypes[ir_expr] = call_typ
el_typ = call_typ.return_type
# signature(el_typ, el_typ)
out_ir.append(ir.Assign(func_var_def, func_var, loc))
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
elif isinstance(expr, ir.Var):
var_typ = typemap[expr.name]
if isinstance(var_typ, types.Array):
el_typ = var_typ.dtype
ir_expr = _gen_arrayexpr_getitem(
equiv_set,
expr,
parfor_index_tuple_var,
all_parfor_indices,
el_typ,
calltypes,
typingctx,
typemap,
init_block,
out_ir,
)
else:
# assert typemap[expr.name]==el_typ
el_typ = var_typ
ir_expr = expr
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
elif isinstance(expr, ir.Const):
el_typ = typing.Context().resolve_value_type(expr.value)
out_ir.append(ir.Assign(expr, expr_out_var, loc))
if len(out_ir) == 0:
raise errors.UnsupportedRewriteError(
f"Don't know how to translate array expression '{expr:r}'",
loc=expr.loc,
)
typemap.pop(expr_out_var.name, None)
typemap[expr_out_var.name] = el_typ
return out_ir
|
def _arrayexpr_tree_to_ir(
func_ir,
typingctx,
typemap,
calltypes,
equiv_set,
init_block,
expr_out_var,
expr,
parfor_index_tuple_var,
all_parfor_indices,
avail_vars,
):
"""generate IR from array_expr's expr tree recursively. Assign output to
expr_out_var and returns the whole IR as a list of Assign nodes.
"""
el_typ = typemap[expr_out_var.name]
scope = expr_out_var.scope
loc = expr_out_var.loc
out_ir = []
if isinstance(expr, tuple):
op, arr_expr_args = expr
arg_vars = []
for arg in arr_expr_args:
arg_out_var = ir.Var(scope, mk_unique_var("$arg_out_var"), loc)
typemap[arg_out_var.name] = el_typ
out_ir += _arrayexpr_tree_to_ir(
func_ir,
typingctx,
typemap,
calltypes,
equiv_set,
init_block,
arg_out_var,
arg,
parfor_index_tuple_var,
all_parfor_indices,
avail_vars,
)
arg_vars.append(arg_out_var)
if op in npydecl.supported_array_operators:
el_typ1 = typemap[arg_vars[0].name]
if len(arg_vars) == 2:
el_typ2 = typemap[arg_vars[1].name]
func_typ = typingctx.resolve_function_type(op, (el_typ1, el_typ), {})
ir_expr = ir.Expr.binop(op, arg_vars[0], arg_vars[1], loc)
if op == operator.truediv:
func_typ, ir_expr = _gen_np_divide(
arg_vars[0], arg_vars[1], out_ir, typemap
)
else:
func_typ = typingctx.resolve_function_type(op, (el_typ1,), {})
ir_expr = ir.Expr.unary(op, arg_vars[0], loc)
calltypes[ir_expr] = func_typ
el_typ = func_typ.return_type
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
for T in array_analysis.MAP_TYPES:
if isinstance(op, T):
# elif isinstance(op, (np.ufunc, DUFunc)):
# function calls are stored in variables which are not removed
# op is typing_key to the variables type
func_var_name = _find_func_var(typemap, op, avail_vars, loc=loc)
func_var = ir.Var(scope, mk_unique_var(func_var_name), loc)
typemap[func_var.name] = typemap[func_var_name]
func_var_def = copy.deepcopy(func_ir.get_definition(func_var_name))
if (
isinstance(func_var_def, ir.Expr)
and func_var_def.op == "getattr"
and func_var_def.attr == "sqrt"
):
g_math_var = ir.Var(scope, mk_unique_var("$math_g_var"), loc)
typemap[g_math_var.name] = types.misc.Module(math)
g_math = ir.Global("math", math, loc)
g_math_assign = ir.Assign(g_math, g_math_var, loc)
func_var_def = ir.Expr.getattr(g_math_var, "sqrt", loc)
out_ir.append(g_math_assign)
# out_ir.append(func_var_def)
ir_expr = ir.Expr.call(func_var, arg_vars, (), loc)
call_typ = typemap[func_var.name].get_call_type(
typingctx, tuple(typemap[a.name] for a in arg_vars), {}
)
calltypes[ir_expr] = call_typ
el_typ = call_typ.return_type
# signature(el_typ, el_typ)
out_ir.append(ir.Assign(func_var_def, func_var, loc))
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
elif isinstance(expr, ir.Var):
var_typ = typemap[expr.name]
if isinstance(var_typ, types.Array):
el_typ = var_typ.dtype
ir_expr = _gen_arrayexpr_getitem(
equiv_set,
expr,
parfor_index_tuple_var,
all_parfor_indices,
el_typ,
calltypes,
typingctx,
typemap,
init_block,
out_ir,
)
else:
# assert typemap[expr.name]==el_typ
el_typ = var_typ
ir_expr = expr
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
elif isinstance(expr, ir.Const):
el_typ = typing.Context().resolve_value_type(expr.value)
out_ir.append(ir.Assign(expr, expr_out_var, loc))
if len(out_ir) == 0:
raise errors.UnsupportedRewriteError(
f"Don't know how to translate array expression '{expr:r}'",
loc=expr.loc,
)
typemap.pop(expr_out_var.name, None)
typemap[expr_out_var.name] = el_typ
return out_ir
|
https://github.com/numba/numba/issues/5575
|
(SDC_MASTER) C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\num
ba>python -W ignore .\example_script.py
DEBUG: typing operator_add: lhs=StringArrayType(), rhs=StringArrayType()
DEBUG: typing operator_add: lhs=StringArrayType(), rhs=bool
DEBUG: typing: str_arr_operator_eq_overload: left, right StringArrayType() bool
Traceback (most recent call last):
File ".\example_script.py", line 98, in <module>
print(foo(a1, a2))
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\utils.py", line 79, in reraise
raise value.with_traceback(tb)
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: convert t
o parfors)
Invalid use of Function(<built-in function eq>) with argument(s) of type(s): (St
ringArrayType(), bool)
Known signatures:
* (bool, bool) -> bool
* (int8, int8) -> bool
* (int16, int16) -> bool
* (int32, int32) -> bool
* (int64, int64) -> bool
* (uint8, uint8) -> bool
* (uint16, uint16) -> bool
* (uint32, uint32) -> bool
* (uint64, uint64) -> bool
* (float32, float32) -> bool
* (float64, float64) -> bool
* (complex64, complex64) -> bool
* (complex128, complex128) -> bool
* parameterized
In definition 0:
All templates rejected with literals.
...
All templates rejected with literals.
In definition 35:
All templates rejected without literals.
This error is usually caused by passing an argument of a type that is unsupported by the named function.
|
numba.core.errors.TypingError
|
def extend(self, iterable):
# Empty iterable, do nothing
if len(iterable) == 0:
return self
if not self._typed:
# Need to get the first element of the iterable to initialise the
# type of the list. FIXME: this may be a problem if the iterable
# can not be sliced.
self._initialise_list(iterable[0])
return _extend(self, iterable)
|
def extend(self, iterable):
if not self._typed:
# Need to get the first element of the iterable to initialise the
# type of the list. FIXME: this may be a problem if the iterable
# can not be sliced.
self._initialise_list(iterable[0])
self.append(iterable[0])
return _extend(self, iterable[1:])
return _extend(self, iterable)
|
https://github.com/numba/numba/issues/5152
|
Traceback (most recent call last):
File "/Users/vhaenel/git/numba/numba/typeinfer.py", line 148, in propagate
constraint(typeinfer)
File "/Users/vhaenel/git/numba/numba/typeinfer.py", line 486, in __call__
self.resolve(typeinfer, typevars, fnty)
File "/Users/vhaenel/git/numba/numba/typeinfer.py", line 506, in resolve
sig = typeinfer.resolve_call(fnty, pos_args, kw_args)
File "/Users/vhaenel/git/numba/numba/typeinfer.py", line 1440, in resolve_call
return self.context.resolve_function_type(fnty, pos_args, kw_args)
File "/Users/vhaenel/git/numba/numba/typing/context.py", line 216, in resolve_function_type
raise last_exception
File "/Users/vhaenel/git/numba/numba/typing/context.py", line 199, in resolve_function_type
res = self._resolve_user_function_type(func, args, kws)
File "/Users/vhaenel/git/numba/numba/typing/context.py", line 251, in _resolve_user_function_type
return func.get_call_type(self, args, kws)
File "/Users/vhaenel/git/numba/numba/types/functions.py", line 217, in get_call_type
out = template.apply(args, kws)
File "/Users/vhaenel/git/numba/numba/typing/templates.py", line 245, in apply
sig = generic(args, kws)
File "/Users/vhaenel/git/numba/numba/typing/templates.py", line 806, in generic
sig = self._get_signature(self.context, fnty, args, kws)
File "/Users/vhaenel/git/numba/numba/typing/templates.py", line 757, in _get_signature
sig = fnty.get_call_type(typingctx, args, kws)
File "/Users/vhaenel/git/numba/numba/types/functions.py", line 150, in get_call_type
failures.raise_error()
File "/Users/vhaenel/git/numba/numba/types/functions.py", line 79, in raise_error
raise errors.TypingError(self.format())
numba.errors.TypingError: Invalid use of Function(<function impl_extend at 0x1152bddd0>) with argument(s) of type(s): (ListType[int64], Tuple())
* parameterized
In definition 0:
TypingError: extend argument must be iterable
raised from /Users/vhaenel/git/numba/numba/listobject.py:880
In definition 1:
TypingError: extend argument must be iterable
raised from /Users/vhaenel/git/numba/numba/listobject.py:880
This error is usually caused by passing an argument of a type that is unsupported by the named function.
[1] During: resolving callee type: BoundFunction((<class 'numba.types.containers.ListType'>, 'extend') for ListType[int64])
[2] During: typing of call at /Users/vhaenel/git/numba/numba/typed/typedlist.py (82)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "foo/foo107.py", line 4, in <module>
l.extend((1,))
File "/Users/vhaenel/git/numba/numba/typed/typedlist.py", line 301, in extend
return _extend(self, iterable[1:])
File "/Users/vhaenel/git/numba/numba/dispatcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File "/Users/vhaenel/git/numba/numba/dispatcher.py", line 342, in error_rewrite
raise e
File "/Users/vhaenel/git/numba/numba/dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "/Users/vhaenel/git/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/vhaenel/git/numba/numba/dispatcher.py", line 768, in compile
cres = self._compiler.compile(args, return_type)
File "/Users/vhaenel/git/numba/numba/dispatcher.py", line 81, in compile
raise retval
File "/Users/vhaenel/git/numba/numba/dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "/Users/vhaenel/git/numba/numba/dispatcher.py", line 109, in _compile_core
pipeline_class=self.pipeline_class)
File "/Users/vhaenel/git/numba/numba/compiler.py", line 551, in compile_extra
return pipeline.compile_extra(func)
File "/Users/vhaenel/git/numba/numba/compiler.py", line 331, in compile_extra
return self._compile_bytecode()
File "/Users/vhaenel/git/numba/numba/compiler.py", line 393, in _compile_bytecode
return self._compile_core()
File "/Users/vhaenel/git/numba/numba/compiler.py", line 373, in _compile_core
raise e
File "/Users/vhaenel/git/numba/numba/compiler.py", line 364, in _compile_core
pm.run(self.state)
File "/Users/vhaenel/git/numba/numba/compiler_machinery.py", line 347, in run
raise patched_exception
File "/Users/vhaenel/git/numba/numba/compiler_machinery.py", line 338, in run
self._runPass(idx, pass_inst, state)
File "/Users/vhaenel/git/numba/numba/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/Users/vhaenel/git/numba/numba/compiler_machinery.py", line 302, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "/Users/vhaenel/git/numba/numba/compiler_machinery.py", line 275, in check
mangled = func(compiler_state)
File "/Users/vhaenel/git/numba/numba/typed_passes.py", line 95, in run_pass
raise_errors=self._raise_errors)
File "/Users/vhaenel/git/numba/numba/typed_passes.py", line 67, in type_inference_stage
infer.propagate(raise_errors=raise_errors)
File "/Users/vhaenel/git/numba/numba/typeinfer.py", line 985, in propagate
raise errors[0]
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function impl_extend at 0x1152bddd0>) with argument(s) of type(s): (ListType[int64], Tuple())
* parameterized
In definition 0:
TypingError: extend argument must be iterable
raised from /Users/vhaenel/git/numba/numba/listobject.py:880
In definition 1:
TypingError: extend argument must be iterable
raised from /Users/vhaenel/git/numba/numba/listobject.py:880
This error is usually caused by passing an argument of a type that is unsupported by the named function.
[1] During: resolving callee type: BoundFunction((<class 'numba.types.containers.ListType'>, 'extend') for ListType[int64])
[2] During: typing of call at /Users/vhaenel/git/numba/numba/typed/typedlist.py (82)
File "numba/typed/typedlist.py", line 82:
def _extend(l, iterable):
return l.extend(iterable)
|
numba.errors.TypingError
|
def generic(self, args, kws):
# Redirect resolution to __init__
instance_type = self.key.instance_type
ctor = instance_type.jitmethods["__init__"]
boundargs = (instance_type.get_reference_type(),) + args
disp_type = types.Dispatcher(ctor)
sig = disp_type.get_call_type(self.context, boundargs, kws)
if not isinstance(sig.return_type, types.NoneType):
raise TypeError(f"__init__() should return None, not '{sig.return_type}'")
# Actual constructor returns an instance value (not None)
out = templates.signature(instance_type, *sig.args[1:])
return out
|
def generic(self, args, kws):
# Redirect resolution to __init__
instance_type = self.key.instance_type
ctor = instance_type.jitmethods["__init__"]
boundargs = (instance_type.get_reference_type(),) + args
disp_type = types.Dispatcher(ctor)
sig = disp_type.get_call_type(self.context, boundargs, kws)
# Actual constructor returns an instance value (not None)
out = templates.signature(instance_type, *sig.args[1:])
return out
|
https://github.com/numba/numba/issues/4985
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
c:\program files\python36-64\lib\site-packages\numba\errors.py in new_error_context(fmt_, *args, **kwargs)
716 try:
--> 717 yield
718 except NumbaError as e:
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_block(self, block)
259 loc=self.loc, errcls_=defaulterrcls):
--> 260 self.lower_inst(inst)
261
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_inst(self, inst)
302 ty = self.typeof(inst.target.name)
--> 303 val = self.lower_assign(ty, inst)
304 self.storevar(val, inst.target.name)
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_assign(self, ty, inst)
464 elif isinstance(value, ir.Expr):
--> 465 return self.lower_expr(ty, value)
466
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_expr(self, resty, expr)
926 elif expr.op == 'call':
--> 927 res = self.lower_call(resty, expr)
928 return res
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_call(self, resty, expr)
718 else:
--> 719 res = self._lower_call_normal(fnty, expr, signature)
720
c:\program files\python36-64\lib\site-packages\numba\lowering.py in _lower_call_normal(self, fnty, expr, signature)
897
--> 898 res = impl(self.builder, argvals, self.loc)
899 return res
c:\program files\python36-64\lib\site-packages\numba\targets\base.py in __call__(self, builder, args, loc)
1131 def __call__(self, builder, args, loc=None):
-> 1132 res = self._imp(self._context, builder, self._sig, args, loc=loc)
1133 self._context.add_linking_libs(getattr(self, 'libs', ()))
c:\program files\python36-64\lib\site-packages\numba\targets\base.py in wrapper(*args, **kwargs)
1156 kwargs.pop('loc') # drop unused loc
-> 1157 return fn(*args, **kwargs)
1158
c:\program files\python36-64\lib\site-packages\numba\jitclass\base.py in ctor_impl(context, builder, sig, args)
522 realargs = [inst_struct._getvalue()] + list(args)
--> 523 call(builder, realargs)
524
c:\program files\python36-64\lib\site-packages\numba\targets\base.py in __call__(self, builder, args, loc)
1131 def __call__(self, builder, args, loc=None):
-> 1132 res = self._imp(self._context, builder, self._sig, args, loc=loc)
1133 self._context.add_linking_libs(getattr(self, 'libs', ()))
c:\program files\python36-64\lib\site-packages\numba\targets\base.py in wrapper(*args, **kwargs)
1156 kwargs.pop('loc') # drop unused loc
-> 1157 return fn(*args, **kwargs)
1158
c:\program files\python36-64\lib\site-packages\numba\targets\imputils.py in imp(context, builder, sig, args)
195 context.call_conv.return_status_propagate(builder, status)
--> 196 assert sig.return_type == fndesc.restype
197 # Reconstruct optional return type
AssertionError:
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-60-3e6844f03bc2> in <module>
2 start=np.zeros(2,dtype=np.int32)
3 end=np.zeros(2,dtype=np.int32)+2
----> 4 LineVectorIntegral(start,end,fields)
5
c:\program files\python36-64\lib\site-packages\numba\jitclass\base.py in __call__(cls, *args, **kwargs)
124 bind = cls._ctor_sig.bind(None, *args, **kwargs)
125 bind.apply_defaults()
--> 126 return cls._ctor(*bind.args[1:], **bind.kwargs)
127
128
c:\program files\python36-64\lib\site-packages\numba\dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
c:\program files\python36-64\lib\site-packages\numba\dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
c:\program files\python36-64\lib\site-packages\numba\compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
c:\program files\python36-64\lib\site-packages\numba\dispatcher.py in compile(self, sig)
766 self._cache_misses[sig] += 1
767 try:
--> 768 cres = self._compiler.compile(args, return_type)
769 except errors.ForceLiteralArg as e:
770 def folded(args, kws):
c:\program files\python36-64\lib\site-packages\numba\dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
c:\program files\python36-64\lib\site-packages\numba\dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
c:\program files\python36-64\lib\site-packages\numba\dispatcher.py in _compile_core(self, args, return_type)
107 args=args, return_type=return_type,
108 flags=flags, locals=self.locals,
--> 109 pipeline_class=self.pipeline_class)
110 # Check typing error if object mode is used
111 if cres.typing_error is not None and not flags.enable_pyobject:
c:\program files\python36-64\lib\site-packages\numba\compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
526 pipeline = pipeline_class(typingctx, targetctx, library,
527 args, return_type, flags, locals)
--> 528 return pipeline.compile_extra(func)
529
530
c:\program files\python36-64\lib\site-packages\numba\compiler.py in compile_extra(self, func)
324 self.state.lifted = ()
325 self.state.lifted_from = None
--> 326 return self._compile_bytecode()
327
328 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
c:\program files\python36-64\lib\site-packages\numba\compiler.py in _compile_bytecode(self)
383 """
384 assert self.state.func_ir is None
--> 385 return self._compile_core()
386
387 def _compile_ir(self):
c:\program files\python36-64\lib\site-packages\numba\compiler.py in _compile_core(self)
363 self.state.status.fail_reason = e
364 if is_final_pipeline:
--> 365 raise e
366 else:
367 raise CompilerError("All available pipelines exhausted")
c:\program files\python36-64\lib\site-packages\numba\compiler.py in _compile_core(self)
354 res = None
355 try:
--> 356 pm.run(self.state)
357 if self.state.cr is not None:
358 break
c:\program files\python36-64\lib\site-packages\numba\compiler_machinery.py in run(self, state)
326 (self.pipeline_name, pass_desc)
327 patched_exception = self._patch_error(msg, e)
--> 328 raise patched_exception
329
330 def dependency_analysis(self):
c:\program files\python36-64\lib\site-packages\numba\compiler_machinery.py in run(self, state)
317 pass_inst = _pass_registry.get(pss).pass_inst
318 if isinstance(pass_inst, CompilerPass):
--> 319 self._runPass(idx, pass_inst, state)
320 else:
321 raise BaseException("Legacy pass in use")
c:\program files\python36-64\lib\site-packages\numba\compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
c:\program files\python36-64\lib\site-packages\numba\compiler_machinery.py in _runPass(self, index, pss, internal_state)
279 mutated |= check(pss.run_initialization, internal_state)
280 with SimpleTimer() as pass_time:
--> 281 mutated |= check(pss.run_pass, internal_state)
282 with SimpleTimer() as finalize_time:
283 mutated |= check(pss.run_finalizer, internal_state)
c:\program files\python36-64\lib\site-packages\numba\compiler_machinery.py in check(func, compiler_state)
266
267 def check(func, compiler_state):
--> 268 mangled = func(compiler_state)
269 if mangled not in (True, False):
270 msg = ("CompilerPass implementations should return True/False. "
c:\program files\python36-64\lib\site-packages\numba\typed_passes.py in run_pass(self, state)
378 state.library.enable_object_caching()
379
--> 380 NativeLowering().run_pass(state) # TODO: Pull this out into the pipeline
381 lowered = state['cr']
382 signature = typing.signature(state.return_type, *state.args)
c:\program files\python36-64\lib\site-packages\numba\typed_passes.py in run_pass(self, state)
323 lower = lowering.Lower(targetctx, library, fndesc, interp,
324 metadata=metadata)
--> 325 lower.lower()
326 if not flags.no_cpython_wrapper:
327 lower.create_cpython_wrapper(flags.release_gil)
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_normal_function(self, fndesc)
218 # Init argument values
219 self.extract_function_arguments()
--> 220 entry_block_tail = self.lower_function_body()
221
222 # Close tail of entry block
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_function_body(self)
243 bb = self.blkmap[offset]
244 self.builder.position_at_end(bb)
--> 245 self.lower_block(block)
246
247 self.post_lower()
c:\program files\python36-64\lib\site-packages\numba\lowering.py in lower_block(self, block)
258 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
259 loc=self.loc, errcls_=defaulterrcls):
--> 260 self.lower_inst(inst)
261
262 def create_cpython_wrapper(self, release_gil=False):
c:\program files\python36-64\lib\contextlib.py in __exit__(self, type, value, traceback)
97 value = type()
98 try:
---> 99 self.gen.throw(type, value, traceback)
100 except StopIteration as exc:
101 # Suppress StopIteration *unless* it's the same exception that
c:\program files\python36-64\lib\site-packages\numba\errors.py in new_error_context(fmt_, *args, **kwargs)
723 from numba import config
724 tb = sys.exc_info()[2] if config.FULL_TRACEBACKS else None
--> 725 six.reraise(type(newerr), newerr, tb)
726
727
c:\program files\python36-64\lib\site-packages\numba\six.py in reraise(tp, value, tb)
667 if value.__traceback__ is not tb:
668 raise value.with_traceback(tb)
--> 669 raise value
670
671 else:
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
File "<string>", line 3:
<source missing, REPL/exec in use?>
[1] During: lowering "$0.5 = call $0.1(start, end, fields, func=$0.1, args=[Var(start, <string> (3)), Var(end, <string> (3)), Var(fields, <string> (3))], kws=(), vararg=None)" at <string> (3)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
You are currently using Numba version 0.46.0.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
|
AssertionError
|
def match(self, func_ir, block, typemap, calltypes):
"""
Detect all getitem expressions and find which ones have
string literal indexes
"""
self.getitems = getitems = {}
self.block = block
self.calltypes = calltypes
for expr in block.find_exprs(op="getitem"):
if expr.op == "getitem":
index_ty = typemap[expr.index.name]
if isinstance(index_ty, types.StringLiteral):
getitems[expr] = (expr.index, index_ty.literal_value)
return len(getitems) > 0
|
def match(self, func_ir, block, typemap, calltypes):
"""
Detect all getitem expressions and find which ones have
string literal indexes
"""
self.getitems = getitems = {}
self.block = block
for expr in block.find_exprs(op="getitem"):
if expr.op == "getitem":
index_ty = typemap[expr.index.name]
if isinstance(index_ty, types.StringLiteral):
getitems[expr] = (expr.index, index_ty.literal_value)
return len(getitems) > 0
|
https://github.com/numba/numba/issues/5565
|
Traceback (most recent call last):
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\errors.py", line 801, in new_error_context
yield
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 267, in lower_block
self.lower_inst(inst)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 364, in lower_inst
val = self.lower_assign(ty, inst)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 538, in lower_assign
return self.lower_expr(ty, value)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 1183, in lower_expr
signature = self.fndesc.calltypes[expr]
KeyError: static_getitem(value=$8call_method.3, index=C, index_var=col_name)
|
KeyError
|
def apply(self):
"""
Rewrite all matching getitems as static_getitems where the index
is the literal value of the string.
"""
new_block = self.block.copy()
new_block.clear()
for inst in self.block.body:
if isinstance(inst, ir.Assign):
expr = inst.value
if expr in self.getitems:
const, lit_val = self.getitems[expr]
new_expr = ir.Expr.static_getitem(
value=expr.value, index=lit_val, index_var=expr.index, loc=expr.loc
)
self.calltypes[new_expr] = self.calltypes[expr]
inst = ir.Assign(value=new_expr, target=inst.target, loc=inst.loc)
new_block.append(inst)
return new_block
|
def apply(self):
"""
Rewrite all matching getitems as static_getitems where the index
is the literal value of the string.
"""
new_block = self.block.copy()
new_block.clear()
for inst in self.block.body:
if isinstance(inst, ir.Assign):
expr = inst.value
if expr in self.getitems:
const, lit_val = self.getitems[expr]
new_expr = ir.Expr.static_getitem(
value=expr.value, index=lit_val, index_var=expr.index, loc=expr.loc
)
inst = ir.Assign(value=new_expr, target=inst.target, loc=inst.loc)
new_block.append(inst)
return new_block
|
https://github.com/numba/numba/issues/5565
|
Traceback (most recent call last):
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\errors.py", line 801, in new_error_context
yield
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 267, in lower_block
self.lower_inst(inst)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 364, in lower_inst
val = self.lower_assign(ty, inst)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 538, in lower_assign
return self.lower_expr(ty, value)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\core\lowering.py", line 1183, in lower_expr
signature = self.fndesc.calltypes[expr]
KeyError: static_getitem(value=$8call_method.3, index=C, index_var=col_name)
|
KeyError
|
def call(cls, func, args, kws, loc, vararg=None):
assert isinstance(func, Var)
assert isinstance(loc, Loc)
op = "call"
return cls(op=op, loc=loc, func=func, args=args, kws=kws, vararg=vararg)
|
def call(cls, func, args, kws, loc, vararg=None):
assert isinstance(func, (Var, Intrinsic))
assert isinstance(loc, Loc)
op = "call"
return cls(op=op, loc=loc, func=func, args=args, kws=kws, vararg=vararg)
|
https://github.com/numba/numba/issues/5408
|
======================================================================
FAIL: test_set_registers_57 (numba.cuda.tests.cudadrv.test_linker.TestLinker)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/devices.py", line 224, in _require_cuda_context
return fn(*args, **kws)
File "/home/gmarkall/numbadev/numba/numba/cuda/tests/cudadrv/test_linker.py", line 92, in test_set_registers_57
self.assertEquals(57, compiled._func.get().attrs.regs)
AssertionError: 57 != 56
|
AssertionError
|
def lower_call(self, resty, expr):
signature = self.fndesc.calltypes[expr]
self.debug_print("# lower_call: expr = {0}".format(expr))
if isinstance(signature.return_type, types.Phantom):
return self.context.get_dummy_value()
fnty = self.typeof(expr.func.name)
if isinstance(fnty, types.ObjModeDispatcher):
res = self._lower_call_ObjModeDispatcher(fnty, expr, signature)
elif isinstance(fnty, types.ExternalFunction):
res = self._lower_call_ExternalFunction(fnty, expr, signature)
elif isinstance(fnty, types.ExternalFunctionPointer):
res = self._lower_call_ExternalFunctionPointer(fnty, expr, signature)
elif isinstance(fnty, types.RecursiveCall):
res = self._lower_call_RecursiveCall(fnty, expr, signature)
elif isinstance(fnty, types.FunctionType):
res = self._lower_call_FunctionType(fnty, expr, signature)
else:
res = self._lower_call_normal(fnty, expr, signature)
# If lowering the call returned None, interpret that as returning dummy
# value if the return type of the function is void, otherwise there is
# a problem
if res is None:
if signature.return_type == types.void:
res = self.context.get_dummy_value()
else:
raise LoweringError(
msg="non-void function returns None from implementation", loc=self.loc
)
return self.context.cast(self.builder, res, signature.return_type, resty)
|
def lower_call(self, resty, expr):
signature = self.fndesc.calltypes[expr]
self.debug_print("# lower_call: expr = {0}".format(expr))
if isinstance(signature.return_type, types.Phantom):
return self.context.get_dummy_value()
if isinstance(expr.func, ir.Intrinsic):
fnty = expr.func.name
else:
fnty = self.typeof(expr.func.name)
if isinstance(fnty, types.ObjModeDispatcher):
res = self._lower_call_ObjModeDispatcher(fnty, expr, signature)
elif isinstance(fnty, types.ExternalFunction):
res = self._lower_call_ExternalFunction(fnty, expr, signature)
elif isinstance(fnty, types.ExternalFunctionPointer):
res = self._lower_call_ExternalFunctionPointer(fnty, expr, signature)
elif isinstance(fnty, types.RecursiveCall):
res = self._lower_call_RecursiveCall(fnty, expr, signature)
elif isinstance(fnty, types.FunctionType):
res = self._lower_call_FunctionType(fnty, expr, signature)
else:
res = self._lower_call_normal(fnty, expr, signature)
# If lowering the call returned None, interpret that as returning dummy
# value if the return type of the function is void, otherwise there is
# a problem
if res is None:
if signature.return_type == types.void:
res = self.context.get_dummy_value()
else:
raise LoweringError(
msg="non-void function returns None from implementation", loc=self.loc
)
return self.context.cast(self.builder, res, signature.return_type, resty)
|
https://github.com/numba/numba/issues/5408
|
======================================================================
FAIL: test_set_registers_57 (numba.cuda.tests.cudadrv.test_linker.TestLinker)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/devices.py", line 224, in _require_cuda_context
return fn(*args, **kws)
File "/home/gmarkall/numbadev/numba/numba/cuda/tests/cudadrv/test_linker.py", line 92, in test_set_registers_57
self.assertEquals(57, compiled._func.get().attrs.regs)
AssertionError: 57 != 56
|
AssertionError
|
def _lower_call_normal(self, fnty, expr, signature):
# Normal function resolution
self.debug_print("# calling normal function: {0}".format(fnty))
self.debug_print("# signature: {0}".format(signature))
if isinstance(fnty, types.ObjModeDispatcher):
argvals = expr.func.args
else:
argvals = self.fold_call_args(
fnty,
signature,
expr.args,
expr.vararg,
expr.kws,
)
impl = self.context.get_function(fnty, signature)
if signature.recvr:
# The "self" object is passed as the function object
# for bounded function
the_self = self.loadvar(expr.func.name)
# Prepend the self reference
argvals = [the_self] + list(argvals)
res = impl(self.builder, argvals, self.loc)
return res
|
def _lower_call_normal(self, fnty, expr, signature):
# Normal function resolution
self.debug_print("# calling normal function: {0}".format(fnty))
self.debug_print("# signature: {0}".format(signature))
if isinstance(expr.func, ir.Intrinsic) or isinstance(fnty, types.ObjModeDispatcher):
argvals = expr.func.args
else:
argvals = self.fold_call_args(
fnty,
signature,
expr.args,
expr.vararg,
expr.kws,
)
impl = self.context.get_function(fnty, signature)
if signature.recvr:
# The "self" object is passed as the function object
# for bounded function
the_self = self.loadvar(expr.func.name)
# Prepend the self reference
argvals = [the_self] + list(argvals)
res = impl(self.builder, argvals, self.loc)
return res
|
https://github.com/numba/numba/issues/5408
|
======================================================================
FAIL: test_set_registers_57 (numba.cuda.tests.cudadrv.test_linker.TestLinker)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/devices.py", line 224, in _require_cuda_context
return fn(*args, **kws)
File "/home/gmarkall/numbadev/numba/numba/cuda/tests/cudadrv/test_linker.py", line 92, in test_set_registers_57
self.assertEquals(57, compiled._func.get().attrs.regs)
AssertionError: 57 != 56
|
AssertionError
|
def typeof_expr(self, inst, target, expr):
if expr.op == "call":
self.typeof_call(inst, target, expr)
elif expr.op in ("getiter", "iternext"):
self.typeof_intrinsic_call(inst, target, expr.op, expr.value)
elif expr.op == "exhaust_iter":
constraint = ExhaustIterConstraint(
target.name, count=expr.count, iterator=expr.value, loc=expr.loc
)
self.constraints.append(constraint)
elif expr.op == "pair_first":
constraint = PairFirstConstraint(target.name, pair=expr.value, loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == "pair_second":
constraint = PairSecondConstraint(target.name, pair=expr.value, loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == "binop":
self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs)
elif expr.op == "inplace_binop":
self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs)
elif expr.op == "unary":
self.typeof_intrinsic_call(inst, target, expr.fn, expr.value)
elif expr.op == "static_getitem":
constraint = StaticGetItemConstraint(
target.name,
value=expr.value,
index=expr.index,
index_var=expr.index_var,
loc=expr.loc,
)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
elif expr.op == "getitem":
self.typeof_intrinsic_call(
inst,
target,
operator.getitem,
expr.value,
expr.index,
)
elif expr.op == "typed_getitem":
constraint = TypedGetItemConstraint(
target.name,
value=expr.value,
dtype=expr.dtype,
index=expr.index,
loc=expr.loc,
)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
elif expr.op == "getattr":
constraint = GetAttrConstraint(
target.name, attr=expr.attr, value=expr.value, loc=inst.loc, inst=inst
)
self.constraints.append(constraint)
elif expr.op == "build_tuple":
constraint = BuildTupleConstraint(target.name, items=expr.items, loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == "build_list":
constraint = BuildListConstraint(target.name, items=expr.items, loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == "build_set":
constraint = BuildSetConstraint(target.name, items=expr.items, loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == "build_map":
constraint = BuildMapConstraint(
target.name,
items=expr.items,
special_value=expr.literal_value,
value_indexes=expr.value_indexes,
loc=inst.loc,
)
self.constraints.append(constraint)
elif expr.op == "cast":
self.constraints.append(
Propagate(dst=target.name, src=expr.value.name, loc=inst.loc)
)
elif expr.op == "phi":
for iv in expr.incoming_values:
if iv is not ir.UNDEFINED:
self.constraints.append(
Propagate(dst=target.name, src=iv.name, loc=inst.loc)
)
elif expr.op == "make_function":
self.lock_type(
target.name,
types.MakeFunctionLiteral(expr),
loc=inst.loc,
literal_value=expr,
)
else:
msg = "Unsupported op-code encountered: %s" % expr
raise UnsupportedError(msg, loc=inst.loc)
|
def typeof_expr(self, inst, target, expr):
if expr.op == "call":
if isinstance(expr.func, ir.Intrinsic):
sig = expr.func.type
self.add_type(target.name, sig.return_type, loc=inst.loc)
self.add_calltype(expr, sig)
else:
self.typeof_call(inst, target, expr)
elif expr.op in ("getiter", "iternext"):
self.typeof_intrinsic_call(inst, target, expr.op, expr.value)
elif expr.op == "exhaust_iter":
constraint = ExhaustIterConstraint(
target.name, count=expr.count, iterator=expr.value, loc=expr.loc
)
self.constraints.append(constraint)
elif expr.op == "pair_first":
constraint = PairFirstConstraint(target.name, pair=expr.value, loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == "pair_second":
constraint = PairSecondConstraint(target.name, pair=expr.value, loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == "binop":
self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs)
elif expr.op == "inplace_binop":
self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs)
elif expr.op == "unary":
self.typeof_intrinsic_call(inst, target, expr.fn, expr.value)
elif expr.op == "static_getitem":
constraint = StaticGetItemConstraint(
target.name,
value=expr.value,
index=expr.index,
index_var=expr.index_var,
loc=expr.loc,
)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
elif expr.op == "getitem":
self.typeof_intrinsic_call(
inst,
target,
operator.getitem,
expr.value,
expr.index,
)
elif expr.op == "typed_getitem":
constraint = TypedGetItemConstraint(
target.name,
value=expr.value,
dtype=expr.dtype,
index=expr.index,
loc=expr.loc,
)
self.constraints.append(constraint)
self.calls.append((inst.value, constraint))
elif expr.op == "getattr":
constraint = GetAttrConstraint(
target.name, attr=expr.attr, value=expr.value, loc=inst.loc, inst=inst
)
self.constraints.append(constraint)
elif expr.op == "build_tuple":
constraint = BuildTupleConstraint(target.name, items=expr.items, loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == "build_list":
constraint = BuildListConstraint(target.name, items=expr.items, loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == "build_set":
constraint = BuildSetConstraint(target.name, items=expr.items, loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == "build_map":
constraint = BuildMapConstraint(
target.name,
items=expr.items,
special_value=expr.literal_value,
value_indexes=expr.value_indexes,
loc=inst.loc,
)
self.constraints.append(constraint)
elif expr.op == "cast":
self.constraints.append(
Propagate(dst=target.name, src=expr.value.name, loc=inst.loc)
)
elif expr.op == "phi":
for iv in expr.incoming_values:
if iv is not ir.UNDEFINED:
self.constraints.append(
Propagate(dst=target.name, src=iv.name, loc=inst.loc)
)
elif expr.op == "make_function":
self.lock_type(
target.name,
types.MakeFunctionLiteral(expr),
loc=inst.loc,
literal_value=expr,
)
else:
msg = "Unsupported op-code encountered: %s" % expr
raise UnsupportedError(msg, loc=inst.loc)
|
https://github.com/numba/numba/issues/5408
|
======================================================================
FAIL: test_set_registers_57 (numba.cuda.tests.cudadrv.test_linker.TestLinker)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/devices.py", line 224, in _require_cuda_context
return fn(*args, **kws)
File "/home/gmarkall/numbadev/numba/numba/cuda/tests/cudadrv/test_linker.py", line 92, in test_set_registers_57
self.assertEquals(57, compiled._func.get().attrs.regs)
AssertionError: 57 != 56
|
AssertionError
|
def resolve_array(self, mod):
return types.Function(Hsa_shared_array)
|
def resolve_array(self, mod):
return types.Macro(Hsa_shared_array)
|
https://github.com/numba/numba/issues/5408
|
======================================================================
FAIL: test_set_registers_57 (numba.cuda.tests.cudadrv.test_linker.TestLinker)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/gmarkall/numbadev/numba/numba/cuda/cudadrv/devices.py", line 224, in _require_cuda_context
return fn(*args, **kws)
File "/home/gmarkall/numbadev/numba/numba/cuda/tests/cudadrv/test_linker.py", line 92, in test_set_registers_57
self.assertEquals(57, compiled._func.get().attrs.regs)
AssertionError: 57 != 56
|
AssertionError
|
def process_environ(self, environ):
def _readenv(name, ctor, default):
value = environ.get(name)
if value is None:
return default() if callable(default) else default
try:
return ctor(value)
except Exception:
warnings.warn(
"environ %s defined but failed to parse '%s'" % (name, value),
RuntimeWarning,
)
return default
def optional_str(x):
return str(x) if x is not None else None
# developer mode produces full tracebacks, disables help instructions
DEVELOPER_MODE = _readenv("NUMBA_DEVELOPER_MODE", int, 0)
# disable performance warnings, will switch of the generation of
# warnings of the class NumbaPerformanceWarning
DISABLE_PERFORMANCE_WARNINGS = _readenv(
"NUMBA_DISABLE_PERFORMANCE_WARNINGS", int, 0
)
# Flag to enable full exception reporting
FULL_TRACEBACKS = _readenv("NUMBA_FULL_TRACEBACKS", int, DEVELOPER_MODE)
# Show help text when an error occurs
SHOW_HELP = _readenv("NUMBA_SHOW_HELP", int, 0)
# The color scheme to use for error messages, default is no color
# just bold fonts in use.
COLOR_SCHEME = _readenv("NUMBA_COLOR_SCHEME", str, "no_color")
# Whether to globally enable bounds checking. The default None means
# to use the value of the flag to @njit. 0 or 1 overrides the flag
# globally.
BOUNDSCHECK = _readenv("NUMBA_BOUNDSCHECK", int, None)
# Whether to always warn about potential uninitialized variables
# because static controlflow analysis cannot find a definition
# in one or more of the incoming paths.
ALWAYS_WARN_UNINIT_VAR = _readenv(
"NUMBA_ALWAYS_WARN_UNINIT_VAR",
int,
0,
)
# Debug flag to control compiler debug print
DEBUG = _readenv("NUMBA_DEBUG", int, 0)
# DEBUG print IR after pass names
DEBUG_PRINT_AFTER = _readenv("NUMBA_DEBUG_PRINT_AFTER", str, "none")
# DEBUG print IR before pass names
DEBUG_PRINT_BEFORE = _readenv("NUMBA_DEBUG_PRINT_BEFORE", str, "none")
# DEBUG print IR before and after pass names
DEBUG_PRINT_WRAP = _readenv("NUMBA_DEBUG_PRINT_WRAP", str, "none")
# Highlighting in intermediate dumps
HIGHLIGHT_DUMPS = _readenv("NUMBA_HIGHLIGHT_DUMPS", int, 0)
# JIT Debug flag to trigger IR instruction print
DEBUG_JIT = _readenv("NUMBA_DEBUG_JIT", int, 0)
# Enable debugging of front-end operation
# (up to and including IR generation)
DEBUG_FRONTEND = _readenv("NUMBA_DEBUG_FRONTEND", int, 0)
# How many recently deserialized functions to retain regardless
# of external references
FUNCTION_CACHE_SIZE = _readenv("NUMBA_FUNCTION_CACHE_SIZE", int, 128)
# Maximum tuple size that parfors will unpack and pass to
# internal gufunc.
PARFOR_MAX_TUPLE_SIZE = _readenv("NUMBA_PARFOR_MAX_TUPLE_SIZE", int, 100)
# Enable logging of cache operation
DEBUG_CACHE = _readenv("NUMBA_DEBUG_CACHE", int, DEBUG)
# Redirect cache directory
# Contains path to the directory
CACHE_DIR = _readenv("NUMBA_CACHE_DIR", str, "")
# Enable tracing support
TRACE = _readenv("NUMBA_TRACE", int, 0)
# Enable debugging of type inference
DEBUG_TYPEINFER = _readenv("NUMBA_DEBUG_TYPEINFER", int, 0)
# Configure compilation target to use the specified CPU name
# and CPU feature as the host information.
# Note: this overrides "host" option for AOT compilation.
CPU_NAME = _readenv("NUMBA_CPU_NAME", optional_str, None)
CPU_FEATURES = _readenv(
"NUMBA_CPU_FEATURES",
optional_str,
("" if str(CPU_NAME).lower() == "generic" else None),
)
# Optimization level
OPT = _readenv("NUMBA_OPT", int, 3)
# Force dump of Python bytecode
DUMP_BYTECODE = _readenv("NUMBA_DUMP_BYTECODE", int, DEBUG_FRONTEND)
# Force dump of control flow graph
DUMP_CFG = _readenv("NUMBA_DUMP_CFG", int, DEBUG_FRONTEND)
# Force dump of Numba IR
DUMP_IR = _readenv("NUMBA_DUMP_IR", int, DEBUG_FRONTEND or DEBUG_TYPEINFER)
# print debug info of analysis and optimization on array operations
DEBUG_ARRAY_OPT = _readenv("NUMBA_DEBUG_ARRAY_OPT", int, 0)
# insert debug stmts to print information at runtime
DEBUG_ARRAY_OPT_RUNTIME = _readenv("NUMBA_DEBUG_ARRAY_OPT_RUNTIME", int, 0)
# print stats about parallel for-loops
DEBUG_ARRAY_OPT_STATS = _readenv("NUMBA_DEBUG_ARRAY_OPT_STATS", int, 0)
# prints user friendly information about parallel
PARALLEL_DIAGNOSTICS = _readenv("NUMBA_PARALLEL_DIAGNOSTICS", int, 0)
# print debug info of inline closure pass
DEBUG_INLINE_CLOSURE = _readenv("NUMBA_DEBUG_INLINE_CLOSURE", int, 0)
# Force dump of LLVM IR
DUMP_LLVM = _readenv("NUMBA_DUMP_LLVM", int, DEBUG)
# Force dump of Function optimized LLVM IR
DUMP_FUNC_OPT = _readenv("NUMBA_DUMP_FUNC_OPT", int, DEBUG)
# Force dump of Optimized LLVM IR
DUMP_OPTIMIZED = _readenv("NUMBA_DUMP_OPTIMIZED", int, DEBUG)
# Force disable loop vectorize
# Loop vectorizer is disabled on 32-bit win32 due to a bug (#649)
LOOP_VECTORIZE = _readenv("NUMBA_LOOP_VECTORIZE", int, not (IS_WIN32 and IS_32BITS))
# Force dump of generated assembly
DUMP_ASSEMBLY = _readenv("NUMBA_DUMP_ASSEMBLY", int, DEBUG)
# Force dump of type annotation
ANNOTATE = _readenv("NUMBA_DUMP_ANNOTATION", int, 0)
# Dump IR in such as way as to aid in "diff"ing.
DIFF_IR = _readenv("NUMBA_DIFF_IR", int, 0)
# Dump type annotation in html format
def fmt_html_path(path):
if path is None:
return path
else:
return os.path.abspath(path)
HTML = _readenv("NUMBA_DUMP_HTML", fmt_html_path, None)
# Allow interpreter fallback so that Numba @jit decorator will never
# fail. Use for migrating from old numba (<0.12) which supported
# closure, and other yet-to-be-supported features.
COMPATIBILITY_MODE = _readenv("NUMBA_COMPATIBILITY_MODE", int, 0)
# x86-64 specific
# Enable AVX on supported platforms where it won't degrade performance.
def avx_default():
if not _os_supports_avx():
return False
else:
# There are various performance issues with AVX and LLVM
# on some CPUs (list at
# http://llvm.org/bugs/buglist.cgi?quicksearch=avx).
# For now we'd rather disable it, since it can pessimize code
cpu_name = ll.get_host_cpu_name()
return cpu_name not in (
"corei7-avx",
"core-avx-i",
"sandybridge",
"ivybridge",
)
ENABLE_AVX = _readenv("NUMBA_ENABLE_AVX", int, avx_default)
# if set and SVML is available, it will be disabled
# By default, it's disabled on 32-bit platforms.
DISABLE_INTEL_SVML = _readenv("NUMBA_DISABLE_INTEL_SVML", int, IS_32BITS)
# Disable jit for debugging
DISABLE_JIT = _readenv("NUMBA_DISABLE_JIT", int, 0)
# choose parallel backend to use
THREADING_LAYER = _readenv("NUMBA_THREADING_LAYER", str, "default")
# CUDA Configs
# Force CUDA compute capability to a specific version
FORCE_CUDA_CC = _readenv("NUMBA_FORCE_CUDA_CC", _parse_cc, None)
# Disable CUDA support
DISABLE_CUDA = _readenv("NUMBA_DISABLE_CUDA", int, int(MACHINE_BITS == 32))
# Enable CUDA simulator
ENABLE_CUDASIM = _readenv("NUMBA_ENABLE_CUDASIM", int, 0)
# CUDA logging level
# Any level name from the *logging* module. Case insensitive.
# Defaults to CRITICAL if not set or invalid.
# Note: This setting only applies when logging is not configured.
# Any existing logging configuration is preserved.
CUDA_LOG_LEVEL = _readenv("NUMBA_CUDA_LOG_LEVEL", str, "")
# Maximum number of pending CUDA deallocations (default: 10)
CUDA_DEALLOCS_COUNT = _readenv("NUMBA_CUDA_MAX_PENDING_DEALLOCS_COUNT", int, 10)
# Maximum ratio of pending CUDA deallocations to capacity (default: 0.2)
CUDA_DEALLOCS_RATIO = _readenv("NUMBA_CUDA_MAX_PENDING_DEALLOCS_RATIO", float, 0.2)
# HSA Configs
# Disable HSA support
DISABLE_HSA = _readenv("NUMBA_DISABLE_HSA", int, 0)
# The default number of threads to use.
NUMBA_DEFAULT_NUM_THREADS = max(1, multiprocessing.cpu_count())
# Numba thread pool size (defaults to number of CPUs on the system).
_NUMBA_NUM_THREADS = _readenv("NUMBA_NUM_THREADS", int, NUMBA_DEFAULT_NUM_THREADS)
if (
"NUMBA_NUM_THREADS" in globals()
and globals()["NUMBA_NUM_THREADS"] != _NUMBA_NUM_THREADS
):
from numba.np.ufunc import parallel
if parallel._is_initialized:
raise RuntimeError(
"Cannot set NUMBA_NUM_THREADS to a "
"different value once the threads have been "
"launched (currently have %s, "
"trying to set %s)"
% (_NUMBA_NUM_THREADS, globals()["NUMBA_NUM_THREADS"])
)
NUMBA_NUM_THREADS = _NUMBA_NUM_THREADS
del _NUMBA_NUM_THREADS
# Profiling support
# Indicates if a profiler detected. Only VTune can be detected for now
RUNNING_UNDER_PROFILER = "VS_PROFILER" in os.environ
# Enables jit events in LLVM to support profiling of dynamic code
ENABLE_PROFILING = _readenv(
"NUMBA_ENABLE_PROFILING", int, int(RUNNING_UNDER_PROFILER)
)
# Debug Info
# The default value for the `debug` flag
DEBUGINFO_DEFAULT = _readenv("NUMBA_DEBUGINFO", int, ENABLE_PROFILING)
CUDA_DEBUGINFO_DEFAULT = _readenv("NUMBA_CUDA_DEBUGINFO", int, 0)
# gdb binary location
GDB_BINARY = _readenv("NUMBA_GDB_BINARY", str, "/usr/bin/gdb")
# CUDA Memory management
CUDA_MEMORY_MANAGER = _readenv("NUMBA_CUDA_MEMORY_MANAGER", str, "default")
# Inject the configuration values into the module globals
for name, value in locals().copy().items():
if name.isupper():
globals()[name] = value
|
def process_environ(self, environ):
def _readenv(name, ctor, default):
value = environ.get(name)
if value is None:
return default() if callable(default) else default
try:
return ctor(value)
except Exception:
warnings.warn(
"environ %s defined but failed to parse '%s'" % (name, value),
RuntimeWarning,
)
return default
def optional_str(x):
return str(x) if x is not None else None
# developer mode produces full tracebacks, disables help instructions
DEVELOPER_MODE = _readenv("NUMBA_DEVELOPER_MODE", int, 0)
# disable performance warnings, will switch of the generation of
# warnings of the class NumbaPerformanceWarning
DISABLE_PERFORMANCE_WARNINGS = _readenv(
"NUMBA_DISABLE_PERFORMANCE_WARNINGS", int, 0
)
# Flag to enable full exception reporting
FULL_TRACEBACKS = _readenv("NUMBA_FULL_TRACEBACKS", int, DEVELOPER_MODE)
# Show help text when an error occurs
SHOW_HELP = _readenv("NUMBA_SHOW_HELP", int, 0)
# The color scheme to use for error messages, default is no color
# just bold fonts in use.
COLOR_SCHEME = _readenv("NUMBA_COLOR_SCHEME", str, "no_color")
# Whether to globally enable bounds checking. The default None means
# to use the value of the flag to @njit. 0 or 1 overrides the flag
# globally.
BOUNDSCHECK = _readenv("NUMBA_BOUNDSCHECK", int, None)
# Whether to always warn about potential uninitialized variables
# because static controlflow analysis cannot find a definition
# in one or more of the incoming paths.
ALWAYS_WARN_UNINIT_VAR = _readenv("ALWAYS_WARN_UNINIT_VAR", int, 0)
# Debug flag to control compiler debug print
DEBUG = _readenv("NUMBA_DEBUG", int, 0)
# DEBUG print IR after pass names
DEBUG_PRINT_AFTER = _readenv("NUMBA_DEBUG_PRINT_AFTER", str, "none")
# DEBUG print IR before pass names
DEBUG_PRINT_BEFORE = _readenv("NUMBA_DEBUG_PRINT_BEFORE", str, "none")
# DEBUG print IR before and after pass names
DEBUG_PRINT_WRAP = _readenv("NUMBA_DEBUG_PRINT_WRAP", str, "none")
# Highlighting in intermediate dumps
HIGHLIGHT_DUMPS = _readenv("NUMBA_HIGHLIGHT_DUMPS", int, 0)
# JIT Debug flag to trigger IR instruction print
DEBUG_JIT = _readenv("NUMBA_DEBUG_JIT", int, 0)
# Enable debugging of front-end operation
# (up to and including IR generation)
DEBUG_FRONTEND = _readenv("NUMBA_DEBUG_FRONTEND", int, 0)
# How many recently deserialized functions to retain regardless
# of external references
FUNCTION_CACHE_SIZE = _readenv("NUMBA_FUNCTION_CACHE_SIZE", int, 128)
# Maximum tuple size that parfors will unpack and pass to
# internal gufunc.
PARFOR_MAX_TUPLE_SIZE = _readenv("NUMBA_PARFOR_MAX_TUPLE_SIZE", int, 100)
# Enable logging of cache operation
DEBUG_CACHE = _readenv("NUMBA_DEBUG_CACHE", int, DEBUG)
# Redirect cache directory
# Contains path to the directory
CACHE_DIR = _readenv("NUMBA_CACHE_DIR", str, "")
# Enable tracing support
TRACE = _readenv("NUMBA_TRACE", int, 0)
# Enable debugging of type inference
DEBUG_TYPEINFER = _readenv("NUMBA_DEBUG_TYPEINFER", int, 0)
# Configure compilation target to use the specified CPU name
# and CPU feature as the host information.
# Note: this overrides "host" option for AOT compilation.
CPU_NAME = _readenv("NUMBA_CPU_NAME", optional_str, None)
CPU_FEATURES = _readenv(
"NUMBA_CPU_FEATURES",
optional_str,
("" if str(CPU_NAME).lower() == "generic" else None),
)
# Optimization level
OPT = _readenv("NUMBA_OPT", int, 3)
# Force dump of Python bytecode
DUMP_BYTECODE = _readenv("NUMBA_DUMP_BYTECODE", int, DEBUG_FRONTEND)
# Force dump of control flow graph
DUMP_CFG = _readenv("NUMBA_DUMP_CFG", int, DEBUG_FRONTEND)
# Force dump of Numba IR
DUMP_IR = _readenv("NUMBA_DUMP_IR", int, DEBUG_FRONTEND or DEBUG_TYPEINFER)
# print debug info of analysis and optimization on array operations
DEBUG_ARRAY_OPT = _readenv("NUMBA_DEBUG_ARRAY_OPT", int, 0)
# insert debug stmts to print information at runtime
DEBUG_ARRAY_OPT_RUNTIME = _readenv("NUMBA_DEBUG_ARRAY_OPT_RUNTIME", int, 0)
# print stats about parallel for-loops
DEBUG_ARRAY_OPT_STATS = _readenv("NUMBA_DEBUG_ARRAY_OPT_STATS", int, 0)
# prints user friendly information about parallel
PARALLEL_DIAGNOSTICS = _readenv("NUMBA_PARALLEL_DIAGNOSTICS", int, 0)
# print debug info of inline closure pass
DEBUG_INLINE_CLOSURE = _readenv("NUMBA_DEBUG_INLINE_CLOSURE", int, 0)
# Force dump of LLVM IR
DUMP_LLVM = _readenv("NUMBA_DUMP_LLVM", int, DEBUG)
# Force dump of Function optimized LLVM IR
DUMP_FUNC_OPT = _readenv("NUMBA_DUMP_FUNC_OPT", int, DEBUG)
# Force dump of Optimized LLVM IR
DUMP_OPTIMIZED = _readenv("NUMBA_DUMP_OPTIMIZED", int, DEBUG)
# Force disable loop vectorize
# Loop vectorizer is disabled on 32-bit win32 due to a bug (#649)
LOOP_VECTORIZE = _readenv("NUMBA_LOOP_VECTORIZE", int, not (IS_WIN32 and IS_32BITS))
# Force dump of generated assembly
DUMP_ASSEMBLY = _readenv("NUMBA_DUMP_ASSEMBLY", int, DEBUG)
# Force dump of type annotation
ANNOTATE = _readenv("NUMBA_DUMP_ANNOTATION", int, 0)
# Dump IR in such as way as to aid in "diff"ing.
DIFF_IR = _readenv("NUMBA_DIFF_IR", int, 0)
# Dump type annotation in html format
def fmt_html_path(path):
if path is None:
return path
else:
return os.path.abspath(path)
HTML = _readenv("NUMBA_DUMP_HTML", fmt_html_path, None)
# Allow interpreter fallback so that Numba @jit decorator will never
# fail. Use for migrating from old numba (<0.12) which supported
# closure, and other yet-to-be-supported features.
COMPATIBILITY_MODE = _readenv("NUMBA_COMPATIBILITY_MODE", int, 0)
# x86-64 specific
# Enable AVX on supported platforms where it won't degrade performance.
def avx_default():
if not _os_supports_avx():
return False
else:
# There are various performance issues with AVX and LLVM
# on some CPUs (list at
# http://llvm.org/bugs/buglist.cgi?quicksearch=avx).
# For now we'd rather disable it, since it can pessimize code
cpu_name = ll.get_host_cpu_name()
return cpu_name not in (
"corei7-avx",
"core-avx-i",
"sandybridge",
"ivybridge",
)
ENABLE_AVX = _readenv("NUMBA_ENABLE_AVX", int, avx_default)
# if set and SVML is available, it will be disabled
# By default, it's disabled on 32-bit platforms.
DISABLE_INTEL_SVML = _readenv("NUMBA_DISABLE_INTEL_SVML", int, IS_32BITS)
# Disable jit for debugging
DISABLE_JIT = _readenv("NUMBA_DISABLE_JIT", int, 0)
# choose parallel backend to use
THREADING_LAYER = _readenv("NUMBA_THREADING_LAYER", str, "default")
# CUDA Configs
# Force CUDA compute capability to a specific version
FORCE_CUDA_CC = _readenv("NUMBA_FORCE_CUDA_CC", _parse_cc, None)
# Disable CUDA support
DISABLE_CUDA = _readenv("NUMBA_DISABLE_CUDA", int, int(MACHINE_BITS == 32))
# Enable CUDA simulator
ENABLE_CUDASIM = _readenv("NUMBA_ENABLE_CUDASIM", int, 0)
# CUDA logging level
# Any level name from the *logging* module. Case insensitive.
# Defaults to CRITICAL if not set or invalid.
# Note: This setting only applies when logging is not configured.
# Any existing logging configuration is preserved.
CUDA_LOG_LEVEL = _readenv("NUMBA_CUDA_LOG_LEVEL", str, "")
# Maximum number of pending CUDA deallocations (default: 10)
CUDA_DEALLOCS_COUNT = _readenv("NUMBA_CUDA_MAX_PENDING_DEALLOCS_COUNT", int, 10)
# Maximum ratio of pending CUDA deallocations to capacity (default: 0.2)
CUDA_DEALLOCS_RATIO = _readenv("NUMBA_CUDA_MAX_PENDING_DEALLOCS_RATIO", float, 0.2)
# HSA Configs
# Disable HSA support
DISABLE_HSA = _readenv("NUMBA_DISABLE_HSA", int, 0)
# The default number of threads to use.
NUMBA_DEFAULT_NUM_THREADS = max(1, multiprocessing.cpu_count())
# Numba thread pool size (defaults to number of CPUs on the system).
_NUMBA_NUM_THREADS = _readenv("NUMBA_NUM_THREADS", int, NUMBA_DEFAULT_NUM_THREADS)
if (
"NUMBA_NUM_THREADS" in globals()
and globals()["NUMBA_NUM_THREADS"] != _NUMBA_NUM_THREADS
):
from numba.np.ufunc import parallel
if parallel._is_initialized:
raise RuntimeError(
"Cannot set NUMBA_NUM_THREADS to a "
"different value once the threads have been "
"launched (currently have %s, "
"trying to set %s)"
% (_NUMBA_NUM_THREADS, globals()["NUMBA_NUM_THREADS"])
)
NUMBA_NUM_THREADS = _NUMBA_NUM_THREADS
del _NUMBA_NUM_THREADS
# Profiling support
# Indicates if a profiler detected. Only VTune can be detected for now
RUNNING_UNDER_PROFILER = "VS_PROFILER" in os.environ
# Enables jit events in LLVM to support profiling of dynamic code
ENABLE_PROFILING = _readenv(
"NUMBA_ENABLE_PROFILING", int, int(RUNNING_UNDER_PROFILER)
)
# Debug Info
# The default value for the `debug` flag
DEBUGINFO_DEFAULT = _readenv("NUMBA_DEBUGINFO", int, ENABLE_PROFILING)
CUDA_DEBUGINFO_DEFAULT = _readenv("NUMBA_CUDA_DEBUGINFO", int, 0)
# gdb binary location
GDB_BINARY = _readenv("NUMBA_GDB_BINARY", str, "/usr/bin/gdb")
# CUDA Memory management
CUDA_MEMORY_MANAGER = _readenv("NUMBA_CUDA_MEMORY_MANAGER", str, "default")
# Inject the configuration values into the module globals
for name, value in locals().copy().items():
if name.isupper():
globals()[name] = value
|
https://github.com/numba/numba/issues/5482
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
768 try:
--> 769 yield
770 except NumbaError as e:
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_inst(self, inst)
363 ty = self.typeof(inst.target.name)
--> 364 val = self.lower_assign(ty, inst)
365 self.storevar(val, inst.target.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_assign(self, ty, inst)
540 elif isinstance(value, ir.Var):
--> 541 val = self.loadvar(value.name)
542 oty = self.typeof(value.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in loadvar(self, name)
1280 """
-> 1281 ptr = self.getvar(name)
1282 return self.builder.load(ptr)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in getvar(self, name)
1274 """
-> 1275 return self.varmap[name]
1276
KeyError: 'cum_v.2'
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-19-fa852184280b> in <module>
31 return probs
32
---> 33 foo(pol_data, decr_vecs)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, sig)
792 self._cache_misses[sig] += 1
793 try:
--> 794 cres = self._compiler.compile(args, return_type)
795 except errors.ForceLiteralArg as e:
796 def folded(args, kws):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_core(self, args, return_type)
102
103 impl = self._get_implementation(args, {})
--> 104 cres = compiler.compile_extra(self.targetdescr.typing_context,
105 self.targetdescr.target_context,
106 impl,
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
566 pipeline = pipeline_class(typingctx, targetctx, library,
567 args, return_type, flags, locals)
--> 568 return pipeline.compile_extra(func)
569
570
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(self, func)
337 self.state.lifted = ()
338 self.state.lifted_from = None
--> 339 return self._compile_bytecode()
340
341 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_bytecode(self)
399 """
400 assert self.state.func_ir is None
--> 401 return self._compile_core()
402
403 def _compile_ir(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
379 self.state.status.fail_reason = e
380 if is_final_pipeline:
--> 381 raise e
382 else:
383 raise CompilerError("All available pipelines exhausted")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
370 res = None
371 try:
--> 372 pm.run(self.state)
373 if self.state.cr is not None:
374 break
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
339 (self.pipeline_name, pass_desc)
340 patched_exception = self._patch_error(msg, e)
--> 341 raise patched_exception
342
343 def dependency_analysis(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
330 pass_inst = _pass_registry.get(pss).pass_inst
331 if isinstance(pass_inst, CompilerPass):
--> 332 self._runPass(idx, pass_inst, state)
333 else:
334 raise BaseException("Legacy pass in use")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
289 mutated |= check(pss.run_initialization, internal_state)
290 with SimpleTimer() as pass_time:
--> 291 mutated |= check(pss.run_pass, internal_state)
292 with SimpleTimer() as finalize_time:
293 mutated |= check(pss.run_finalizer, internal_state)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in check(func, compiler_state)
262
263 def check(func, compiler_state):
--> 264 mangled = func(compiler_state)
265 if mangled not in (True, False):
266 msg = ("CompilerPass implementations should return True/False. "
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
440
441 # TODO: Pull this out into the pipeline
--> 442 NativeLowering().run_pass(state)
443 lowered = state['cr']
444 signature = typing.signature(state.return_type, *state.args)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
368 lower = lowering.Lower(targetctx, library, fndesc, interp,
369 metadata=metadata)
--> 370 lower.lower()
371 if not flags.no_cpython_wrapper:
372 lower.create_cpython_wrapper(flags.release_gil)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_normal_function(self, fndesc)
225 # Init argument values
226 self.extract_function_arguments()
--> 227 entry_block_tail = self.lower_function_body()
228
229 # Close tail of entry block
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_function_body(self)
251 bb = self.blkmap[offset]
252 self.builder.position_at_end(bb)
--> 253 self.lower_block(block)
254 self.post_lower()
255 return entry_block_tail
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
265 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
269
~/anaconda3/envs/latest_versions38/lib/python3.8/contextlib.py in __exit__(self, type, value, traceback)
129 value = type()
130 try:
--> 131 self.gen.throw(type, value, traceback)
132 except StopIteration as exc:
133 # Suppress StopIteration *unless* it's the same exception that
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
774 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
775 tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
--> 776 reraise(type(newerr), newerr, tb)
777
778
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/utils.py in reraise(tp, value, tb)
78 if value.__traceback__ is not tb:
79 raise value.with_traceback(tb)
---> 80 raise value
81
82
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
'cum_v.2'
File "<ipython-input-19-fa852184280b>", line 30:
def foo(pol_data, decr_vecs):
<source elided>
else:
cum_v = np.kron(cum_v, v)
^
[1] During: lowering "cum_v.4 = cum_v.2" at <ipython-input-19-fa852184280b> (30)
|
KeyError
|
def lower_expr(self, resty, expr):
if expr.op == "binop":
return self.lower_binop(resty, expr, expr.fn)
elif expr.op == "inplace_binop":
lty = self.typeof(expr.lhs.name)
if lty.mutable:
return self.lower_binop(resty, expr, expr.fn)
else:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.)
return self.lower_binop(resty, expr, expr.immutable_fn)
elif expr.op == "unary":
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
func_ty = self.context.typing_context.resolve_value_type(expr.fn)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(func_ty, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
res = self.context.cast(self.builder, res, signature.return_type, resty)
return res
elif expr.op == "call":
res = self.lower_call(resty, expr)
return res
elif expr.op == "pair_first":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_first(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op == "pair_second":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_second(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op in ("getiter", "iternext"):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
res = self.context.cast(self.builder, res, signature.return_type, resty)
return res
elif expr.op == "exhaust_iter":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# Unpack optional
if isinstance(ty, types.Optional):
val = self.context.cast(self.builder, val, ty, ty.type)
ty = ty.type
# If we have a tuple, we needn't do anything
# (and we can't iterate over the heterogeneous ones).
if isinstance(ty, types.BaseTuple):
assert ty == resty
self.incref(ty, val)
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function("getiter", getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function("iternext", iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, self.builder.not_(is_valid)):
self.return_exception(ValueError, loc=self.loc)
item = self.context.pair_first(self.builder, pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.return_exception(ValueError, loc=self.loc)
self.decref(ty.iterator_type, iterobj)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
casted = self.context.cast(self.builder, val, ty, resty.this)
res = self.context.get_bound_function(self.builder, casted, resty.this)
self.incref(resty, res)
return res
else:
impl = self.context.get_getattr(ty, expr.attr)
attrty = self.context.typing_context.resolve_getattr(ty, expr.attr)
if impl is None:
# ignore the attribute
return self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
# Cast the attribute type to the expected output type
res = self.context.cast(self.builder, res, attrty, resty)
return res
elif expr.op == "static_getitem":
signature = typing.signature(
resty,
self.typeof(expr.value.name),
_lit_or_omitted(expr.index),
)
try:
# Both get_function() and the returned implementation can
# raise NotImplementedError if the types aren't supported
impl = self.context.get_function("static_getitem", signature)
return impl(self.builder, (self.loadvar(expr.value.name), expr.index))
except NotImplementedError:
if expr.index_var is None:
raise
# Fall back on the generic getitem() implementation
# for this type.
signature = self.fndesc.calltypes[expr]
return self.lower_getitem(
resty, expr, expr.value, expr.index_var, signature
)
elif expr.op == "typed_getitem":
signature = typing.signature(
resty,
self.typeof(expr.value.name),
self.typeof(expr.index.name),
)
impl = self.context.get_function("typed_getitem", signature)
return impl(
self.builder, (self.loadvar(expr.value.name), self.loadvar(expr.index.name))
)
elif expr.op == "getitem":
signature = self.fndesc.calltypes[expr]
return self.lower_getitem(resty, expr, expr.value, expr.index, signature)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)
]
tup = self.context.make_tuple(self.builder, resty, castvals)
self.incref(resty, tup)
return tup
elif expr.op == "build_list":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, resty.dtype)
for val, fromty in zip(itemvals, itemtys)
]
return self.context.build_list(self.builder, resty, castvals)
elif expr.op == "build_set":
# Insert in reverse order, as Python does
items = expr.items[::-1]
itemvals = [self.loadvar(i.name) for i in items]
itemtys = [self.typeof(i.name) for i in items]
castvals = [
self.context.cast(self.builder, val, fromty, resty.dtype)
for val, fromty in zip(itemvals, itemtys)
]
return self.context.build_set(self.builder, resty, castvals)
elif expr.op == "build_map":
items = expr.items
keys, values = [], []
key_types, value_types = [], []
for k, v in items:
key = self.loadvar(k.name)
keytype = self.typeof(k.name)
val = self.loadvar(v.name)
valtype = self.typeof(v.name)
keys.append(key)
values.append(val)
key_types.append(keytype)
value_types.append(valtype)
return self.context.build_map(
self.builder,
resty,
list(zip(key_types, value_types)),
list(zip(keys, values)),
)
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
self.incref(resty, castval)
return castval
elif expr.op == "phi":
raise LoweringError("PHI not stripped")
elif expr.op == "null":
return self.context.get_constant_null(resty)
elif expr.op in self.context.special_ops:
res = self.context.special_ops[expr.op](self, expr)
return res
raise NotImplementedError(expr)
|
def lower_expr(self, resty, expr):
if expr.op == "binop":
return self.lower_binop(resty, expr, expr.fn)
elif expr.op == "inplace_binop":
lty = self.typeof(expr.lhs.name)
if lty.mutable:
return self.lower_binop(resty, expr, expr.fn)
else:
# inplace operators on non-mutable types reuse the same
# definition as the corresponding copying operators.)
return self.lower_binop(resty, expr, expr.immutable_fn)
elif expr.op == "unary":
val = self.loadvar(expr.value.name)
typ = self.typeof(expr.value.name)
func_ty = self.context.typing_context.resolve_value_type(expr.fn)
# Get function
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(func_ty, signature)
# Convert argument to match
val = self.context.cast(self.builder, val, typ, signature.args[0])
res = impl(self.builder, [val])
res = self.context.cast(self.builder, res, signature.return_type, resty)
return res
elif expr.op == "call":
res = self.lower_call(resty, expr)
return res
elif expr.op == "pair_first":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_first(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op == "pair_second":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
res = self.context.pair_second(self.builder, val, ty)
self.incref(resty, res)
return res
elif expr.op in ("getiter", "iternext"):
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
signature = self.fndesc.calltypes[expr]
impl = self.context.get_function(expr.op, signature)
[fty] = signature.args
castval = self.context.cast(self.builder, val, ty, fty)
res = impl(self.builder, (castval,))
res = self.context.cast(self.builder, res, signature.return_type, resty)
return res
elif expr.op == "exhaust_iter":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
# Unpack optional
if isinstance(ty, types.Optional):
val = self.context.cast(self.builder, val, ty, ty.type)
ty = ty.type
# If we have a tuple, we needn't do anything
# (and we can't iterate over the heterogeneous ones).
if isinstance(ty, types.BaseTuple):
assert ty == resty
self.incref(ty, val)
return val
itemty = ty.iterator_type.yield_type
tup = self.context.get_constant_undef(resty)
pairty = types.Pair(itemty, types.boolean)
getiter_sig = typing.signature(ty.iterator_type, ty)
getiter_impl = self.context.get_function("getiter", getiter_sig)
iternext_sig = typing.signature(pairty, ty.iterator_type)
iternext_impl = self.context.get_function("iternext", iternext_sig)
iterobj = getiter_impl(self.builder, (val,))
# We call iternext() as many times as desired (`expr.count`).
for i in range(expr.count):
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, self.builder.not_(is_valid)):
self.return_exception(ValueError, loc=self.loc)
item = self.context.pair_first(self.builder, pair, pairty)
tup = self.builder.insert_value(tup, item, i)
# Call iternext() once more to check that the iterator
# is exhausted.
pair = iternext_impl(self.builder, (iterobj,))
is_valid = self.context.pair_second(self.builder, pair, pairty)
with cgutils.if_unlikely(self.builder, is_valid):
self.return_exception(ValueError, loc=self.loc)
self.decref(ty.iterator_type, iterobj)
return tup
elif expr.op == "getattr":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
if isinstance(resty, types.BoundFunction):
# if we are getting out a method, assume we have typed this
# properly and just build a bound function object
casted = self.context.cast(self.builder, val, ty, resty.this)
res = self.context.get_bound_function(self.builder, casted, resty.this)
self.incref(resty, res)
return res
else:
impl = self.context.get_getattr(ty, expr.attr)
attrty = self.context.typing_context.resolve_getattr(ty, expr.attr)
if impl is None:
# ignore the attribute
return self.context.get_dummy_value()
else:
res = impl(self.context, self.builder, ty, val, expr.attr)
# Cast the attribute type to the expected output type
res = self.context.cast(self.builder, res, attrty, resty)
return res
elif expr.op == "static_getitem":
signature = typing.signature(
resty,
self.typeof(expr.value.name),
_lit_or_omitted(expr.index),
)
try:
# Both get_function() and the returned implementation can
# raise NotImplementedError if the types aren't supported
impl = self.context.get_function("static_getitem", signature)
return impl(self.builder, (self.loadvar(expr.value.name), expr.index))
except NotImplementedError:
if expr.index_var is None:
raise
# Fall back on the generic getitem() implementation
# for this type.
signature = self.fndesc.calltypes[expr]
return self.lower_getitem(
resty, expr, expr.value, expr.index_var, signature
)
elif expr.op == "typed_getitem":
signature = typing.signature(
resty,
self.typeof(expr.value.name),
self.typeof(expr.index.name),
)
impl = self.context.get_function("typed_getitem", signature)
return impl(
self.builder, (self.loadvar(expr.value.name), self.loadvar(expr.index.name))
)
elif expr.op == "getitem":
signature = self.fndesc.calltypes[expr]
return self.lower_getitem(resty, expr, expr.value, expr.index, signature)
elif expr.op == "build_tuple":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, toty)
for val, toty, fromty in zip(itemvals, resty, itemtys)
]
tup = self.context.make_tuple(self.builder, resty, castvals)
self.incref(resty, tup)
return tup
elif expr.op == "build_list":
itemvals = [self.loadvar(i.name) for i in expr.items]
itemtys = [self.typeof(i.name) for i in expr.items]
castvals = [
self.context.cast(self.builder, val, fromty, resty.dtype)
for val, fromty in zip(itemvals, itemtys)
]
return self.context.build_list(self.builder, resty, castvals)
elif expr.op == "build_set":
# Insert in reverse order, as Python does
items = expr.items[::-1]
itemvals = [self.loadvar(i.name) for i in items]
itemtys = [self.typeof(i.name) for i in items]
castvals = [
self.context.cast(self.builder, val, fromty, resty.dtype)
for val, fromty in zip(itemvals, itemtys)
]
return self.context.build_set(self.builder, resty, castvals)
elif expr.op == "build_map":
items = expr.items
keys, values = [], []
key_types, value_types = [], []
for k, v in items:
key = self.loadvar(k.name)
keytype = self.typeof(k.name)
val = self.loadvar(v.name)
valtype = self.typeof(v.name)
keys.append(key)
values.append(val)
key_types.append(keytype)
value_types.append(valtype)
return self.context.build_map(
self.builder,
resty,
list(zip(key_types, value_types)),
list(zip(keys, values)),
)
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
ty = self.typeof(expr.value.name)
castval = self.context.cast(self.builder, val, ty, resty)
self.incref(resty, castval)
return castval
elif expr.op == "phi":
raise LoweringError("PHI not stripped")
elif expr.op in self.context.special_ops:
res = self.context.special_ops[expr.op](self, expr)
return res
raise NotImplementedError(expr)
|
https://github.com/numba/numba/issues/5482
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
768 try:
--> 769 yield
770 except NumbaError as e:
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_inst(self, inst)
363 ty = self.typeof(inst.target.name)
--> 364 val = self.lower_assign(ty, inst)
365 self.storevar(val, inst.target.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_assign(self, ty, inst)
540 elif isinstance(value, ir.Var):
--> 541 val = self.loadvar(value.name)
542 oty = self.typeof(value.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in loadvar(self, name)
1280 """
-> 1281 ptr = self.getvar(name)
1282 return self.builder.load(ptr)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in getvar(self, name)
1274 """
-> 1275 return self.varmap[name]
1276
KeyError: 'cum_v.2'
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-19-fa852184280b> in <module>
31 return probs
32
---> 33 foo(pol_data, decr_vecs)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, sig)
792 self._cache_misses[sig] += 1
793 try:
--> 794 cres = self._compiler.compile(args, return_type)
795 except errors.ForceLiteralArg as e:
796 def folded(args, kws):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_core(self, args, return_type)
102
103 impl = self._get_implementation(args, {})
--> 104 cres = compiler.compile_extra(self.targetdescr.typing_context,
105 self.targetdescr.target_context,
106 impl,
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
566 pipeline = pipeline_class(typingctx, targetctx, library,
567 args, return_type, flags, locals)
--> 568 return pipeline.compile_extra(func)
569
570
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(self, func)
337 self.state.lifted = ()
338 self.state.lifted_from = None
--> 339 return self._compile_bytecode()
340
341 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_bytecode(self)
399 """
400 assert self.state.func_ir is None
--> 401 return self._compile_core()
402
403 def _compile_ir(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
379 self.state.status.fail_reason = e
380 if is_final_pipeline:
--> 381 raise e
382 else:
383 raise CompilerError("All available pipelines exhausted")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
370 res = None
371 try:
--> 372 pm.run(self.state)
373 if self.state.cr is not None:
374 break
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
339 (self.pipeline_name, pass_desc)
340 patched_exception = self._patch_error(msg, e)
--> 341 raise patched_exception
342
343 def dependency_analysis(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
330 pass_inst = _pass_registry.get(pss).pass_inst
331 if isinstance(pass_inst, CompilerPass):
--> 332 self._runPass(idx, pass_inst, state)
333 else:
334 raise BaseException("Legacy pass in use")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
289 mutated |= check(pss.run_initialization, internal_state)
290 with SimpleTimer() as pass_time:
--> 291 mutated |= check(pss.run_pass, internal_state)
292 with SimpleTimer() as finalize_time:
293 mutated |= check(pss.run_finalizer, internal_state)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in check(func, compiler_state)
262
263 def check(func, compiler_state):
--> 264 mangled = func(compiler_state)
265 if mangled not in (True, False):
266 msg = ("CompilerPass implementations should return True/False. "
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
440
441 # TODO: Pull this out into the pipeline
--> 442 NativeLowering().run_pass(state)
443 lowered = state['cr']
444 signature = typing.signature(state.return_type, *state.args)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
368 lower = lowering.Lower(targetctx, library, fndesc, interp,
369 metadata=metadata)
--> 370 lower.lower()
371 if not flags.no_cpython_wrapper:
372 lower.create_cpython_wrapper(flags.release_gil)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_normal_function(self, fndesc)
225 # Init argument values
226 self.extract_function_arguments()
--> 227 entry_block_tail = self.lower_function_body()
228
229 # Close tail of entry block
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_function_body(self)
251 bb = self.blkmap[offset]
252 self.builder.position_at_end(bb)
--> 253 self.lower_block(block)
254 self.post_lower()
255 return entry_block_tail
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
265 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
269
~/anaconda3/envs/latest_versions38/lib/python3.8/contextlib.py in __exit__(self, type, value, traceback)
129 value = type()
130 try:
--> 131 self.gen.throw(type, value, traceback)
132 except StopIteration as exc:
133 # Suppress StopIteration *unless* it's the same exception that
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
774 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
775 tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
--> 776 reraise(type(newerr), newerr, tb)
777
778
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/utils.py in reraise(tp, value, tb)
78 if value.__traceback__ is not tb:
79 raise value.with_traceback(tb)
---> 80 raise value
81
82
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
'cum_v.2'
File "<ipython-input-19-fa852184280b>", line 30:
def foo(pol_data, decr_vecs):
<source elided>
else:
cum_v = np.kron(cum_v, v)
^
[1] During: lowering "cum_v.4 = cum_v.2" at <ipython-input-19-fa852184280b> (30)
|
KeyError
|
def lower_expr(self, expr):
if expr.op == "binop":
return self.lower_binop(expr, expr.fn, inplace=False)
elif expr.op == "inplace_binop":
return self.lower_binop(expr, expr.fn, inplace=True)
elif expr.op == "unary":
value = self.loadvar(expr.value.name)
if expr.fn == operator.neg:
res = self.pyapi.number_negative(value)
elif expr.fn == operator.pos:
res = self.pyapi.number_positive(value)
elif expr.fn == operator.not_:
res = self.pyapi.object_not(value)
self.check_int_status(res)
longval = self.builder.zext(res, self.pyapi.long)
res = self.pyapi.bool_from_long(longval)
elif expr.fn == operator.invert:
res = self.pyapi.number_invert(value)
else:
raise NotImplementedError(expr)
self.check_error(res)
return res
elif expr.op == "call":
argvals = [self.loadvar(a.name) for a in expr.args]
fn = self.loadvar(expr.func.name)
args = self.pyapi.tuple_pack(argvals)
if expr.vararg:
# Expand *args
new_args = self.pyapi.number_add(args, self.loadvar(expr.vararg.name))
self.decref(args)
args = new_args
if not expr.kws:
# No named arguments
ret = self.pyapi.call(fn, args, None)
else:
# Named arguments
keyvalues = [(k, self.loadvar(v.name)) for k, v in expr.kws]
kws = self.pyapi.dict_pack(keyvalues)
ret = self.pyapi.call(fn, args, kws)
self.decref(kws)
self.decref(args)
self.check_error(ret)
return ret
elif expr.op == "getattr":
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getattr(obj, self._freeze_string(expr.attr))
self.check_error(res)
return res
elif expr.op == "build_tuple":
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.tuple_pack(items)
self.check_error(res)
return res
elif expr.op == "build_list":
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.list_pack(items)
self.check_error(res)
return res
elif expr.op == "build_map":
res = self.pyapi.dict_new(expr.size)
self.check_error(res)
for k, v in expr.items:
key = self.loadvar(k.name)
value = self.loadvar(v.name)
ok = self.pyapi.dict_setitem(res, key, value)
self.check_int_status(ok)
return res
elif expr.op == "build_set":
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.set_new()
self.check_error(res)
for it in items:
ok = self.pyapi.set_add(res, it)
self.check_int_status(ok)
return res
elif expr.op == "getiter":
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getiter(obj)
self.check_error(res)
return res
elif expr.op == "iternext":
iterobj = self.loadvar(expr.value.name)
item = self.pyapi.iter_next(iterobj)
is_valid = cgutils.is_not_null(self.builder, item)
pair = self.pyapi.tuple_new(2)
with self.builder.if_else(is_valid) as (then, otherwise):
with then:
self.pyapi.tuple_setitem(pair, 0, item)
with otherwise:
self.check_occurred()
# Make the tuple valid by inserting None as dummy
# iteration "result" (it will be ignored).
self.pyapi.tuple_setitem(pair, 0, self.pyapi.make_none())
self.pyapi.tuple_setitem(pair, 1, self.pyapi.bool_from_bool(is_valid))
return pair
elif expr.op == "pair_first":
pair = self.loadvar(expr.value.name)
first = self.pyapi.tuple_getitem(pair, 0)
self.incref(first)
return first
elif expr.op == "pair_second":
pair = self.loadvar(expr.value.name)
second = self.pyapi.tuple_getitem(pair, 1)
self.incref(second)
return second
elif expr.op == "exhaust_iter":
iterobj = self.loadvar(expr.value.name)
tup = self.pyapi.sequence_tuple(iterobj)
self.check_error(tup)
# Check tuple size is as expected
tup_size = self.pyapi.tuple_size(tup)
expected_size = self.context.get_constant(types.intp, expr.count)
has_wrong_size = self.builder.icmp(lc.ICMP_NE, tup_size, expected_size)
with cgutils.if_unlikely(self.builder, has_wrong_size):
self.return_exception(ValueError)
return tup
elif expr.op == "getitem":
value = self.loadvar(expr.value.name)
index = self.loadvar(expr.index.name)
res = self.pyapi.object_getitem(value, index)
self.check_error(res)
return res
elif expr.op == "static_getitem":
value = self.loadvar(expr.value.name)
index = self.context.get_constant(types.intp, expr.index)
indexobj = self.pyapi.long_from_ssize_t(index)
self.check_error(indexobj)
res = self.pyapi.object_getitem(value, indexobj)
self.decref(indexobj)
self.check_error(res)
return res
elif expr.op == "getslice":
target = self.loadvar(expr.target.name)
start = self.loadvar(expr.start.name)
stop = self.loadvar(expr.stop.name)
slicefn = self.get_builtin_obj("slice")
sliceobj = self.pyapi.call_function_objargs(slicefn, (start, stop))
self.decref(slicefn)
self.check_error(sliceobj)
res = self.pyapi.object_getitem(target, sliceobj)
self.check_error(res)
return res
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
self.incref(val)
return val
elif expr.op == "phi":
raise LoweringError("PHI not stripped")
elif expr.op == "null":
# Make null value
return cgutils.get_null_value(self.pyapi.pyobj)
else:
raise NotImplementedError(expr)
|
def lower_expr(self, expr):
if expr.op == "binop":
return self.lower_binop(expr, expr.fn, inplace=False)
elif expr.op == "inplace_binop":
return self.lower_binop(expr, expr.fn, inplace=True)
elif expr.op == "unary":
value = self.loadvar(expr.value.name)
if expr.fn == operator.neg:
res = self.pyapi.number_negative(value)
elif expr.fn == operator.pos:
res = self.pyapi.number_positive(value)
elif expr.fn == operator.not_:
res = self.pyapi.object_not(value)
self.check_int_status(res)
longval = self.builder.zext(res, self.pyapi.long)
res = self.pyapi.bool_from_long(longval)
elif expr.fn == operator.invert:
res = self.pyapi.number_invert(value)
else:
raise NotImplementedError(expr)
self.check_error(res)
return res
elif expr.op == "call":
argvals = [self.loadvar(a.name) for a in expr.args]
fn = self.loadvar(expr.func.name)
args = self.pyapi.tuple_pack(argvals)
if expr.vararg:
# Expand *args
new_args = self.pyapi.number_add(args, self.loadvar(expr.vararg.name))
self.decref(args)
args = new_args
if not expr.kws:
# No named arguments
ret = self.pyapi.call(fn, args, None)
else:
# Named arguments
keyvalues = [(k, self.loadvar(v.name)) for k, v in expr.kws]
kws = self.pyapi.dict_pack(keyvalues)
ret = self.pyapi.call(fn, args, kws)
self.decref(kws)
self.decref(args)
self.check_error(ret)
return ret
elif expr.op == "getattr":
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getattr(obj, self._freeze_string(expr.attr))
self.check_error(res)
return res
elif expr.op == "build_tuple":
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.tuple_pack(items)
self.check_error(res)
return res
elif expr.op == "build_list":
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.list_pack(items)
self.check_error(res)
return res
elif expr.op == "build_map":
res = self.pyapi.dict_new(expr.size)
self.check_error(res)
for k, v in expr.items:
key = self.loadvar(k.name)
value = self.loadvar(v.name)
ok = self.pyapi.dict_setitem(res, key, value)
self.check_int_status(ok)
return res
elif expr.op == "build_set":
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.set_new()
self.check_error(res)
for it in items:
ok = self.pyapi.set_add(res, it)
self.check_int_status(ok)
return res
elif expr.op == "getiter":
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getiter(obj)
self.check_error(res)
return res
elif expr.op == "iternext":
iterobj = self.loadvar(expr.value.name)
item = self.pyapi.iter_next(iterobj)
is_valid = cgutils.is_not_null(self.builder, item)
pair = self.pyapi.tuple_new(2)
with self.builder.if_else(is_valid) as (then, otherwise):
with then:
self.pyapi.tuple_setitem(pair, 0, item)
with otherwise:
self.check_occurred()
# Make the tuple valid by inserting None as dummy
# iteration "result" (it will be ignored).
self.pyapi.tuple_setitem(pair, 0, self.pyapi.make_none())
self.pyapi.tuple_setitem(pair, 1, self.pyapi.bool_from_bool(is_valid))
return pair
elif expr.op == "pair_first":
pair = self.loadvar(expr.value.name)
first = self.pyapi.tuple_getitem(pair, 0)
self.incref(first)
return first
elif expr.op == "pair_second":
pair = self.loadvar(expr.value.name)
second = self.pyapi.tuple_getitem(pair, 1)
self.incref(second)
return second
elif expr.op == "exhaust_iter":
iterobj = self.loadvar(expr.value.name)
tup = self.pyapi.sequence_tuple(iterobj)
self.check_error(tup)
# Check tuple size is as expected
tup_size = self.pyapi.tuple_size(tup)
expected_size = self.context.get_constant(types.intp, expr.count)
has_wrong_size = self.builder.icmp(lc.ICMP_NE, tup_size, expected_size)
with cgutils.if_unlikely(self.builder, has_wrong_size):
self.return_exception(ValueError)
return tup
elif expr.op == "getitem":
value = self.loadvar(expr.value.name)
index = self.loadvar(expr.index.name)
res = self.pyapi.object_getitem(value, index)
self.check_error(res)
return res
elif expr.op == "static_getitem":
value = self.loadvar(expr.value.name)
index = self.context.get_constant(types.intp, expr.index)
indexobj = self.pyapi.long_from_ssize_t(index)
self.check_error(indexobj)
res = self.pyapi.object_getitem(value, indexobj)
self.decref(indexobj)
self.check_error(res)
return res
elif expr.op == "getslice":
target = self.loadvar(expr.target.name)
start = self.loadvar(expr.start.name)
stop = self.loadvar(expr.stop.name)
slicefn = self.get_builtin_obj("slice")
sliceobj = self.pyapi.call_function_objargs(slicefn, (start, stop))
self.decref(slicefn)
self.check_error(sliceobj)
res = self.pyapi.object_getitem(target, sliceobj)
self.check_error(res)
return res
elif expr.op == "cast":
val = self.loadvar(expr.value.name)
self.incref(val)
return val
elif expr.op == "phi":
raise LoweringError("PHI not stripped")
else:
raise NotImplementedError(expr)
|
https://github.com/numba/numba/issues/5482
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
768 try:
--> 769 yield
770 except NumbaError as e:
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_inst(self, inst)
363 ty = self.typeof(inst.target.name)
--> 364 val = self.lower_assign(ty, inst)
365 self.storevar(val, inst.target.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_assign(self, ty, inst)
540 elif isinstance(value, ir.Var):
--> 541 val = self.loadvar(value.name)
542 oty = self.typeof(value.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in loadvar(self, name)
1280 """
-> 1281 ptr = self.getvar(name)
1282 return self.builder.load(ptr)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in getvar(self, name)
1274 """
-> 1275 return self.varmap[name]
1276
KeyError: 'cum_v.2'
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-19-fa852184280b> in <module>
31 return probs
32
---> 33 foo(pol_data, decr_vecs)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, sig)
792 self._cache_misses[sig] += 1
793 try:
--> 794 cres = self._compiler.compile(args, return_type)
795 except errors.ForceLiteralArg as e:
796 def folded(args, kws):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_core(self, args, return_type)
102
103 impl = self._get_implementation(args, {})
--> 104 cres = compiler.compile_extra(self.targetdescr.typing_context,
105 self.targetdescr.target_context,
106 impl,
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
566 pipeline = pipeline_class(typingctx, targetctx, library,
567 args, return_type, flags, locals)
--> 568 return pipeline.compile_extra(func)
569
570
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(self, func)
337 self.state.lifted = ()
338 self.state.lifted_from = None
--> 339 return self._compile_bytecode()
340
341 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_bytecode(self)
399 """
400 assert self.state.func_ir is None
--> 401 return self._compile_core()
402
403 def _compile_ir(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
379 self.state.status.fail_reason = e
380 if is_final_pipeline:
--> 381 raise e
382 else:
383 raise CompilerError("All available pipelines exhausted")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
370 res = None
371 try:
--> 372 pm.run(self.state)
373 if self.state.cr is not None:
374 break
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
339 (self.pipeline_name, pass_desc)
340 patched_exception = self._patch_error(msg, e)
--> 341 raise patched_exception
342
343 def dependency_analysis(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
330 pass_inst = _pass_registry.get(pss).pass_inst
331 if isinstance(pass_inst, CompilerPass):
--> 332 self._runPass(idx, pass_inst, state)
333 else:
334 raise BaseException("Legacy pass in use")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
289 mutated |= check(pss.run_initialization, internal_state)
290 with SimpleTimer() as pass_time:
--> 291 mutated |= check(pss.run_pass, internal_state)
292 with SimpleTimer() as finalize_time:
293 mutated |= check(pss.run_finalizer, internal_state)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in check(func, compiler_state)
262
263 def check(func, compiler_state):
--> 264 mangled = func(compiler_state)
265 if mangled not in (True, False):
266 msg = ("CompilerPass implementations should return True/False. "
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
440
441 # TODO: Pull this out into the pipeline
--> 442 NativeLowering().run_pass(state)
443 lowered = state['cr']
444 signature = typing.signature(state.return_type, *state.args)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
368 lower = lowering.Lower(targetctx, library, fndesc, interp,
369 metadata=metadata)
--> 370 lower.lower()
371 if not flags.no_cpython_wrapper:
372 lower.create_cpython_wrapper(flags.release_gil)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_normal_function(self, fndesc)
225 # Init argument values
226 self.extract_function_arguments()
--> 227 entry_block_tail = self.lower_function_body()
228
229 # Close tail of entry block
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_function_body(self)
251 bb = self.blkmap[offset]
252 self.builder.position_at_end(bb)
--> 253 self.lower_block(block)
254 self.post_lower()
255 return entry_block_tail
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
265 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
269
~/anaconda3/envs/latest_versions38/lib/python3.8/contextlib.py in __exit__(self, type, value, traceback)
129 value = type()
130 try:
--> 131 self.gen.throw(type, value, traceback)
132 except StopIteration as exc:
133 # Suppress StopIteration *unless* it's the same exception that
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
774 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
775 tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
--> 776 reraise(type(newerr), newerr, tb)
777
778
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/utils.py in reraise(tp, value, tb)
78 if value.__traceback__ is not tb:
79 raise value.with_traceback(tb)
---> 80 raise value
81
82
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
'cum_v.2'
File "<ipython-input-19-fa852184280b>", line 30:
def foo(pol_data, decr_vecs):
<source elided>
else:
cum_v = np.kron(cum_v, v)
^
[1] During: lowering "cum_v.4 = cum_v.2" at <ipython-input-19-fa852184280b> (30)
|
KeyError
|
def _fix_ssa_vars(blocks, varname, defmap):
"""Rewrite all uses to ``varname`` given the definition map"""
states = _make_states(blocks)
states["varname"] = varname
states["defmap"] = defmap
states["phimap"] = phimap = defaultdict(list)
states["cfg"] = cfg = compute_cfg_from_blocks(blocks)
states["df+"] = _iterated_domfronts(cfg)
newblocks = _run_block_rewrite(blocks, states, _FixSSAVars())
# check for unneeded phi nodes
_remove_unneeded_phis(phimap)
# insert phi nodes
for label, philist in phimap.items():
curblk = newblocks[label]
# Prepend PHI nodes to the block
curblk.body = philist + curblk.body
return newblocks
|
def _fix_ssa_vars(blocks, varname, defmap):
"""Rewrite all uses to ``varname`` given the definition map"""
states = _make_states(blocks)
states["varname"] = varname
states["defmap"] = defmap
states["phimap"] = phimap = defaultdict(list)
states["cfg"] = cfg = compute_cfg_from_blocks(blocks)
states["df+"] = _iterated_domfronts(cfg)
newblocks = _run_block_rewrite(blocks, states, _FixSSAVars())
# insert phi nodes
for label, philist in phimap.items():
curblk = newblocks[label]
# Prepend PHI nodes to the block
curblk.body = philist + curblk.body
return newblocks
|
https://github.com/numba/numba/issues/5482
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
768 try:
--> 769 yield
770 except NumbaError as e:
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_inst(self, inst)
363 ty = self.typeof(inst.target.name)
--> 364 val = self.lower_assign(ty, inst)
365 self.storevar(val, inst.target.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_assign(self, ty, inst)
540 elif isinstance(value, ir.Var):
--> 541 val = self.loadvar(value.name)
542 oty = self.typeof(value.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in loadvar(self, name)
1280 """
-> 1281 ptr = self.getvar(name)
1282 return self.builder.load(ptr)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in getvar(self, name)
1274 """
-> 1275 return self.varmap[name]
1276
KeyError: 'cum_v.2'
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-19-fa852184280b> in <module>
31 return probs
32
---> 33 foo(pol_data, decr_vecs)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, sig)
792 self._cache_misses[sig] += 1
793 try:
--> 794 cres = self._compiler.compile(args, return_type)
795 except errors.ForceLiteralArg as e:
796 def folded(args, kws):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_core(self, args, return_type)
102
103 impl = self._get_implementation(args, {})
--> 104 cres = compiler.compile_extra(self.targetdescr.typing_context,
105 self.targetdescr.target_context,
106 impl,
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
566 pipeline = pipeline_class(typingctx, targetctx, library,
567 args, return_type, flags, locals)
--> 568 return pipeline.compile_extra(func)
569
570
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(self, func)
337 self.state.lifted = ()
338 self.state.lifted_from = None
--> 339 return self._compile_bytecode()
340
341 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_bytecode(self)
399 """
400 assert self.state.func_ir is None
--> 401 return self._compile_core()
402
403 def _compile_ir(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
379 self.state.status.fail_reason = e
380 if is_final_pipeline:
--> 381 raise e
382 else:
383 raise CompilerError("All available pipelines exhausted")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
370 res = None
371 try:
--> 372 pm.run(self.state)
373 if self.state.cr is not None:
374 break
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
339 (self.pipeline_name, pass_desc)
340 patched_exception = self._patch_error(msg, e)
--> 341 raise patched_exception
342
343 def dependency_analysis(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
330 pass_inst = _pass_registry.get(pss).pass_inst
331 if isinstance(pass_inst, CompilerPass):
--> 332 self._runPass(idx, pass_inst, state)
333 else:
334 raise BaseException("Legacy pass in use")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
289 mutated |= check(pss.run_initialization, internal_state)
290 with SimpleTimer() as pass_time:
--> 291 mutated |= check(pss.run_pass, internal_state)
292 with SimpleTimer() as finalize_time:
293 mutated |= check(pss.run_finalizer, internal_state)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in check(func, compiler_state)
262
263 def check(func, compiler_state):
--> 264 mangled = func(compiler_state)
265 if mangled not in (True, False):
266 msg = ("CompilerPass implementations should return True/False. "
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
440
441 # TODO: Pull this out into the pipeline
--> 442 NativeLowering().run_pass(state)
443 lowered = state['cr']
444 signature = typing.signature(state.return_type, *state.args)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
368 lower = lowering.Lower(targetctx, library, fndesc, interp,
369 metadata=metadata)
--> 370 lower.lower()
371 if not flags.no_cpython_wrapper:
372 lower.create_cpython_wrapper(flags.release_gil)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_normal_function(self, fndesc)
225 # Init argument values
226 self.extract_function_arguments()
--> 227 entry_block_tail = self.lower_function_body()
228
229 # Close tail of entry block
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_function_body(self)
251 bb = self.blkmap[offset]
252 self.builder.position_at_end(bb)
--> 253 self.lower_block(block)
254 self.post_lower()
255 return entry_block_tail
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
265 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
269
~/anaconda3/envs/latest_versions38/lib/python3.8/contextlib.py in __exit__(self, type, value, traceback)
129 value = type()
130 try:
--> 131 self.gen.throw(type, value, traceback)
132 except StopIteration as exc:
133 # Suppress StopIteration *unless* it's the same exception that
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
774 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
775 tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
--> 776 reraise(type(newerr), newerr, tb)
777
778
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/utils.py in reraise(tp, value, tb)
78 if value.__traceback__ is not tb:
79 raise value.with_traceback(tb)
---> 80 raise value
81
82
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
'cum_v.2'
File "<ipython-input-19-fa852184280b>", line 30:
def foo(pol_data, decr_vecs):
<source elided>
else:
cum_v = np.kron(cum_v, v)
^
[1] During: lowering "cum_v.4 = cum_v.2" at <ipython-input-19-fa852184280b> (30)
|
KeyError
|
def _strip_phi_nodes(self, func_ir):
"""Strip Phi nodes from ``func_ir``
For each phi node, put incoming value to their respective incoming
basic-block at possibly the latest position (i.e. after the latest
assignment to the corresponding variable).
"""
exporters = defaultdict(list)
phis = set()
# Find all variables that needs to be exported
for label, block in func_ir.blocks.items():
for assign in block.find_insts(ir.Assign):
if isinstance(assign.value, ir.Expr):
if assign.value.op == "phi":
phis.add(assign)
phi = assign.value
for ib, iv in zip(phi.incoming_blocks, phi.incoming_values):
exporters[ib].append((assign.target, iv))
# Rewrite the blocks with the new exporting assignments
newblocks = {}
for label, block in func_ir.blocks.items():
newblk = copy(block)
newblocks[label] = newblk
# strip phis
newblk.body = [stmt for stmt in block.body if stmt not in phis]
# insert exporters
for target, rhs in exporters[label]:
# If RHS is undefined
if rhs is ir.UNDEFINED:
# Put in a NULL initializer
rhs = ir.Expr.null(loc=target.loc)
assign = ir.Assign(target=target, value=rhs, loc=target.loc)
# Insert at the earliest possible location; i.e. after the
# last assignment to rhs
assignments = [
stmt for stmt in newblk.find_insts(ir.Assign) if stmt.target == rhs
]
if assignments:
last_assignment = assignments[-1]
newblk.insert_after(assign, last_assignment)
else:
newblk.prepend(assign)
func_ir.blocks = newblocks
return func_ir
|
def _strip_phi_nodes(self, func_ir):
"""Strip Phi nodes from ``func_ir``
For each phi node, put incoming value to their respective incoming
basic-block at possibly the latest position (i.e. after the latest
assignment to the corresponding variable).
"""
exporters = defaultdict(list)
phis = set()
# Find all variables that needs to be exported
for label, block in func_ir.blocks.items():
for assign in block.find_insts(ir.Assign):
if isinstance(assign.value, ir.Expr):
if assign.value.op == "phi":
phis.add(assign)
phi = assign.value
for ib, iv in zip(phi.incoming_blocks, phi.incoming_values):
exporters[ib].append((assign.target, iv))
# Rewrite the blocks with the new exporting assignments
newblocks = {}
for label, block in func_ir.blocks.items():
newblk = copy(block)
newblocks[label] = newblk
# strip phis
newblk.body = [stmt for stmt in block.body if stmt not in phis]
# insert exporters
for target, rhs in exporters[label]:
if rhs is not ir.UNDEFINED:
assign = ir.Assign(target=target, value=rhs, loc=target.loc)
# Insert at the earliest possible location; i.e. after the
# last assignment to rhs
assignments = [
stmt for stmt in newblk.find_insts(ir.Assign) if stmt.target == rhs
]
if assignments:
last_assignment = assignments[-1]
newblk.insert_after(assign, last_assignment)
else:
newblk.prepend(assign)
func_ir.blocks = newblocks
return func_ir
|
https://github.com/numba/numba/issues/5482
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
768 try:
--> 769 yield
770 except NumbaError as e:
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_inst(self, inst)
363 ty = self.typeof(inst.target.name)
--> 364 val = self.lower_assign(ty, inst)
365 self.storevar(val, inst.target.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_assign(self, ty, inst)
540 elif isinstance(value, ir.Var):
--> 541 val = self.loadvar(value.name)
542 oty = self.typeof(value.name)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in loadvar(self, name)
1280 """
-> 1281 ptr = self.getvar(name)
1282 return self.builder.load(ptr)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in getvar(self, name)
1274 """
-> 1275 return self.varmap[name]
1276
KeyError: 'cum_v.2'
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-19-fa852184280b> in <module>
31 return probs
32
---> 33 foo(pol_data, decr_vecs)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
418 e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
419 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 420 raise e
421
422 def inspect_llvm(self, signature=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_for_args(self, *args, **kws)
351 argtypes.append(self.typeof_pyval(a))
352 try:
--> 353 return self.compile(tuple(argtypes))
354 except errors.ForceLiteralArg as e:
355 # Received request for compiler re-entry with the list of arguments
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, sig)
792 self._cache_misses[sig] += 1
793 try:
--> 794 cres = self._compiler.compile(args, return_type)
795 except errors.ForceLiteralArg as e:
796 def folded(args, kws):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in compile(self, args, return_type)
75
76 def compile(self, args, return_type):
---> 77 status, retval = self._compile_cached(args, return_type)
78 if status:
79 return retval
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_cached(self, args, return_type)
89
90 try:
---> 91 retval = self._compile_core(args, return_type)
92 except errors.TypingError as e:
93 self._failed_cache[key] = e
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/dispatcher.py in _compile_core(self, args, return_type)
102
103 impl = self._get_implementation(args, {})
--> 104 cres = compiler.compile_extra(self.targetdescr.typing_context,
105 self.targetdescr.target_context,
106 impl,
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
566 pipeline = pipeline_class(typingctx, targetctx, library,
567 args, return_type, flags, locals)
--> 568 return pipeline.compile_extra(func)
569
570
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in compile_extra(self, func)
337 self.state.lifted = ()
338 self.state.lifted_from = None
--> 339 return self._compile_bytecode()
340
341 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_bytecode(self)
399 """
400 assert self.state.func_ir is None
--> 401 return self._compile_core()
402
403 def _compile_ir(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
379 self.state.status.fail_reason = e
380 if is_final_pipeline:
--> 381 raise e
382 else:
383 raise CompilerError("All available pipelines exhausted")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler.py in _compile_core(self)
370 res = None
371 try:
--> 372 pm.run(self.state)
373 if self.state.cr is not None:
374 break
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
339 (self.pipeline_name, pass_desc)
340 patched_exception = self._patch_error(msg, e)
--> 341 raise patched_exception
342
343 def dependency_analysis(self):
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in run(self, state)
330 pass_inst = _pass_registry.get(pss).pass_inst
331 if isinstance(pass_inst, CompilerPass):
--> 332 self._runPass(idx, pass_inst, state)
333 else:
334 raise BaseException("Legacy pass in use")
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in _runPass(self, index, pss, internal_state)
289 mutated |= check(pss.run_initialization, internal_state)
290 with SimpleTimer() as pass_time:
--> 291 mutated |= check(pss.run_pass, internal_state)
292 with SimpleTimer() as finalize_time:
293 mutated |= check(pss.run_finalizer, internal_state)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/compiler_machinery.py in check(func, compiler_state)
262
263 def check(func, compiler_state):
--> 264 mangled = func(compiler_state)
265 if mangled not in (True, False):
266 msg = ("CompilerPass implementations should return True/False. "
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
440
441 # TODO: Pull this out into the pipeline
--> 442 NativeLowering().run_pass(state)
443 lowered = state['cr']
444 signature = typing.signature(state.return_type, *state.args)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/typed_passes.py in run_pass(self, state)
368 lower = lowering.Lower(targetctx, library, fndesc, interp,
369 metadata=metadata)
--> 370 lower.lower()
371 if not flags.no_cpython_wrapper:
372 lower.create_cpython_wrapper(flags.release_gil)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower(self)
177 if self.generator_info is None:
178 self.genlower = None
--> 179 self.lower_normal_function(self.fndesc)
180 else:
181 self.genlower = self.GeneratorLower(self)
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_normal_function(self, fndesc)
225 # Init argument values
226 self.extract_function_arguments()
--> 227 entry_block_tail = self.lower_function_body()
228
229 # Close tail of entry block
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_function_body(self)
251 bb = self.blkmap[offset]
252 self.builder.position_at_end(bb)
--> 253 self.lower_block(block)
254 self.post_lower()
255 return entry_block_tail
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/lowering.py in lower_block(self, block)
265 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
266 loc=self.loc, errcls_=defaulterrcls):
--> 267 self.lower_inst(inst)
268 self.post_block(block)
269
~/anaconda3/envs/latest_versions38/lib/python3.8/contextlib.py in __exit__(self, type, value, traceback)
129 value = type()
130 try:
--> 131 self.gen.throw(type, value, traceback)
132 except StopIteration as exc:
133 # Suppress StopIteration *unless* it's the same exception that
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/errors.py in new_error_context(fmt_, *args, **kwargs)
774 newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
775 tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
--> 776 reraise(type(newerr), newerr, tb)
777
778
~/anaconda3/envs/latest_versions38/lib/python3.8/site-packages/numba/core/utils.py in reraise(tp, value, tb)
78 if value.__traceback__ is not tb:
79 raise value.with_traceback(tb)
---> 80 raise value
81
82
LoweringError: Failed in nopython mode pipeline (step: nopython mode backend)
'cum_v.2'
File "<ipython-input-19-fa852184280b>", line 30:
def foo(pol_data, decr_vecs):
<source elided>
else:
cum_v = np.kron(cum_v, v)
^
[1] During: lowering "cum_v.4 = cum_v.2" at <ipython-input-19-fa852184280b> (30)
|
KeyError
|
def apply_transform(self, state):
# compute new CFG
func_ir = state.func_ir
cfg = compute_cfg_from_blocks(func_ir.blocks)
# find loops
loops = cfg.loops()
# 0. Find the loops containing literal_unroll and store this
# information
literal_unroll_info = dict()
unroll_info = namedtuple("unroll_info", ["loop", "call", "arg", "getitem"])
def get_call_args(init_arg, want):
# Chases the assignment of a called value back through a specific
# call to a global function "want" and returns the arguments
# supplied to that function's call
some_call = get_definition(func_ir, init_arg)
if not isinstance(some_call, ir.Expr):
raise GuardException
if not some_call.op == "call":
raise GuardException
the_global = get_definition(func_ir, some_call.func)
if not isinstance(the_global, ir.Global):
raise GuardException
if the_global.value is not want:
raise GuardException
return some_call
for lbl, loop in loops.items():
# TODO: check the loop head has literal_unroll, if it does but
# does not conform to the following then raise
# scan loop header
iternexts = [_ for _ in func_ir.blocks[loop.header].find_exprs("iternext")]
if len(iternexts) != 1:
return False
for iternext in iternexts:
# Walk the canonicalised loop structure and check it
# Check loop form range(literal_unroll(container)))
phi = guard(get_definition, func_ir, iternext.value)
if phi is None:
continue
# check call global "range"
range_call = guard(get_call_args, phi.value, range)
if range_call is None:
continue
range_arg = range_call.args[0]
# check call global "len"
len_call = guard(get_call_args, range_arg, len)
if len_call is None:
continue
len_arg = len_call.args[0]
# check literal_unroll
literal_unroll_call = guard(get_definition, func_ir, len_arg)
if literal_unroll_call is None:
continue
if not isinstance(literal_unroll_call, ir.Expr):
continue
if literal_unroll_call.op != "call":
continue
literal_func = getattr(literal_unroll_call, "func", None)
if not literal_func:
continue
call_func = guard(get_definition, func_ir, literal_unroll_call.func)
if call_func is None:
continue
call_func = call_func.value
if call_func is literal_unroll:
assert len(literal_unroll_call.args) == 1
arg = literal_unroll_call.args[0]
typemap = state.typemap
resolved_arg = guard(get_definition, func_ir, arg, lhs_only=True)
ty = typemap[resolved_arg.name]
assert isinstance(ty, self._accepted_types)
# loop header is spelled ok, now make sure the body
# actually contains a getitem
# find a "getitem"
tuple_getitem = None
for lbl in loop.body:
blk = func_ir.blocks[lbl]
for stmt in blk.body:
if isinstance(stmt, ir.Assign):
if (
isinstance(stmt.value, ir.Expr)
and stmt.value.op == "getitem"
):
# check for something like a[i]
if stmt.value.value != arg:
# that failed, so check for the
# definition
dfn = guard(
get_definition, func_ir, stmt.value.value
)
if dfn is None:
continue
try:
args = getattr(dfn, "args", False)
except KeyError:
continue
if not args:
continue
if not args[0] == arg:
continue
target_ty = state.typemap[arg.name]
if not isinstance(target_ty, self._accepted_types):
continue
tuple_getitem = stmt
break
if tuple_getitem:
break
else:
continue # no getitem in this loop
ui = unroll_info(loop, literal_unroll_call, arg, tuple_getitem)
literal_unroll_info[lbl] = ui
if not literal_unroll_info:
return False
# 1. Validate loops, must not have any calls to literal_unroll
for test_lbl, test_loop in literal_unroll_info.items():
for ref_lbl, ref_loop in literal_unroll_info.items():
if test_lbl == ref_lbl: # comparing to self! skip
continue
if test_loop.loop.header in ref_loop.loop.body:
msg = "Nesting of literal_unroll is unsupported"
loc = func_ir.blocks[test_loop.loop.header].loc
raise errors.UnsupportedError(msg, loc)
# 2. Do the unroll, get a loop and process it!
lbl, info = literal_unroll_info.popitem()
self.unroll_loop(state, info)
# 3. Rebuild the state, the IR has taken a hammering
func_ir.blocks = simplify_CFG(func_ir.blocks)
post_proc = postproc.PostProcessor(func_ir)
post_proc.run()
if self._DEBUG:
print("-" * 80 + "END OF PASS, SIMPLIFY DONE")
func_ir.dump()
func_ir._definitions = build_definitions(func_ir.blocks)
return True
|
def apply_transform(self, state):
# compute new CFG
func_ir = state.func_ir
cfg = compute_cfg_from_blocks(func_ir.blocks)
# find loops
loops = cfg.loops()
# 0. Find the loops containing literal_unroll and store this
# information
literal_unroll_info = dict()
unroll_info = namedtuple("unroll_info", ["loop", "call", "arg", "getitem"])
def get_call_args(init_arg, want):
# Chases the assignment of a called value back through a specific
# call to a global function "want" and returns the arguments
# supplied to that function's call
some_call = get_definition(func_ir, init_arg)
if not isinstance(some_call, ir.Expr):
raise GuardException
if not some_call.op == "call":
raise GuardException
the_global = get_definition(func_ir, some_call.func)
if not isinstance(the_global, ir.Global):
raise GuardException
if the_global.value is not want:
raise GuardException
return some_call
for lbl, loop in loops.items():
# TODO: check the loop head has literal_unroll, if it does but
# does not conform to the following then raise
# scan loop header
iternexts = [_ for _ in func_ir.blocks[loop.header].find_exprs("iternext")]
if len(iternexts) != 1:
return False
for iternext in iternexts:
# Walk the canonicalised loop structure and check it
# Check loop form range(literal_unroll(container)))
phi = guard(get_definition, func_ir, iternext.value)
if phi is None:
continue
# check call global "range"
range_call = guard(get_call_args, phi.value, range)
if range_call is None:
continue
range_arg = range_call.args[0]
# check call global "len"
len_call = guard(get_call_args, range_arg, len)
if len_call is None:
continue
len_arg = len_call.args[0]
# check literal_unroll
literal_unroll_call = guard(get_definition, func_ir, len_arg)
if literal_unroll_call is None:
continue
if not isinstance(literal_unroll_call, ir.Expr):
continue
if literal_unroll_call.op != "call":
continue
literal_func = getattr(literal_unroll_call, "func", None)
if not literal_func:
continue
call_func = guard(get_definition, func_ir, literal_unroll_call.func)
if call_func is None:
continue
call_func = call_func.value
if call_func is literal_unroll:
assert len(literal_unroll_call.args) == 1
arg = literal_unroll_call.args[0]
typemap = state.typemap
resolved_arg = guard(get_definition, func_ir, arg, lhs_only=True)
ty = typemap[resolved_arg.name]
assert isinstance(ty, self._accepted_types)
# loop header is spelled ok, now make sure the body
# actually contains a getitem
# find a "getitem"
tuple_getitem = None
for lbl in loop.body:
blk = func_ir.blocks[lbl]
for stmt in blk.body:
if isinstance(stmt, ir.Assign):
if (
isinstance(stmt.value, ir.Expr)
and stmt.value.op == "getitem"
):
# check for something like a[i]
if stmt.value.value != arg:
# that failed, so check for the
# definition
dfn = guard(
get_definition, func_ir, stmt.value.value
)
if dfn is None:
continue
args = getattr(dfn, "args", False)
if not args:
continue
if not args[0] == arg:
continue
target_ty = state.typemap[arg.name]
if not isinstance(target_ty, self._accepted_types):
continue
tuple_getitem = stmt
break
if tuple_getitem:
break
else:
continue # no getitem in this loop
ui = unroll_info(loop, literal_unroll_call, arg, tuple_getitem)
literal_unroll_info[lbl] = ui
if not literal_unroll_info:
return False
# 1. Validate loops, must not have any calls to literal_unroll
for test_lbl, test_loop in literal_unroll_info.items():
for ref_lbl, ref_loop in literal_unroll_info.items():
if test_lbl == ref_lbl: # comparing to self! skip
continue
if test_loop.loop.header in ref_loop.loop.body:
msg = "Nesting of literal_unroll is unsupported"
loc = func_ir.blocks[test_loop.loop.header].loc
raise errors.UnsupportedError(msg, loc)
# 2. Do the unroll, get a loop and process it!
lbl, info = literal_unroll_info.popitem()
self.unroll_loop(state, info)
# 3. Rebuild the state, the IR has taken a hammering
func_ir.blocks = simplify_CFG(func_ir.blocks)
post_proc = postproc.PostProcessor(func_ir)
post_proc.run()
if self._DEBUG:
print("-" * 80 + "END OF PASS, SIMPLIFY DONE")
func_ir.dump()
func_ir._definitions = build_definitions(func_ir.blocks)
return True
|
https://github.com/numba/numba/issues/5477
|
ERROR: test_numba_literal_unroll_1 (__main__.TestSuite)
----------------------------------------------------------------------
Traceback (most recent call last):
File ".\example_test.py", line 40, in test_numba_literal_unroll_1
hpat_func(data, n)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 420, in _compile_for_args
raise e
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 794, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 104, in _compile_core
cres = compiler.compile_extra(self.targetdescr.typing_context,
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 568, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 339, in compile_extra
return self._compile_bytecode()
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 401, in _compile_bytecode
return self._compile_core()
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 381, in _compile_core
raise e
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 372, in _compile_core
pm.run(self.state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 341, in run
raise patched_exception
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1447, in run_pass
pm.run(state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 341, in run
raise patched_exception
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1224, in run_pass
stat = self.apply_transform(state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1092, in apply_transform
self.unroll_loop(state, info)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1130, in unroll_loop
args = getattr(dfn, 'args', False)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\ir.py", line 382, in __getattr__
return self._kws[name]
KeyError: 'Failed in nopython mode pipeline (step: handles literal_unroll)\n"Fai
led in literal_unroll_subpipeline mode pipeline (step: performs mixed container
unroll)\\n\'args\'"'
----------------------------------------------------------------------
Ran 1 test in 0.418s
FAILED (errors=1)
|
KeyError
|
def unroll_loop(self, state, loop_info):
# The general idea here is to:
# 1. Find *a* getitem that conforms to the literal_unroll semantic,
# i.e. one that is targeting a tuple with a loop induced index
# 2. Compute a structure from the tuple that describes which
# iterations of a loop will have which type
# 3. Generate a switch table in IR form for the structure in 2
# 4. Switch out getitems for the tuple for a `typed_getitem`
# 5. Inject switch table as replacement loop body
# 6. Patch up
func_ir = state.func_ir
getitem_target = loop_info.arg
target_ty = state.typemap[getitem_target.name]
assert isinstance(target_ty, self._accepted_types)
# 1. find a "getitem" that conforms
tuple_getitem = []
for lbl in loop_info.loop.body:
blk = func_ir.blocks[lbl]
for stmt in blk.body:
if isinstance(stmt, ir.Assign):
if isinstance(stmt.value, ir.Expr) and stmt.value.op == "getitem":
# try a couple of spellings... a[i] and ref(a)[i]
if stmt.value.value != getitem_target:
dfn = func_ir.get_definition(stmt.value.value)
try:
args = getattr(dfn, "args", False)
except KeyError:
continue
if not args:
continue
if not args[0] == getitem_target:
continue
target_ty = state.typemap[getitem_target.name]
if not isinstance(target_ty, self._accepted_types):
continue
tuple_getitem.append(stmt)
if not tuple_getitem:
msg = (
"Loop unrolling analysis has failed, there's no getitem "
"in loop body that conforms to literal_unroll "
"requirements."
)
LOC = func_ir.blocks[loop_info.loop.header].loc
raise errors.CompilerError(msg, LOC)
# 2. get switch data
switch_data = self.analyse_tuple(target_ty)
# 3. generate switch IR
index = func_ir._definitions[tuple_getitem[0].value.index.name][0]
branches = self.gen_switch(switch_data, index)
# 4. swap getitems for a typed_getitem, these are actually just
# placeholders at this point. When the loop is duplicated they can
# be swapped for a typed_getitem of the correct type or if the item
# is literal it can be shoved straight into the duplicated loop body
for item in tuple_getitem:
old = item.value
new = ir.Expr.typed_getitem(old.value, types.void, old.index, old.loc)
item.value = new
# 5. Inject switch table
# Find the actual loop without the header (that won't get replaced)
# and derive some new IR for this set of blocks
this_loop = loop_info.loop
this_loop_body = this_loop.body - set([this_loop.header])
loop_blocks = {x: func_ir.blocks[x] for x in this_loop_body}
new_ir = func_ir.derive(loop_blocks)
# Work out what is live on entry and exit so as to prevent
# replacement (defined vars can escape, used vars live at the header
# need to remain as-is so their references are correct, they can
# also escape).
usedefs = compute_use_defs(func_ir.blocks)
idx = this_loop.header
keep = set()
keep |= usedefs.usemap[idx] | usedefs.defmap[idx]
keep |= func_ir.variable_lifetime.livemap[idx]
dont_replace = [x for x in (keep)]
# compute the unrolled body
unrolled_body = self.inject_loop_body(
branches, new_ir, max(func_ir.blocks.keys()) + 1, dont_replace, switch_data
)
# 6. Patch in the unrolled body and fix up
blks = state.func_ir.blocks
orig_lbl = tuple(this_loop_body)
replace, *delete = orig_lbl
unroll, header_block = unrolled_body, this_loop.header
unroll_lbl = [x for x in sorted(unroll.blocks.keys())]
blks[replace] = unroll.blocks[unroll_lbl[0]]
[blks.pop(d) for d in delete]
for k in unroll_lbl[1:]:
blks[k] = unroll.blocks[k]
# stitch up the loop predicate true -> new loop body jump
blks[header_block].body[-1].truebr = replace
|
def unroll_loop(self, state, loop_info):
# The general idea here is to:
# 1. Find *a* getitem that conforms to the literal_unroll semantic,
# i.e. one that is targeting a tuple with a loop induced index
# 2. Compute a structure from the tuple that describes which
# iterations of a loop will have which type
# 3. Generate a switch table in IR form for the structure in 2
# 4. Switch out getitems for the tuple for a `typed_getitem`
# 5. Inject switch table as replacement loop body
# 6. Patch up
func_ir = state.func_ir
getitem_target = loop_info.arg
target_ty = state.typemap[getitem_target.name]
assert isinstance(target_ty, self._accepted_types)
# 1. find a "getitem" that conforms
tuple_getitem = []
for lbl in loop_info.loop.body:
blk = func_ir.blocks[lbl]
for stmt in blk.body:
if isinstance(stmt, ir.Assign):
if isinstance(stmt.value, ir.Expr) and stmt.value.op == "getitem":
# try a couple of spellings... a[i] and ref(a)[i]
if stmt.value.value != getitem_target:
dfn = func_ir.get_definition(stmt.value.value)
args = getattr(dfn, "args", False)
if not args:
continue
if not args[0] == getitem_target:
continue
target_ty = state.typemap[getitem_target.name]
if not isinstance(target_ty, self._accepted_types):
continue
tuple_getitem.append(stmt)
if not tuple_getitem:
msg = (
"Loop unrolling analysis has failed, there's no getitem "
"in loop body that conforms to literal_unroll "
"requirements."
)
LOC = func_ir.blocks[loop_info.loop.header].loc
raise errors.CompilerError(msg, LOC)
# 2. get switch data
switch_data = self.analyse_tuple(target_ty)
# 3. generate switch IR
index = func_ir._definitions[tuple_getitem[0].value.index.name][0]
branches = self.gen_switch(switch_data, index)
# 4. swap getitems for a typed_getitem, these are actually just
# placeholders at this point. When the loop is duplicated they can
# be swapped for a typed_getitem of the correct type or if the item
# is literal it can be shoved straight into the duplicated loop body
for item in tuple_getitem:
old = item.value
new = ir.Expr.typed_getitem(old.value, types.void, old.index, old.loc)
item.value = new
# 5. Inject switch table
# Find the actual loop without the header (that won't get replaced)
# and derive some new IR for this set of blocks
this_loop = loop_info.loop
this_loop_body = this_loop.body - set([this_loop.header])
loop_blocks = {x: func_ir.blocks[x] for x in this_loop_body}
new_ir = func_ir.derive(loop_blocks)
# Work out what is live on entry and exit so as to prevent
# replacement (defined vars can escape, used vars live at the header
# need to remain as-is so their references are correct, they can
# also escape).
usedefs = compute_use_defs(func_ir.blocks)
idx = this_loop.header
keep = set()
keep |= usedefs.usemap[idx] | usedefs.defmap[idx]
keep |= func_ir.variable_lifetime.livemap[idx]
dont_replace = [x for x in (keep)]
# compute the unrolled body
unrolled_body = self.inject_loop_body(
branches, new_ir, max(func_ir.blocks.keys()) + 1, dont_replace, switch_data
)
# 6. Patch in the unrolled body and fix up
blks = state.func_ir.blocks
orig_lbl = tuple(this_loop_body)
replace, *delete = orig_lbl
unroll, header_block = unrolled_body, this_loop.header
unroll_lbl = [x for x in sorted(unroll.blocks.keys())]
blks[replace] = unroll.blocks[unroll_lbl[0]]
[blks.pop(d) for d in delete]
for k in unroll_lbl[1:]:
blks[k] = unroll.blocks[k]
# stitch up the loop predicate true -> new loop body jump
blks[header_block].body[-1].truebr = replace
|
https://github.com/numba/numba/issues/5477
|
ERROR: test_numba_literal_unroll_1 (__main__.TestSuite)
----------------------------------------------------------------------
Traceback (most recent call last):
File ".\example_test.py", line 40, in test_numba_literal_unroll_1
hpat_func(data, n)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 420, in _compile_for_args
raise e
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 353, in _compile_for_args
return self.compile(tuple(argtypes))
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 794, in compile
cres = self._compiler.compile(args, return_type)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 77, in compile
status, retval = self._compile_cached(args, return_type)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 91, in _compile_cached
retval = self._compile_core(args, return_type)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\dispatcher.py", line 104, in _compile_core
cres = compiler.compile_extra(self.targetdescr.typing_context,
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 568, in compile_extra
return pipeline.compile_extra(func)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 339, in compile_extra
return self._compile_bytecode()
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 401, in _compile_bytecode
return self._compile_core()
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 381, in _compile_core
raise e
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler.py", line 372, in _compile_core
pm.run(self.state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 341, in run
raise patched_exception
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1447, in run_pass
pm.run(state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 341, in run
raise patched_exception
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1224, in run_pass
stat = self.apply_transform(state)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1092, in apply_transform
self.unroll_loop(state, info)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\untyped_passes.py", line 1130, in unroll_loop
args = getattr(dfn, 'args', False)
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\numba_master\numba\nu
mba\core\ir.py", line 382, in __getattr__
return self._kws[name]
KeyError: 'Failed in nopython mode pipeline (step: handles literal_unroll)\n"Fai
led in literal_unroll_subpipeline mode pipeline (step: performs mixed container
unroll)\\n\'args\'"'
----------------------------------------------------------------------
Ran 1 test in 0.418s
FAILED (errors=1)
|
KeyError
|
def new_list(item, allocated=DEFAULT_ALLOCATED):
"""Construct a new list. (Not implemented in the interpreter yet)
Parameters
----------
item: TypeRef
Item type of the new list.
allocated: int
number of items to pre-allocate
"""
# With JIT disabled, ignore all arguments and return a Python list.
return list()
|
def new_list(item, allocated=0):
"""Construct a new list. (Not implemented in the interpreter yet)
Parameters
----------
item: TypeRef
Item type of the new list.
allocated: int
number of items to pre-allocate
"""
# With JIT disabled, ignore all arguments and return a Python list.
return list()
|
https://github.com/numba/numba/issues/5355
|
======================================================================
ERROR: test_numba_typed_list_allocated (sdc.tests.test_series.TestSeries)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\hpat\sdc\tests\test_series.py
", line 6817, in test_numba_typed_list_allocated
print(jitted())
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\disp
atcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\disp
atcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\six.
py", line 668, in reraise
raise value.with_traceback(tb)
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function typedlist_empty at 0x0000015A9C481550>) with argument(
s) of type(s): (typeref[<class 'numba.types.containers.ListType'>], class(int64), int64)
* parameterized
In definition 0:
TypeError: typedlist_empty() takes 2 positional arguments but 3 were given
raised from c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\nu
mba\typing\templates.py:539
In definition 1:
TypeError: typedlist_empty() takes 2 positional arguments but 3 were given
raised from c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\nu
mba\typing\templates.py:539
This error is usually caused by passing an argument of a type that is unsupported by the
named function.
[1] During: resolving callee type: BoundFunction((<class 'numba.types.abstract.TypeRef'>
, 'empty_list') for typeref[<class 'numba.types.containers.ListType'>])
[2] During: typing of call at C:\Users\akozlov\AppData\Local\Continuum\anaconda3\hpat\sd
c\tests\test_series.py (6813)
File "sdc\tests\test_series.py", line 6813:
def test_impl():
<source elided>
# problem in overload of empty_list in typedlist.py
alloc_list = List.empty_list(types.int64, 10)
^
----------------------------------------------------------------------
Ran 1 test in 0.075s
FAILED (errors=1)
|
numba.errors.TypingError
|
def impl_new_list(item, allocated=DEFAULT_ALLOCATED):
"""Creates a new list.
Parameters
----------
item: Numba type
type of the list item.
allocated: int
number of items to pre-allocate
"""
if not isinstance(item, Type):
raise TypeError("expecting *item* to be a numba Type")
itemty = item
def imp(item, allocated=DEFAULT_ALLOCATED):
if allocated < 0:
raise RuntimeError("expecting *allocated* to be >= 0")
lp = _list_new(itemty, allocated)
_list_set_method_table(lp, itemty)
l = _make_list(itemty, lp)
return l
return imp
|
def impl_new_list(item, allocated=0):
"""Creates a new list.
Parameters
----------
item: Numba type
type of the list item.
allocated: int
number of items to pre-allocate
"""
if not isinstance(item, Type):
raise TypeError("expecting *item* to be a numba Type")
itemty = item
def imp(item, allocated=0):
if allocated < 0:
raise RuntimeError("expecting *allocated* to be >= 0")
lp = _list_new(itemty, allocated)
_list_set_method_table(lp, itemty)
l = _make_list(itemty, lp)
return l
return imp
|
https://github.com/numba/numba/issues/5355
|
======================================================================
ERROR: test_numba_typed_list_allocated (sdc.tests.test_series.TestSeries)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\hpat\sdc\tests\test_series.py
", line 6817, in test_numba_typed_list_allocated
print(jitted())
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\disp
atcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\disp
atcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\six.
py", line 668, in reraise
raise value.with_traceback(tb)
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function typedlist_empty at 0x0000015A9C481550>) with argument(
s) of type(s): (typeref[<class 'numba.types.containers.ListType'>], class(int64), int64)
* parameterized
In definition 0:
TypeError: typedlist_empty() takes 2 positional arguments but 3 were given
raised from c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\nu
mba\typing\templates.py:539
In definition 1:
TypeError: typedlist_empty() takes 2 positional arguments but 3 were given
raised from c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\nu
mba\typing\templates.py:539
This error is usually caused by passing an argument of a type that is unsupported by the
named function.
[1] During: resolving callee type: BoundFunction((<class 'numba.types.abstract.TypeRef'>
, 'empty_list') for typeref[<class 'numba.types.containers.ListType'>])
[2] During: typing of call at C:\Users\akozlov\AppData\Local\Continuum\anaconda3\hpat\sd
c\tests\test_series.py (6813)
File "sdc\tests\test_series.py", line 6813:
def test_impl():
<source elided>
# problem in overload of empty_list in typedlist.py
alloc_list = List.empty_list(types.int64, 10)
^
----------------------------------------------------------------------
Ran 1 test in 0.075s
FAILED (errors=1)
|
numba.errors.TypingError
|
def imp(item, allocated=DEFAULT_ALLOCATED):
if allocated < 0:
raise RuntimeError("expecting *allocated* to be >= 0")
lp = _list_new(itemty, allocated)
_list_set_method_table(lp, itemty)
l = _make_list(itemty, lp)
return l
|
def imp(item, allocated=0):
if allocated < 0:
raise RuntimeError("expecting *allocated* to be >= 0")
lp = _list_new(itemty, allocated)
_list_set_method_table(lp, itemty)
l = _make_list(itemty, lp)
return l
|
https://github.com/numba/numba/issues/5355
|
======================================================================
ERROR: test_numba_typed_list_allocated (sdc.tests.test_series.TestSeries)
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Users\akozlov\AppData\Local\Continuum\anaconda3\hpat\sdc\tests\test_series.py
", line 6817, in test_numba_typed_list_allocated
print(jitted())
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\disp
atcher.py", line 401, in _compile_for_args
error_rewrite(e, 'typing')
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\disp
atcher.py", line 344, in error_rewrite
reraise(type(e), e, None)
File "c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\numba\six.
py", line 668, in reraise
raise value.with_traceback(tb)
numba.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Invalid use of Function(<function typedlist_empty at 0x0000015A9C481550>) with argument(
s) of type(s): (typeref[<class 'numba.types.containers.ListType'>], class(int64), int64)
* parameterized
In definition 0:
TypeError: typedlist_empty() takes 2 positional arguments but 3 were given
raised from c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\nu
mba\typing\templates.py:539
In definition 1:
TypeError: typedlist_empty() takes 2 positional arguments but 3 were given
raised from c:\users\akozlov\appdata\local\continuum\anaconda3\numba_master\numba\nu
mba\typing\templates.py:539
This error is usually caused by passing an argument of a type that is unsupported by the
named function.
[1] During: resolving callee type: BoundFunction((<class 'numba.types.abstract.TypeRef'>
, 'empty_list') for typeref[<class 'numba.types.containers.ListType'>])
[2] During: typing of call at C:\Users\akozlov\AppData\Local\Continuum\anaconda3\hpat\sd
c\tests\test_series.py (6813)
File "sdc\tests\test_series.py", line 6813:
def test_impl():
<source elided>
# problem in overload of empty_list in typedlist.py
alloc_list = List.empty_list(types.int64, 10)
^
----------------------------------------------------------------------
Ran 1 test in 0.075s
FAILED (errors=1)
|
numba.errors.TypingError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.