after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def pull_file(self, remote_source, local_dir):
"""Transport file on the remote side to a local directory
Args:
- remote_source (string): remote_source
- local_dir (string): Local directory to copy to
Returns:
- str: Local path to file
Raises:
- FileExists : Name collision at local directory.
- FileCopyException : FileCopy failed.
"""
local_dest = local_dir + "/" + os.path.basename(remote_source)
try:
os.makedirs(local_dir)
except OSError as e:
if e.errno != errno.EEXIST:
logger.exception("Failed to create local_dir: {0}".format(local_dir))
raise BadScriptPath(e, self.hostname)
try:
self._valid_sftp_client().get(remote_source, local_dest)
except Exception as e:
logger.exception("File pull failed")
raise FileCopyException(e, self.hostname)
return local_dest
|
def pull_file(self, remote_source, local_dir):
"""Transport file on the remote side to a local directory
Args:
- remote_source (string): remote_source
- local_dir (string): Local directory to copy to
Returns:
- str: Local path to file
Raises:
- FileExists : Name collision at local directory.
- FileCopyException : FileCopy failed.
"""
local_dest = local_dir + "/" + os.path.basename(remote_source)
try:
os.makedirs(local_dir)
except OSError as e:
if e.errno != errno.EEXIST:
logger.exception("Failed to create local_dir: {0}".format(local_dir))
raise BadScriptPath(e, self.hostname)
# Easier to check this than to waste time trying to pull file and
# realize there's a problem.
if os.path.exists(local_dest):
logger.exception(
"Remote file copy will overwrite a local file:{0}".format(local_dest)
)
raise FileExists(None, self.hostname, filename=local_dest)
try:
self._valid_sftp_client().get(remote_source, local_dest)
except Exception as e:
logger.exception("File pull failed")
raise FileCopyException(e, self.hostname)
return local_dest
|
https://github.com/Parsl/parsl/issues/1785
|
2020-06-28 02:02:27 parsl.channels.ssh.ssh:215 [ERROR] Remote file copy will overwrite a local file:/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Downloader-Output/remote/COAD_0/runinfo/001/submit_scripts/parsl.localprovider.1593280864.8464878.sh.ec
NoneType: None
2020-06-28 02:02:27 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/task_status_poller.py", line 63, in poll
self._update_state()
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/task_status_poller.py", line 69, in _update_state
item.poll(now)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/task_status_poller.py", line 27, in poll
self._status = self._executor.status()
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/executors/status_handling.py", line 59, in status
status = self._make_status_dict(job_ids, self._provider.status(job_ids))
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/providers/local/local.py", line 87, in status
str_ec = self._read_job_file(script_path, '.ec').strip()
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/providers/local/local.py", line 142, in _read_job_file
path = self._job_file_path(script_path, suffix)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/providers/local/local.py", line 138, in _job_file_path
path = self.channel.pull_file(path, self.script_dir)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/channels/ssh/ssh.py", line 216, in pull_file
raise FileExists(None, self.hostname, filename=local_dest)
parsl.channels.errors.FileExists: Hostname:nus.nscc.sg, Reason:File name collision in channel transport phase:/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Downloader-Output/remote/COAD_0/runinfo/001/submit_scripts/parsl.localprovider.1593280864.8464878.sh.ec
|
parsl.channels.errors.FileExists
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
The status codes of the requsted jobs.
"""
all_states = []
status = self.client.describe_instances(InstanceIds=list(job_ids))
for r in status["Reservations"]:
for i in r["Instances"]:
instance_id = i["InstanceId"]
instance_state = translate_table.get(i["State"]["Name"], JobState.UNKNOWN)
instance_status = JobStatus(instance_state)
self.resources[instance_id]["status"] = instance_status
all_states.extend([instance_status])
return all_states
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
The status codes of the requsted jobs.
"""
all_states = []
status = self.client.describe_instances(InstanceIds=list(job_ids))
for r in status["Reservations"]:
for i in r["Instances"]:
instance_id = i["InstanceId"]
instance_state = translate_table.get(i["State"]["Name"], JobState.UNKNOWN)
self.resources[instance_id]["status"] = JobStatus(instance_state)
all_states.extend([instance_state])
return all_states
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
"""
statuses = []
for job_id in job_ids:
instance = (
self.client.instances()
.get(instance=job_id, project=self.project_id, zone=self.zone)
.execute()
)
job_status = JobStatus(translate_table[instance["status"]])
self.resources[job_id]["status"] = job_status
statuses.append(job_status)
return statuses
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
"""
statuses = []
for job_id in job_ids:
instance = (
self.client.instances()
.get(instance=job_id, project=self.project_id, zone=self.zone)
.execute()
)
self.resources[job_id]["status"] = JobStatus(
translate_table[instance["status"]]
)
statuses.append(translate_table[instance["status"]])
return statuses
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def _status(self):
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
"""
cmd = "qstat"
retcode, stdout, stderr = self.execute_wait(cmd)
# Execute_wait failed. Do no update
if retcode != 0:
return
jobs_missing = list(self.resources.keys())
for line in stdout.split("\n"):
parts = line.split()
if (
parts
and parts[0].lower().lower() != "job-id"
and not parts[0].startswith("----")
):
job_id = parts[0]
state = translate_table.get(parts[4].lower(), JobState.UNKNOWN)
if job_id in self.resources:
self.resources[job_id]["status"] = JobStatus(state)
jobs_missing.remove(job_id)
# Filling in missing blanks for jobs that might have gone missing
# we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
self.resources[missing_job]["status"] = JobStatus(JobState.COMPLETED)
|
def _status(self):
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
"""
cmd = "qstat"
retcode, stdout, stderr = self.execute_wait(cmd)
# Execute_wait failed. Do no update
if retcode != 0:
return
jobs_missing = list(self.resources.keys())
for line in stdout.split("\n"):
parts = line.split()
if (
parts
and parts[0].lower().lower() != "job-id"
and not parts[0].startswith("----")
):
job_id = parts[0]
status = translate_table.get(parts[4].lower(), JobState.UNKNOWN)
if job_id in self.resources:
self.resources[job_id]["status"] = status
jobs_missing.remove(job_id)
# Filling in missing blanks for jobs that might have gone missing
# we might lose some information about why the jobs failed.
for missing_job in jobs_missing:
self.resources[missing_job]["status"] = JobStatus(JobState.COMPLETED)
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def status(self, job_ids) -> List[JobStatus]:
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
The status codes of the requsted jobs.
"""
statuses = []
logger.info("List VMs in resource group")
for job_id in job_ids:
try:
vm = self.compute_client.virtual_machines.get(
self.group_name, job_id, expand="instanceView"
)
status = vm.instance_view.statuses[1].display_status
statuses.append(JobStatus(translate_table.get(status, JobState.UNKNOWN)))
# This only happens when it is in ProvisionState/Pending
except IndexError:
statuses.append(JobStatus(JobState.PENDING))
return statuses
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of str
Identifiers for the jobs.
Returns
-------
list of int
The status codes of the requsted jobs.
"""
statuses = []
logger.info("List VMs in resource group")
for job_id in job_ids:
try:
vm = self.compute_client.virtual_machines.get(
self.group_name, job_id, expand="instanceView"
)
status = vm.instance_view.statuses[1].display_status
statuses.append(JobStatus(translate_table.get(status, JobState.UNKNOWN)))
# This only happens when it is in ProvisionState/Pending
except IndexError:
statuses.append(JobStatus(JobState.PENDING))
return statuses
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def status(self, job_ids: List[Any]) -> List[JobStatus]:
"""Get status of the list of jobs with job_ids
Parameters
----------
job_ids : list of strings
List of job id strings
Returns
-------
list of JobStatus objects
"""
for job_id in job_ids:
channel = self.resources[job_id]["channel"]
status_command = "ps --pid {} | grep {}".format(
self.resources[job_id]["job_id"], self.resources[job_id]["cmd"].split()[0]
)
retcode, stdout, stderr = channel.execute_wait(status_command)
if retcode != 0 and self.resources[job_id]["status"].state == JobState.RUNNING:
self.resources[job_id]["status"] = JobStatus(JobState.FAILED)
return [self.resources[job_id]["status"] for job_id in job_ids]
|
def status(self, job_ids):
"""Get status of the list of jobs with job_ids
Parameters
----------
job_ids : list of strings
List of job id strings
Returns
-------
list of JobStatus objects
"""
for job_id in job_ids:
channel = self.resources[job_id]["channel"]
status_command = "ps --pid {} | grep {}".format(
self.resources[job_id]["job_id"], self.resources[job_id]["cmd"].split()[0]
)
retcode, stdout, stderr = channel.execute_wait(status_command)
if retcode != 0 and self.resources[job_id]["status"].state == JobState.RUNNING:
self.resources[job_id]["status"] = JobStatus(JobState.FAILED)
return [self.resources[job_id]["status"] for job_id in job_ids]
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def status(self, job_ids: List[Any]) -> List[JobStatus]:
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
"""
if job_ids:
self._status()
return [self.resources[jid]["status"] for jid in job_ids]
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
"""
if job_ids:
self._status()
return [self.resources[jid]["status"] for jid in job_ids]
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def status(self, job_ids: List[Any]) -> List[JobStatus]:
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of int
Identifiers of jobs for which the status will be returned.
Returns
-------
List of int
Status codes for the requested jobs.
"""
self._status()
return [self.resources[jid]["status"] for jid in job_ids]
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Parameters
----------
job_ids : list of int
Identifiers of jobs for which the status will be returned.
Returns
-------
List of int
Status codes for the requested jobs.
"""
self._status()
return [self.resources[jid]["status"] for jid in job_ids]
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def status(self, job_ids: List[Any]) -> List[JobStatus]:
"""Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes.
"""
logger.debug("Checking status of: {0}".format(job_ids))
for job_id in self.resources:
retcode, stdout, stderr = self.channel.execute_wait(
'ps -p {} > /dev/null 2> /dev/null; echo "STATUS:$?" '.format(
self.resources[job_id]["remote_pid"]
),
self.cmd_timeout,
)
if stdout: # (is not None)
for line in stdout.split("\n"):
if line.startswith("STATUS:"):
status = line.split("STATUS:")[1].strip()
if status == "0":
self.resources[job_id]["status"] = JobStatus(JobState.RUNNING)
else:
self.resources[job_id]["status"] = JobStatus(JobState.FAILED)
return [self.resources[jid]["status"] for jid in job_ids]
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes.
"""
logger.debug("Checking status of: {0}".format(job_ids))
for job_id in self.resources:
retcode, stdout, stderr = self.channel.execute_wait(
'ps -p {} > /dev/null 2> /dev/null; echo "STATUS:$?" '.format(
self.resources[job_id]["remote_pid"]
),
self.cmd_timeout,
)
if stdout: # (is not None)
for line in stdout.split("\n"):
if line.startswith("STATUS:"):
status = line.split("STATUS:")[1].strip()
if status == "0":
self.resources[job_id]["status"] = JobStatus(JobState.RUNNING)
else:
self.resources[job_id]["status"] = JobStatus(JobState.FAILED)
return [self.resources[jid]["status"] for jid in job_ids]
|
https://github.com/Parsl/parsl/issues/1733
|
2020-06-01 13:40:16.122 parsl.dataflow.task_status_poller:62 [DEBUG] Polling
2020-06-01 13:40:16.126 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/task_status_poller.py", line 64, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in _strategy_simple
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 198, in <listcomp>
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
AttributeError: 'JobState' object has no attribute 'state'
|
AttributeError
|
def start(self, poll_period=None):
"""Start the interchange
Parameters:
----------
TODO: Move task receiving to a thread
"""
logger.info("Incoming ports bound")
if poll_period is None:
poll_period = self.poll_period
start = time.time()
count = 0
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(
target=wrap_with_logs(self.migrate_tasks_to_internal),
args=(self._kill_event,),
name="Interchange-Task-Puller",
)
self._task_puller_thread.start()
self._command_thread = threading.Thread(
target=wrap_with_logs(self._command_server),
args=(self._kill_event,),
name="Interchange-Command",
)
self._command_thread.start()
poller = zmq.Poller()
# poller.register(self.task_incoming, zmq.POLLIN)
poller.register(self.task_outgoing, zmq.POLLIN)
poller.register(self.results_incoming, zmq.POLLIN)
# These are managers which we should examine in an iteration
# for scheduling a job (or maybe any other attention?).
# Anything altering the state of the manager should add it
# onto this list.
interesting_managers = set()
while not self._kill_event.is_set():
self.socks = dict(poller.poll(timeout=poll_period))
# Listen for requests for work
if (
self.task_outgoing in self.socks
and self.socks[self.task_outgoing] == zmq.POLLIN
):
logger.debug("[MAIN] starting task_outgoing section")
message = self.task_outgoing.recv_multipart()
manager = message[0]
if manager not in self._ready_manager_queue:
reg_flag = False
try:
msg = json.loads(message[1].decode("utf-8"))
msg["reg_time"] = datetime.datetime.strptime(
msg["reg_time"], "%Y-%m-%d %H:%M:%S"
)
reg_flag = True
except Exception:
logger.warning(
"[MAIN] Got Exception reading registration message from manager: {}".format(
manager
),
exc_info=True,
)
logger.debug("[MAIN] Message :\n{}\n".format(message[0]))
# By default we set up to ignore bad nodes/registration messages.
self._ready_manager_queue[manager] = {
"last": time.time(),
"free_capacity": 0,
"block_id": None,
"max_capacity": 0,
"worker_count": 0,
"active": True,
"tasks": [],
}
if reg_flag is True:
interesting_managers.add(manager)
logger.info(
"[MAIN] Adding manager: {} to ready queue".format(manager)
)
self._ready_manager_queue[manager].update(msg)
logger.info(
"[MAIN] Registration info for manager {}: {}".format(
manager, msg
)
)
if self.monitoring_enabled:
logger.info(
"Sending message {} to hub".format(
self._ready_manager_queue[manager]
)
)
self.hub_channel.send_pyobj(
(MessageType.NODE_INFO, self._ready_manager_queue[manager])
)
if (
msg["python_v"].rsplit(".", 1)[0]
!= self.current_platform["python_v"].rsplit(".", 1)[0]
or msg["parsl_v"] != self.current_platform["parsl_v"]
):
logger.warning(
"[MAIN] Manager {} has incompatible version info with the interchange".format(
manager
)
)
if self.suppress_failure is False:
logger.debug("Setting kill event")
self._kill_event.set()
e = ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning(
"[MAIN] Sent failure reports, unregistering manager"
)
else:
logger.debug(
"[MAIN] Suppressing shutdown due to version incompatibility"
)
else:
logger.info(
"[MAIN] Manager {} has compatible Parsl version {}".format(
manager, msg["parsl_v"]
)
)
logger.info(
"[MAIN] Manager {} has compatible Python version {}".format(
manager, msg["python_v"].rsplit(".", 1)[0]
)
)
else:
# Registration has failed.
if self.suppress_failure is False:
self._kill_event.set()
e = BadRegistration(manager, critical=True)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
else:
logger.debug(
"[MAIN] Suppressing bad registration from manager:{}".format(
manager
)
)
else:
tasks_requested = int.from_bytes(message[1], "little")
self._ready_manager_queue[manager]["last"] = time.time()
if tasks_requested == HEARTBEAT_CODE:
logger.debug("[MAIN] Manager {} sent heartbeat".format(manager))
self.task_outgoing.send_multipart(
[manager, b"", PKL_HEARTBEAT_CODE]
)
else:
logger.debug(
"[MAIN] Manager {} requested {} tasks".format(
manager, tasks_requested
)
)
self._ready_manager_queue[manager]["free_capacity"] = (
tasks_requested
)
interesting_managers.add(manager)
logger.debug("[MAIN] leaving task_outgoing section")
# If we had received any requests, check if there are tasks that could be passed
logger.debug(
"Managers count (total/interesting): {}/{}".format(
len(self._ready_manager_queue), len(interesting_managers)
)
)
if interesting_managers and not self.pending_task_queue.empty():
shuffled_managers = list(interesting_managers)
random.shuffle(shuffled_managers)
while (
shuffled_managers and not self.pending_task_queue.empty()
): # cf. the if statement above...
manager = shuffled_managers.pop()
tasks_inflight = len(self._ready_manager_queue[manager]["tasks"])
real_capacity = min(
self._ready_manager_queue[manager]["free_capacity"],
self._ready_manager_queue[manager]["max_capacity"] - tasks_inflight,
)
if real_capacity and self._ready_manager_queue[manager]["active"]:
tasks = self.get_tasks(real_capacity)
if tasks:
self.task_outgoing.send_multipart(
[manager, b"", pickle.dumps(tasks)]
)
task_count = len(tasks)
count += task_count
tids = [t["task_id"] for t in tasks]
self._ready_manager_queue[manager]["free_capacity"] -= (
task_count
)
self._ready_manager_queue[manager]["tasks"].extend(tids)
logger.debug(
"[MAIN] Sent tasks: {} to manager {}".format(tids, manager)
)
if self._ready_manager_queue[manager]["free_capacity"] > 0:
logger.debug(
"[MAIN] Manager {} has free_capacity {}".format(
manager,
self._ready_manager_queue[manager]["free_capacity"],
)
)
# ... so keep it in the interesting_managers list
else:
logger.debug(
"[MAIN] Manager {} is now saturated".format(manager)
)
interesting_managers.remove(manager)
else:
interesting_managers.remove(manager)
# logger.debug("Nothing to send to manager {}".format(manager))
logger.debug(
"[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(
len(interesting_managers)
)
)
else:
logger.debug(
"[MAIN] either no interesting managers or no tasks, so skipping manager pass"
)
# Receive any results and forward to client
if (
self.results_incoming in self.socks
and self.socks[self.results_incoming] == zmq.POLLIN
):
logger.debug("[MAIN] entering results_incoming section")
manager, *b_messages = self.results_incoming.recv_multipart()
if manager not in self._ready_manager_queue:
logger.warning(
"[MAIN] Received a result from a un-registered manager: {}".format(
manager
)
)
else:
logger.debug(
"[MAIN] Got {} result items in batch".format(len(b_messages))
)
for b_message in b_messages:
r = pickle.loads(b_message)
logger.debug(
"[MAIN] Received result for task {} from {}".format(
r["task_id"], manager
)
)
self._ready_manager_queue[manager]["tasks"].remove(r["task_id"])
self.results_outgoing.send_multipart(b_messages)
logger.debug(
"[MAIN] Current tasks: {}".format(
self._ready_manager_queue[manager]["tasks"]
)
)
logger.debug("[MAIN] leaving results_incoming section")
bad_managers = [
manager
for manager in self._ready_manager_queue
if time.time() - self._ready_manager_queue[manager]["last"]
> self.heartbeat_threshold
]
for manager in bad_managers:
logger.debug(
"[MAIN] Last: {} Current: {}".format(
self._ready_manager_queue[manager]["last"], time.time()
)
)
logger.warning(
"[MAIN] Too many heartbeats missed for manager {}".format(manager)
)
for tid in self._ready_manager_queue[manager]["tasks"]:
try:
raise ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
except Exception:
result_package = {
"task_id": tid,
"exception": serialize_object(
RemoteExceptionWrapper(*sys.exc_info())
),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
self._ready_manager_queue.pop(manager, "None")
if manager in interesting_managers:
interesting_managers.remove(manager)
delta = time.time() - start
logger.info("Processed {} tasks in {} seconds".format(count, delta))
logger.warning("Exiting")
|
def start(self, poll_period=None):
"""Start the interchange
Parameters:
----------
TODO: Move task receiving to a thread
"""
logger.info("Incoming ports bound")
if poll_period is None:
poll_period = self.poll_period
start = time.time()
count = 0
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(
target=wrap_with_logs(self.migrate_tasks_to_internal),
args=(self._kill_event,),
name="Interchange-Task-Puller",
)
self._task_puller_thread.start()
self._command_thread = threading.Thread(
target=wrap_with_logs(self._command_server),
args=(self._kill_event,),
name="Interchange-Command",
)
self._command_thread.start()
poller = zmq.Poller()
# poller.register(self.task_incoming, zmq.POLLIN)
poller.register(self.task_outgoing, zmq.POLLIN)
poller.register(self.results_incoming, zmq.POLLIN)
# These are managers which we should examine in an iteration
# for scheduling a job (or maybe any other attention?).
# Anything altering the state of the manager should add it
# onto this list.
interesting_managers = set()
while not self._kill_event.is_set():
self.socks = dict(poller.poll(timeout=poll_period))
# Listen for requests for work
if (
self.task_outgoing in self.socks
and self.socks[self.task_outgoing] == zmq.POLLIN
):
logger.debug("[MAIN] starting task_outgoing section")
message = self.task_outgoing.recv_multipart()
manager = message[0]
if manager not in self._ready_manager_queue:
reg_flag = False
try:
msg = json.loads(message[1].decode("utf-8"))
msg["reg_time"] = datetime.datetime.strptime(
msg["reg_time"], "%Y-%m-%d %H:%M:%S"
)
reg_flag = True
except Exception:
logger.warning(
"[MAIN] Got Exception reading registration message from manager: {}".format(
manager
),
exc_info=True,
)
logger.debug("[MAIN] Message :\n{}\n".format(message[0]))
# By default we set up to ignore bad nodes/registration messages.
self._ready_manager_queue[manager] = {
"last": time.time(),
"free_capacity": 0,
"block_id": None,
"max_capacity": 0,
"worker_count": 0,
"active": True,
"tasks": [],
}
if reg_flag is True:
interesting_managers.add(manager)
logger.info(
"[MAIN] Adding manager: {} to ready queue".format(manager)
)
self._ready_manager_queue[manager].update(msg)
logger.info(
"[MAIN] Registration info for manager {}: {}".format(
manager, msg
)
)
if self.monitoring_enabled:
logger.info(
"Sending message {} to hub".format(
self._ready_manager_queue[manager]
)
)
self.hub_channel.send_pyobj(
(MessageType.NODE_INFO, self._ready_manager_queue[manager])
)
if (
msg["python_v"].rsplit(".", 1)[0]
!= self.current_platform["python_v"].rsplit(".", 1)[0]
or msg["parsl_v"] != self.current_platform["parsl_v"]
):
logger.warning(
"[MAIN] Manager {} has incompatible version info with the interchange".format(
manager
)
)
if self.suppress_failure is False:
logger.debug("Setting kill event")
self._kill_event.set()
e = ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning(
"[MAIN] Sent failure reports, unregistering manager"
)
else:
logger.debug(
"[MAIN] Suppressing shutdown due to version incompatibility"
)
else:
logger.info(
"[MAIN] Manager {} has compatible Parsl version {}".format(
manager, msg["parsl_v"]
)
)
logger.info(
"[MAIN] Manager {} has compatible Python version {}".format(
manager, msg["python_v"].rsplit(".", 1)[0]
)
)
else:
# Registration has failed.
if self.suppress_failure is False:
self._kill_event.set()
e = BadRegistration(manager, critical=True)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
else:
logger.debug(
"[MAIN] Suppressing bad registration from manager:{}".format(
manager
)
)
else:
tasks_requested = int.from_bytes(message[1], "little")
self._ready_manager_queue[manager]["last"] = time.time()
if tasks_requested == HEARTBEAT_CODE:
logger.debug("[MAIN] Manager {} sent heartbeat".format(manager))
self.task_outgoing.send_multipart(
[manager, b"", PKL_HEARTBEAT_CODE]
)
else:
logger.debug(
"[MAIN] Manager {} requested {} tasks".format(
manager, tasks_requested
)
)
self._ready_manager_queue[manager]["free_capacity"] = (
tasks_requested
)
interesting_managers.add(manager)
logger.debug("[MAIN] leaving task_outgoing section")
# If we had received any requests, check if there are tasks that could be passed
logger.debug(
"Managers count (total/interesting): {}/{}".format(
len(self._ready_manager_queue), len(interesting_managers)
)
)
if interesting_managers and not self.pending_task_queue.empty():
shuffled_managers = list(interesting_managers)
random.shuffle(shuffled_managers)
while (
shuffled_managers and not self.pending_task_queue.empty()
): # cf. the if statement above...
manager = shuffled_managers.pop()
tasks_inflight = len(self._ready_manager_queue[manager]["tasks"])
real_capacity = min(
self._ready_manager_queue[manager]["free_capacity"],
self._ready_manager_queue[manager]["max_capacity"] - tasks_inflight,
)
if real_capacity and self._ready_manager_queue[manager]["active"]:
tasks = self.get_tasks(real_capacity)
if tasks:
self.task_outgoing.send_multipart(
[manager, b"", pickle.dumps(tasks)]
)
task_count = len(tasks)
count += task_count
tids = [t["task_id"] for t in tasks]
self._ready_manager_queue[manager]["free_capacity"] -= (
task_count
)
self._ready_manager_queue[manager]["tasks"].extend(tids)
logger.debug(
"[MAIN] Sent tasks: {} to manager {}".format(tids, manager)
)
if self._ready_manager_queue[manager]["free_capacity"] > 0:
logger.debug(
"[MAIN] Manager {} has free_capacity {}".format(
manager,
self._ready_manager_queue[manager]["free_capacity"],
)
)
# ... so keep it in the interesting_managers list
else:
logger.debug(
"[MAIN] Manager {} is now saturated".format(manager)
)
interesting_managers.remove(manager)
else:
interesting_managers.remove(manager)
# logger.debug("Nothing to send to manager {}".format(manager))
logger.debug(
"[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(
len(interesting_managers)
)
)
else:
logger.debug(
"[MAIN] either no interesting managers or no tasks, so skipping manager pass"
)
# Receive any results and forward to client
if (
self.results_incoming in self.socks
and self.socks[self.results_incoming] == zmq.POLLIN
):
logger.debug("[MAIN] entering results_incoming section")
manager, *b_messages = self.results_incoming.recv_multipart()
if manager not in self._ready_manager_queue:
logger.warning(
"[MAIN] Received a result from a un-registered manager: {}".format(
manager
)
)
else:
logger.debug(
"[MAIN] Got {} result items in batch".format(len(b_messages))
)
for b_message in b_messages:
r = pickle.loads(b_message)
# logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager))
self._ready_manager_queue[manager]["tasks"].remove(r["task_id"])
self.results_outgoing.send_multipart(b_messages)
logger.debug(
"[MAIN] Current tasks: {}".format(
self._ready_manager_queue[manager]["tasks"]
)
)
logger.debug("[MAIN] leaving results_incoming section")
bad_managers = [
manager
for manager in self._ready_manager_queue
if time.time() - self._ready_manager_queue[manager]["last"]
> self.heartbeat_threshold
]
for manager in bad_managers:
logger.debug(
"[MAIN] Last: {} Current: {}".format(
self._ready_manager_queue[manager]["last"], time.time()
)
)
logger.warning(
"[MAIN] Too many heartbeats missed for manager {}".format(manager)
)
for tid in self._ready_manager_queue[manager]["tasks"]:
try:
raise ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
except Exception:
result_package = {
"task_id": tid,
"exception": serialize_object(
RemoteExceptionWrapper(*sys.exc_info())
),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
self._ready_manager_queue.pop(manager, "None")
if manager in interesting_managers:
interesting_managers.remove(manager)
delta = time.time() - start
logger.info("Processed {} tasks in {} seconds".format(count, delta))
logger.warning("Exiting")
|
https://github.com/Parsl/parsl/issues/1657
|
2020-04-21 10:25:41 parsl.dataflow.strategy:199 [DEBUG] Executor batch-1 has 518 active tasks, 1/0 running/pending blocks, and 540 connected workers
Process HTEX-Interchange:
Traceback (most recent call last):
File "/global/homes/d/descdm/.conda/envs/parsl-lsst-dm/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/global/homes/d/descdm/.conda/envs/parsl-lsst-dm/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/global/homes/d/descdm/.conda/envs/parsl-lsst-dm/lib/python3.7/site-packages/parsl/executors/high_throughput/interchange.py", line 564, in starter
ic.start()
File "/global/homes/d/descdm/.conda/envs/parsl-lsst-dm/lib/python3.7/site-packages/parsl/executors/high_throughput/interchange.py", line 494, in start
self._ready_manager_queue[manager]['tasks'].remove(r['task_id'])
ValueError: list.remove(x): x not in list
|
ValueError
|
def wait_for_current_tasks(self):
"""Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?)
"""
logger.info("Waiting for all remaining tasks to complete")
for task_id in list(self.tasks):
# .exception() is a less exception throwing way of
# waiting for completion than .result()
if task_id not in self.tasks:
logger.debug("Task {} no longer in task list".format(task_id))
else:
fut = self.tasks[task_id]["app_fu"]
if not fut.done():
logger.debug("Waiting for task {} to complete".format(task_id))
fut.exception()
logger.info("All remaining tasks completed")
|
def wait_for_current_tasks(self):
"""Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?)
"""
logger.info("Waiting for all remaining tasks to complete")
for task_id in self.tasks:
# .exception() is a less exception throwing way of
# waiting for completion than .result()
fut = self.tasks[task_id]["app_fu"]
if not fut.done():
logger.debug("Waiting for task {} to complete".format(task_id))
fut.exception()
logger.info("All remaining tasks completed")
|
https://github.com/Parsl/parsl/issues/1606
|
2020-03-14 14:38:50 parsl.dataflow.memoization:159 [DEBUG] Ignoring these kwargs for checkpointing: []
Traceback (most recent call last):
File "/home/users/nus/csipav/Documents/GDC-Pipeline.git/run.py", line 187, in <module>
2020-03-14 14:38:50 parsl.dataflow.memoization:202 [DEBUG] Task 1 has memoization hash 6888eb84e8d55e06d97d5fb5d0b61eb6
2020-03-14 14:38:50 parsl.dataflow.memoization:210 [INFO] Task 1 had no result in cache
main(sys.argv[1:])
File "/home/users/nus/csipav/Documents/GDC-Pipeline.git/run.py", line 183, in main
2020-03-14 14:38:50 parsl.executors.high_throughput.executor:529 [DEBUG] Pushing function <function MonitoringHub.monitor_wrapper.<locals>.wrapped at 0x2aaab86a5200> to queue with args (<function split_vcf_by_chromosome at 0x2aaab84929e0>, "{'MuSE': '/home/users/nus/csipav/anaconda3/envs/gdc/bin/MuSE', 'ace2sam': '/home/users/nus/csipav/an...", "<<class 'parsl.data_provider.files.File'> at 0x2aaab88eec90 url=/home/users/nus/csipav/scratch/Parsl...", "'/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/TCGA-13-0910/1/annotations/...", "'/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/TCGA-13-0910/1/strelka2-ana...")
2020-03-14 14:38:50 parsl.monitoring.monitoring:261 [DEBUG] Sending message MessageType.TASK_INFO, {'task_func_name': 'split_vcf_by_chromosome', 'task_fn_hash': '5f63d27122e82d22e85ec201359e25a7', 'task_memoize': True, 'task_hashsum': '6888eb84e8d55e06d97d5fb5d0b61eb6', 'task_fail_count': 0, 'task_status': <States.launched: 7>, 'task_id': 1, 'task_time_submitted': datetime.datetime(2020, 3, 14, 14, 38, 50, 14435), 'task_time_returned': None, 'task_executor': 'htex', 'run_id': '85606c50-cec0-4f13-b4af-b05d6791b9c3', 'timestamp': datetime.datetime(2020, 3, 14, 14, 38, 50, 27280), 'task_status_name': 'launched', 'tasks_failed_count': 0, 'tasks_completed_count': 12, 'task_inputs': 'None', 'task_outputs': "['/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/TCGA-13-0910/1/annotations/variants.indels_filtered.vcf.gz/split_chr/.complete']", 'task_stdin': None, 'task_stdout': '/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/runinfo/000/task_logs/0000/task_0001_split_vcf_by_chromosome_TCGA-13-0910_1-split_vcf_by_chromosome-variants.indels_filtered.vcf.gz.stdout', 'task_stderr': '/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/runinfo/000/task_logs/0000/task_0001_split_vcf_by_chromosome_TCGA-13-0910_1-split_vcf_by_chromosome-variants.indels_filtered.vcf.gz.stderr', 'task_fail_history': '', 'task_depends': '0', 'task_elapsed_time': None, 'task_fail_mode': 'lazy'}
run_gdc_pipeline(gdc_config)
File "/home/users/nus/csipav/Documents/GDC-Pipeline.git/run.py", line 105, in run_gdc_pipeline
2020-03-14 14:38:50 parsl.dataflow.dflow:499 [INFO] Task 1 launched on executor htex
parsl.wait_for_current_tasks()
File "/home/users/nus/csipav/anaconda3/envs/gdc/lib/python3.7/site-packages/parsl-0.9.0-py3.7.egg/parsl/dataflow/dflow.py", line 1147, in wait_for_current_tasks
2020-03-14 14:38:50 parsl.dataflow.memoization:159 [DEBUG] Ignoring these kwargs for checkpointing: []
2020-03-14 14:38:50 parsl.dataflow.memoization:202 [DEBUG] Task 28 has memoization hash 5b56fba6029bd95a9fb9d02f405e2282
2020-03-14 14:38:50 parsl.dataflow.memoization:210 [INFO] Task 28 had no result in cache
2020-03-14 14:38:50 parsl.executors.high_throughput.executor:529 [DEBUG] Pushing function <function MonitoringHub.monitor_wrapper.<locals>.wrapped at 0x2aaae8a3f830> to queue with args (<function split_vcf_by_chromosome at 0x2aaab84929e0>, "{'MuSE': '/home/users/nus/csipav/anaconda3/envs/gdc/bin/MuSE', 'ace2sam': '/home/users/nus/csipav/an...", "<<class 'parsl.data_provider.files.File'> at 0x2aaadc04d890 url=/home/users/nus/csipav/scratch/Parsl...", "'/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/TCGA-13-0910/1/annotations/...", "'/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/TCGA-13-0910/1/strelka2-ana...")
2020-03-14 14:38:50 parsl.monitoring.monitoring:261 [DEBUG] Sending message MessageType.TASK_INFO, {'task_func_name': 'split_vcf_by_chromosome', 'task_fn_hash': '5f63d27122e82d22e85ec201359e25a7', 'task_memoize': True, 'task_hashsum': '5b56fba6029bd95a9fb9d02f405e2282', 'task_fail_count': 0, 'task_status': <States.launched: 7>, 'task_id': 28, 'task_time_submitted': datetime.datetime(2020, 3, 14, 14, 38, 50, 28401), 'task_time_returned': None, 'task_executor': 'htex', 'run_id': '85606c50-cec0-4f13-b4af-b05d6791b9c3', 'timestamp': datetime.datetime(2020, 3, 14, 14, 38, 50, 39742), 'task_status_name': 'launched', 'tasks_failed_count': 0, 'tasks_completed_count': 12, 'task_inputs': 'None', 'task_outputs': "['/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/TCGA-13-0910/1/annotations/variants.snvs_filtered.vcf.gz/split_chr/.complete']", 'task_stdin': None, 'task_stdout': '/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/runinfo/000/task_logs/0000/task_0028_split_vcf_by_chromosome_TCGA-13-0910_1-split_vcf_by_chromosome-variants.snvs_filtered.vcf.gz.stdout', 'task_stderr': '/home/users/nus/csipav/scratch/Parsl-workflows/GDC-Pipeline-Output/OV_1/runinfo/000/task_logs/0000/task_0028_split_vcf_by_chromosome_TCGA-13-0910_1-split_vcf_by_chromosome-variants.snvs_filtered.vcf.gz.stderr', 'task_fail_history': '', 'task_depends': '0', 'task_elapsed_time': None, 'task_fail_mode': 'lazy'}
cls.dfk().wait_for_current_tasks()
File "/home/users/nus/csipav/anaconda3/envs/gdc/lib/python3.7/site-packages/parsl-0.9.0-py3.7.egg/parsl/dataflow/dflow.py", line 882, in wait_for_current_tasks
2020-03-14 14:38:50 parsl.dataflow.dflow:499 [INFO] Task 28 launched on executor htex
2020-03-14 14:38:50 parsl.dataflow.memoization:159 [DEBUG] Ignoring these kwargs for checkpointing: []
for task_id in self.tasks:
RuntimeError: dictionary changed size during iteration
2020-03-14 14:38:50 parsl.dataflow.dflow:900 [INFO] DFK cleanup initiated
|
RuntimeError
|
def _strategy_simple(self, tasks, *args, kind=None, **kwargs):
"""Peek at the DFK and the executors specified.
We assume here that tasks are not held in a runnable
state, and that all tasks from an app would be sent to
a single specific executor, i.e tasks cannot be specified
to go to one of more executors.
Args:
- tasks (task_ids): Not used here.
KWargs:
- kind (Not used)
"""
for label, executor in self.dfk.executors.items():
if not executor.scaling_enabled:
continue
# Tasks that are either pending completion
active_tasks = executor.outstanding
status = executor.status()
self.unset_logging()
# FIXME we need to handle case where provider does not define these
# FIXME probably more of this logic should be moved to the provider
min_blocks = executor.provider.min_blocks
max_blocks = executor.provider.max_blocks
if isinstance(executor, IPyParallelExecutor) or isinstance(
executor, HighThroughputExecutor
):
tasks_per_node = executor.workers_per_node
elif isinstance(executor, ExtremeScaleExecutor):
tasks_per_node = executor.ranks_per_node
nodes_per_block = executor.provider.nodes_per_block
parallelism = executor.provider.parallelism
running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
pending = sum([1 for x in status.values() if x.state == JobState.PENDING])
active_blocks = running + pending
active_slots = active_blocks * tasks_per_node * nodes_per_block
if hasattr(executor, "connected_workers"):
logger.debug(
"Executor {} has {} active tasks, {}/{} running/pending blocks, and {} connected workers".format(
label, active_tasks, running, pending, executor.connected_workers
)
)
else:
logger.debug(
"Executor {} has {} active tasks and {}/{} running/pending blocks".format(
label, active_tasks, running, pending
)
)
# reset kill timer if executor has active tasks
if active_tasks > 0 and self.executors[executor.label]["idle_since"]:
self.executors[executor.label]["idle_since"] = None
# Case 1
# No tasks.
if active_tasks == 0:
# Case 1a
# Fewer blocks that min_blocks
if active_blocks <= min_blocks:
# Ignore
# logger.debug("Strategy: Case.1a")
pass
# Case 1b
# More blocks than min_blocks. Scale down
else:
# We want to make sure that max_idletime is reached
# before killing off resources
if not self.executors[executor.label]["idle_since"]:
logger.debug(
"Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)".format(
label, self.max_idletime
)
)
self.executors[executor.label]["idle_since"] = time.time()
idle_since = self.executors[executor.label]["idle_since"]
if (time.time() - idle_since) > self.max_idletime:
# We have resources idle for the max duration,
# we have to scale_in now.
logger.debug(
"Idle time has reached {}s for executor {}; removing resources".format(
self.max_idletime, label
)
)
executor.scale_in(active_blocks - min_blocks)
else:
pass
# logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since))
# Case 2
# More tasks than the available slots.
elif (float(active_slots) / active_tasks) < parallelism:
# Case 2a
# We have the max blocks possible
if active_blocks >= max_blocks:
# Ignore since we already have the max nodes
# logger.debug("Strategy: Case.2a")
pass
# Case 2b
else:
# logger.debug("Strategy: Case.2b")
excess = math.ceil((active_tasks * parallelism) - active_slots)
excess_blocks = math.ceil(
float(excess) / (tasks_per_node * nodes_per_block)
)
excess_blocks = min(excess_blocks, max_blocks - active_blocks)
logger.debug("Requesting {} more blocks".format(excess_blocks))
executor.scale_out(excess_blocks)
elif active_slots == 0 and active_tasks > 0:
# Case 4
# Check if slots are being lost quickly ?
logger.debug("Requesting single slot")
if active_blocks < max_blocks:
executor.scale_out(1)
# Case 3
# tasks ~ slots
else:
# logger.debug("Strategy: Case 3")
pass
|
def _strategy_simple(self, tasks, *args, kind=None, **kwargs):
"""Peek at the DFK and the executors specified.
We assume here that tasks are not held in a runnable
state, and that all tasks from an app would be sent to
a single specific executor, i.e tasks cannot be specified
to go to one of more executors.
Args:
- tasks (task_ids): Not used here.
KWargs:
- kind (Not used)
"""
for label, executor in self.dfk.executors.items():
if not executor.scaling_enabled:
continue
# Tasks that are either pending completion
active_tasks = executor.outstanding
status = executor.status()
self.unset_logging()
# FIXME we need to handle case where provider does not define these
# FIXME probably more of this logic should be moved to the provider
min_blocks = executor.provider.min_blocks
max_blocks = executor.provider.max_blocks
if isinstance(executor, IPyParallelExecutor) or isinstance(
executor, HighThroughputExecutor
):
tasks_per_node = executor.workers_per_node
elif isinstance(executor, ExtremeScaleExecutor):
tasks_per_node = executor.ranks_per_node
nodes_per_block = executor.provider.nodes_per_block
parallelism = executor.provider.parallelism
running = sum([1 for x in status if x.state == JobState.RUNNING])
pending = sum([1 for x in status if x.state == JobState.PENDING])
active_blocks = running + pending
active_slots = active_blocks * tasks_per_node * nodes_per_block
if hasattr(executor, "connected_workers"):
logger.debug(
"Executor {} has {} active tasks, {}/{} running/pending blocks, and {} connected workers".format(
label, active_tasks, running, pending, executor.connected_workers
)
)
else:
logger.debug(
"Executor {} has {} active tasks and {}/{} running/pending blocks".format(
label, active_tasks, running, pending
)
)
# reset kill timer if executor has active tasks
if active_tasks > 0 and self.executors[executor.label]["idle_since"]:
self.executors[executor.label]["idle_since"] = None
# Case 1
# No tasks.
if active_tasks == 0:
# Case 1a
# Fewer blocks that min_blocks
if active_blocks <= min_blocks:
# Ignore
# logger.debug("Strategy: Case.1a")
pass
# Case 1b
# More blocks than min_blocks. Scale down
else:
# We want to make sure that max_idletime is reached
# before killing off resources
if not self.executors[executor.label]["idle_since"]:
logger.debug(
"Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)".format(
label, self.max_idletime
)
)
self.executors[executor.label]["idle_since"] = time.time()
idle_since = self.executors[executor.label]["idle_since"]
if (time.time() - idle_since) > self.max_idletime:
# We have resources idle for the max duration,
# we have to scale_in now.
logger.debug(
"Idle time has reached {}s for executor {}; removing resources".format(
self.max_idletime, label
)
)
executor.scale_in(active_blocks - min_blocks)
else:
pass
# logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since))
# Case 2
# More tasks than the available slots.
elif (float(active_slots) / active_tasks) < parallelism:
# Case 2a
# We have the max blocks possible
if active_blocks >= max_blocks:
# Ignore since we already have the max nodes
# logger.debug("Strategy: Case.2a")
pass
# Case 2b
else:
# logger.debug("Strategy: Case.2b")
excess = math.ceil((active_tasks * parallelism) - active_slots)
excess_blocks = math.ceil(
float(excess) / (tasks_per_node * nodes_per_block)
)
excess_blocks = min(excess_blocks, max_blocks - active_blocks)
logger.debug("Requesting {} more blocks".format(excess_blocks))
executor.scale_out(excess_blocks)
elif active_slots == 0 and active_tasks > 0:
# Case 4
# Check if slots are being lost quickly ?
logger.debug("Requesting single slot")
if active_blocks < max_blocks:
executor.scale_out(1)
# Case 3
# tasks ~ slots
else:
# logger.debug("Strategy: Case 3")
pass
|
https://github.com/Parsl/parsl/issues/1530
|
2020-01-09 13:51:29.561 parsl.dataflow.flow_control:142 [ERROR] Flow control callback threw an exception
- logging and proceeding anyway
Traceback (most recent call last):
File "/home/benc/parsl/src/parsl/parsl/dataflow/flow_control.py", line 140, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 192, in _strategy_simple
running = sum([1 for x in status if x.state == JobState.RUNNING])
File "/home/benc/parsl/src/parsl/parsl/dataflow/strategy.py", line 192, in <listcomp>
running = sum([1 for x in status if x.state == JobState.RUNNING])
AttributeError: 'int' object has no attribute 'state'
|
AttributeError
|
def __repr__(self):
init = self.__init__
# This test looks for a single layer of wrapping performed by
# functools.update_wrapper, commonly used in decorators. This will
# allow RepresentationMixin to see through a single such decorator
# applied to the __init__ method of a class, and find the underlying
# arguments. It will not see through multiple layers of such
# decorators, or cope with other decorators which do not use
# functools.update_wrapper.
if hasattr(init, "__wrapped__"):
init = init.__wrapped__
argspec = inspect.getfullargspec(init)
if len(argspec.args) > 1 and argspec.defaults is not None:
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = {}
for arg in argspec.args[1:]:
if not hasattr(self, arg):
template = "class {} uses {} in the constructor, but does not define it as an attribute"
raise AttributeError(template.format(self.__class__.__name__, arg))
if len(defaults) != 0:
args = [getattr(self, a) for a in argspec.args[1 : -len(defaults)]]
else:
args = [getattr(self, a) for a in argspec.args[1:]]
kwargs = {key: getattr(self, key) for key in defaults}
def assemble_multiline(args, kwargs):
def indent(text):
lines = text.splitlines()
if len(lines) <= 1:
return text
return "\n".join(" " + l for l in lines).strip()
args = ["\n {},".format(indent(repr(a))) for a in args]
kwargs = [
"\n {}={}".format(k, indent(repr(v))) for k, v in sorted(kwargs.items())
]
info = "".join(args) + ", ".join(kwargs)
return self.__class__.__name__ + "({}\n)".format(info)
def assemble_line(args, kwargs):
kwargs = ["{}={}".format(k, repr(v)) for k, v in sorted(kwargs.items())]
info = ", ".join([repr(a) for a in args] + kwargs)
return self.__class__.__name__ + "({})".format(info)
if len(assemble_line(args, kwargs)) <= self.__class__.__max_width__:
return assemble_line(args, kwargs)
else:
return assemble_multiline(args, kwargs)
|
def __repr__(self):
init = self.__init__
# This test looks for a single layer of wrapping performed by
# functools.update_wrapper, commonly used in decorators. This will
# allow RepresentationMixin to see through a single such decorator
# applied to the __init__ method of a class, and find the underlying
# arguments. It will not see through multiple layers of such
# decorators, or cope with other decorators which do not use
# functools.update_wrapper.
if hasattr(init, "__wrapped__"):
init = init.__wrapped__
argspec = inspect.getfullargspec(init)
if len(argspec.args) > 1:
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = {}
for arg in argspec.args[1:]:
if not hasattr(self, arg):
template = "class {} uses {} in the constructor, but does not define it as an attribute"
raise AttributeError(template.format(self.__class__.__name__, arg))
args = [getattr(self, a) for a in argspec.args[1 : -len(defaults)]]
kwargs = {key: getattr(self, key) for key in defaults}
def assemble_multiline(args, kwargs):
def indent(text):
lines = text.splitlines()
if len(lines) <= 1:
return text
return "\n".join(" " + l for l in lines).strip()
args = ["\n {},".format(indent(repr(a))) for a in args]
kwargs = [
"\n {}={}".format(k, indent(repr(v))) for k, v in sorted(kwargs.items())
]
info = "".join(args) + ", ".join(kwargs)
return self.__class__.__name__ + "({}\n)".format(info)
def assemble_line(args, kwargs):
kwargs = ["{}={}".format(k, repr(v)) for k, v in sorted(kwargs.items())]
info = ", ".join([repr(a) for a in args] + kwargs)
return self.__class__.__name__ + "({})".format(info)
if len(assemble_line(args, kwargs)) <= self.__class__.__max_width__:
return assemble_line(args, kwargs)
else:
return assemble_multiline(args, kwargs)
|
https://github.com/Parsl/parsl/issues/1124
|
$ python b.py
Traceback (most recent call last):
File "b.py", line 10, in <module>
print(x)
File "/home/benc/parsl/src/parsl/parsl/utils.py", line 193, in __repr__
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
TypeError: 'NoneType' object is not reversible
|
TypeError
|
def _status(self):
"""Update the resource dictionary with job statuses."""
job_id_list = " ".join(self.resources.keys())
cmd = "condor_q {0} -af:jr JobStatus".format(job_id_list)
retcode, stdout, stderr = self.execute_wait(cmd)
"""
Example output:
$ condor_q 34524642.0 34524643.0 -af:jr JobStatus
34524642.0 2
34524643.0 1
"""
for line in stdout.splitlines():
parts = line.strip().split()
job_id = parts[0]
status = translate_table.get(parts[1], "UNKNOWN")
self.resources[job_id]["status"] = status
|
def _status(self):
"""Update the resource dictionary with job statuses."""
job_id_list = " ".join(self.resources.keys())
cmd = "condor_q {0} -af:jr JobStatus".format(job_id_list)
retcode, stdout, stderr = self.execute_wait(cmd)
"""
Example output:
$ condor_q 34524642.0 34524643.0 -af:jr JobStatus
34524642.0 2
34524643.0 1
"""
for line in stdout.strip().split("\n"):
parts = line.split()
job_id = parts[0]
status = translate_table.get(parts[1], "UNKNOWN")
self.resources[job_id]["status"] = status
|
https://github.com/Parsl/parsl/issues/1298
|
2019-09-20 15:32:16.535 parsl.dataflow.flow_control:142 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/afs/crc.nd.edu/user/a/awoodard/.local/lib/python3.6/site-packages/parsl/dataflow/flow_control.py", line 140, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/afs/crc.nd.edu/user/a/awoodard/.local/lib/python3.6/site-packages/parsl/dataflow/strategy.py", line 177, in _strategy_simple
status = executor.status()
File "/afs/crc.nd.edu/user/a/awoodard/.local/lib/python3.6/site-packages/parsl/executors/high_throughput/executor.py", line 620, in status
status = self.provider.status(list(self.blocks.values()))
File "/afs/crc.nd.edu/user/a/awoodard/.local/lib/python3.6/site-packages/parsl/providers/condor/condor.py", line 160, in status
self._status()
File "/afs/crc.nd.edu/user/a/awoodard/.local/lib/python3.6/site-packages/parsl/providers/condor/condor.py", line 142, in _status
job_id = parts[0]
IndexError: list index out of range
|
IndexError
|
def start(self, poll_period=None):
"""Start the NeedNameQeueu
Parameters:
----------
TODO: Move task receiving to a thread
"""
logger.info("Incoming ports bound")
if poll_period is None:
poll_period = self.poll_period
start = time.time()
count = 0
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(
target=self.migrate_tasks_to_internal, args=(self._kill_event,)
)
self._task_puller_thread.start()
self._command_thread = threading.Thread(
target=self._command_server, args=(self._kill_event,)
)
self._command_thread.start()
poller = zmq.Poller()
# poller.register(self.task_incoming, zmq.POLLIN)
poller.register(self.task_outgoing, zmq.POLLIN)
poller.register(self.results_incoming, zmq.POLLIN)
# These are managers which we should examine in an iteration
# for scheduling a job (or maybe any other attention?).
# Anything altering the state of the manager should add it
# onto this list.
interesting_managers = set()
while not self._kill_event.is_set():
self.socks = dict(poller.poll(timeout=poll_period))
# Listen for requests for work
if (
self.task_outgoing in self.socks
and self.socks[self.task_outgoing] == zmq.POLLIN
):
logger.debug("[MAIN] starting task_outgoing section")
message = self.task_outgoing.recv_multipart()
manager = message[0]
if manager not in self._ready_manager_queue:
reg_flag = False
try:
msg = json.loads(message[1].decode("utf-8"))
msg["reg_time"] = datetime.datetime.strptime(
msg["reg_time"], "%Y-%m-%d %H:%M:%S"
)
reg_flag = True
except Exception:
logger.warning(
"[MAIN] Got Exception reading registration message from manager:{}".format(
manager
),
exc_info=True,
)
logger.debug("[MAIN] Message :\n{}\n".format(message[0]))
# By default we set up to ignore bad nodes/registration messages.
self._ready_manager_queue[manager] = {
"last": time.time(),
"free_capacity": 0,
"block_id": None,
"max_capacity": 0,
"worker_count": 0,
"active": True,
"tasks": [],
}
if reg_flag is True:
interesting_managers.add(manager)
logger.info(
"[MAIN] Adding manager: {} to ready queue".format(manager)
)
self._ready_manager_queue[manager].update(msg)
logger.info(
"[MAIN] Registration info for manager {}: {}".format(
manager, msg
)
)
if self.monitoring_enabled:
logger.info(
"Sending message {} to hub".format(
self._ready_manager_queue[manager]
)
)
self.hub_channel.send_pyobj(
(MessageType.NODE_INFO, self._ready_manager_queue[manager])
)
if (
msg["python_v"].rsplit(".", 1)[0]
!= self.current_platform["python_v"].rsplit(".", 1)[0]
or msg["parsl_v"] != self.current_platform["parsl_v"]
):
logger.warn(
"[MAIN] Manager {} has incompatible version info with the interchange".format(
manager
)
)
if self.suppress_failure is False:
logger.debug("Setting kill event")
self._kill_event.set()
e = ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning(
"[MAIN] Sent failure reports, unregistering manager"
)
else:
logger.debug(
"[MAIN] Suppressing shutdown due to version incompatibility"
)
else:
logger.info(
"[MAIN] Manager {} has compatible Parsl version {}".format(
manager, msg["parsl_v"]
)
)
logger.info(
"[MAIN] Manager {} has compatible Python version {}".format(
manager, msg["python_v"].rsplit(".", 1)[0]
)
)
else:
# Registration has failed.
if self.suppress_failure is False:
self._kill_event.set()
e = BadRegistration(manager, critical=True)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
else:
logger.debug(
"[MAIN] Suppressing bad registration from manager:{}".format(
manager
)
)
else:
tasks_requested = int.from_bytes(message[1], "little")
self._ready_manager_queue[manager]["last"] = time.time()
if tasks_requested == HEARTBEAT_CODE:
logger.debug("[MAIN] Manager {} sent heartbeat".format(manager))
self.task_outgoing.send_multipart(
[manager, b"", PKL_HEARTBEAT_CODE]
)
else:
logger.debug(
"[MAIN] Manager {} requested {} tasks".format(
manager, tasks_requested
)
)
self._ready_manager_queue[manager]["free_capacity"] = (
tasks_requested
)
interesting_managers.add(manager)
logger.debug("[MAIN] leaving task_outgoing section")
# If we had received any requests, check if there are tasks that could be passed
logger.debug(
"Managers count (total/interesting): {}/{}".format(
len(self._ready_manager_queue), len(interesting_managers)
)
)
if interesting_managers and not self.pending_task_queue.empty():
shuffled_managers = list(interesting_managers)
random.shuffle(shuffled_managers)
while (
shuffled_managers and not self.pending_task_queue.empty()
): # cf. the if statement above...
manager = shuffled_managers.pop()
tasks_inflight = len(self._ready_manager_queue[manager]["tasks"])
real_capacity = min(
self._ready_manager_queue[manager]["free_capacity"],
self._ready_manager_queue[manager]["max_capacity"] - tasks_inflight,
)
if real_capacity and self._ready_manager_queue[manager]["active"]:
tasks = self.get_tasks(real_capacity)
if tasks:
self.task_outgoing.send_multipart(
[manager, b"", pickle.dumps(tasks)]
)
task_count = len(tasks)
count += task_count
tids = [t["task_id"] for t in tasks]
self._ready_manager_queue[manager]["free_capacity"] -= (
task_count
)
self._ready_manager_queue[manager]["tasks"].extend(tids)
logger.debug(
"[MAIN] Sent tasks: {} to manager {}".format(tids, manager)
)
if self._ready_manager_queue[manager]["free_capacity"] > 0:
logger.debug(
"[MAIN] Manager {} has free_capacity {}".format(
manager,
self._ready_manager_queue[manager]["free_capacity"],
)
)
# ... so keep it in the interesting_managers list
else:
logger.debug(
"[MAIN] Manager {} is now saturated".format(manager)
)
interesting_managers.remove(manager)
else:
interesting_managers.remove(manager)
# logger.debug("Nothing to send to manager {}".format(manager))
logger.debug(
"[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(
len(interesting_managers)
)
)
else:
logger.debug(
"[MAIN] either no interesting managers or no tasks, so skipping manager pass"
)
# Receive any results and forward to client
if (
self.results_incoming in self.socks
and self.socks[self.results_incoming] == zmq.POLLIN
):
logger.debug("[MAIN] entering results_incoming section")
manager, *b_messages = self.results_incoming.recv_multipart()
if manager not in self._ready_manager_queue:
logger.warning(
"[MAIN] Received a result from a un-registered manager: {}".format(
manager
)
)
else:
logger.debug(
"[MAIN] Got {} result items in batch".format(len(b_messages))
)
for b_message in b_messages:
r = pickle.loads(b_message)
# logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager))
self._ready_manager_queue[manager]["tasks"].remove(r["task_id"])
self.results_outgoing.send_multipart(b_messages)
logger.debug(
"[MAIN] Current tasks: {}".format(
self._ready_manager_queue[manager]["tasks"]
)
)
logger.debug("[MAIN] leaving results_incoming section")
bad_managers = [
manager
for manager in self._ready_manager_queue
if time.time() - self._ready_manager_queue[manager]["last"]
> self.heartbeat_threshold
]
for manager in bad_managers:
logger.debug(
"[MAIN] Last: {} Current: {}".format(
self._ready_manager_queue[manager]["last"], time.time()
)
)
logger.warning(
"[MAIN] Too many heartbeats missed for manager {}".format(manager)
)
for tid in self._ready_manager_queue[manager]["tasks"]:
try:
raise ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
except Exception:
result_package = {
"task_id": tid,
"exception": serialize_object(
RemoteExceptionWrapper(*sys.exc_info())
),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
self._ready_manager_queue.pop(manager, "None")
if manager in interesting_managers:
interesting_managers.remove(manager)
delta = time.time() - start
logger.info("Processed {} tasks in {} seconds".format(count, delta))
logger.warning("Exiting")
|
def start(self, poll_period=None):
"""Start the NeedNameQeueu
Parameters:
----------
TODO: Move task receiving to a thread
"""
logger.info("Incoming ports bound")
if poll_period is None:
poll_period = self.poll_period
start = time.time()
count = 0
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(
target=self.migrate_tasks_to_internal, args=(self._kill_event,)
)
self._task_puller_thread.start()
self._command_thread = threading.Thread(
target=self._command_server, args=(self._kill_event,)
)
self._command_thread.start()
poller = zmq.Poller()
# poller.register(self.task_incoming, zmq.POLLIN)
poller.register(self.task_outgoing, zmq.POLLIN)
poller.register(self.results_incoming, zmq.POLLIN)
# These are managers which we should examine in an iteration
# for scheduling a job (or maybe any other attention?).
# Anything altering the state of the manager should add it
# onto this list.
interesting_managers = set()
while not self._kill_event.is_set():
self.socks = dict(poller.poll(timeout=poll_period))
# Listen for requests for work
if (
self.task_outgoing in self.socks
and self.socks[self.task_outgoing] == zmq.POLLIN
):
logger.debug("[MAIN] starting task_outgoing section")
message = self.task_outgoing.recv_multipart()
manager = message[0]
if manager not in self._ready_manager_queue:
reg_flag = False
try:
msg = json.loads(message[1].decode("utf-8"))
msg["reg_time"] = datetime.datetime.strptime(
msg["reg_time"], "%Y-%m-%d %H:%M:%S"
)
reg_flag = True
except Exception:
logger.warning(
"[MAIN] Got Exception reading registration message from manager:{}".format(
manager
),
exc_info=True,
)
logger.debug("[MAIN] Message :\n{}\n".format(message[0]))
# By default we set up to ignore bad nodes/registration messages.
self._ready_manager_queue[manager] = {
"last": time.time(),
"free_capacity": 0,
"block_id": None,
"max_capacity": 0,
"worker_count": 0,
"active": True,
"tasks": [],
}
if reg_flag is True:
interesting_managers.add(manager)
logger.info(
"[MAIN] Adding manager: {} to ready queue".format(manager)
)
self._ready_manager_queue[manager].update(msg)
logger.info(
"[MAIN] Registration info for manager {}: {}".format(
manager, msg
)
)
if self.monitoring_enabled:
logger.info(
"Sending message {} to hub".format(
self._ready_manager_queue[manager]
)
)
self.hub_channel.send_pyobj(
(MessageType.NODE_INFO, self._ready_manager_queue[manager])
)
if (
msg["python_v"].rsplit(".", 1)[0]
!= self.current_platform["python_v"].rsplit(".", 1)[0]
or msg["parsl_v"] != self.current_platform["parsl_v"]
):
logger.warn(
"[MAIN] Manager {} has incompatible version info with the interchange".format(
manager
)
)
if self.suppress_failure is False:
logger.debug("Setting kill event")
self._kill_event.set()
e = ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning(
"[MAIN] Sent failure reports, unregistering manager"
)
else:
logger.debug(
"[MAIN] Suppressing shutdown due to version incompatibility"
)
else:
logger.info(
"[MAIN] Manager {} has compatible Parsl version {}".format(
manager, msg["parsl_v"]
)
)
logger.info(
"[MAIN] Manager {} has compatible Python version {}".format(
manager, msg["python_v"].rsplit(".", 1)[0]
)
)
else:
# Registration has failed.
if self.suppress_failure is False:
self._kill_event.set()
e = BadRegistration(manager, critical=True)
result_package = {
"task_id": -1,
"exception": serialize_object(e),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
else:
logger.debug(
"[MAIN] Suppressing bad registration from manager:{}".format(
manager
)
)
else:
tasks_requested = int.from_bytes(message[1], "little")
self._ready_manager_queue[manager]["last"] = time.time()
if tasks_requested == HEARTBEAT_CODE:
logger.debug("[MAIN] Manager {} sent heartbeat".format(manager))
self.task_outgoing.send_multipart(
[manager, b"", PKL_HEARTBEAT_CODE]
)
else:
logger.debug(
"[MAIN] Manager {} requested {} tasks".format(
manager, tasks_requested
)
)
self._ready_manager_queue[manager]["free_capacity"] = (
tasks_requested
)
interesting_managers.add(manager)
logger.debug("[MAIN] leaving task_outgoing section")
# If we had received any requests, check if there are tasks that could be passed
logger.debug(
"Managers count (total/interesting): {}/{}".format(
len(self._ready_manager_queue), len(interesting_managers)
)
)
if interesting_managers and not self.pending_task_queue.empty():
shuffled_managers = list(interesting_managers)
random.shuffle(shuffled_managers)
while (
shuffled_managers and not self.pending_task_queue.empty()
): # cf. the if statement above...
manager = shuffled_managers.pop()
tasks_inflight = len(self._ready_manager_queue[manager]["tasks"])
real_capacity = min(
self._ready_manager_queue[manager]["free_capacity"],
self._ready_manager_queue[manager]["max_capacity"] - tasks_inflight,
)
if real_capacity and self._ready_manager_queue[manager]["active"]:
tasks = self.get_tasks(real_capacity)
if tasks:
self.task_outgoing.send_multipart(
[manager, b"", pickle.dumps(tasks)]
)
task_count = len(tasks)
count += task_count
tids = [t["task_id"] for t in tasks]
self._ready_manager_queue[manager]["free_capacity"] -= (
task_count
)
self._ready_manager_queue[manager]["tasks"].extend(tids)
logger.debug(
"[MAIN] Sent tasks: {} to manager {}".format(tids, manager)
)
if self._ready_manager_queue[manager]["free_capacity"] > 0:
logger.debug(
"[MAIN] Manager {} has free_capacity {}".format(
manager,
self._ready_manager_queue[manager]["free_capacity"],
)
)
# ... so keep it in the interesting_managers list
else:
logger.debug(
"[MAIN] Manager {} is now saturated".format(manager)
)
interesting_managers.remove(manager)
else:
interesting_managers.remove(manager)
# logger.debug("Nothing to send to manager {}".format(manager))
logger.debug(
"[MAIN] leaving _ready_manager_queue section, with {} managers still interesting".format(
len(interesting_managers)
)
)
else:
logger.debug(
"[MAIN] either no interesting managers or no tasks, so skipping manager pass"
)
# Receive any results and forward to client
if (
self.results_incoming in self.socks
and self.socks[self.results_incoming] == zmq.POLLIN
):
logger.debug("[MAIN] entering results_incoming section")
manager, *b_messages = self.results_incoming.recv_multipart()
if manager not in self._ready_manager_queue:
logger.warning(
"[MAIN] Received a result from a un-registered manager: {}".format(
manager
)
)
else:
logger.debug(
"[MAIN] Got {} result items in batch".format(len(b_messages))
)
for b_message in b_messages:
r = pickle.loads(b_message)
# logger.debug("[MAIN] Received result for task {} from {}".format(r['task_id'], manager))
self._ready_manager_queue[manager]["tasks"].remove(r["task_id"])
self.results_outgoing.send_multipart(b_messages)
logger.debug(
"[MAIN] Current tasks: {}".format(
self._ready_manager_queue[manager]["tasks"]
)
)
logger.debug("[MAIN] leaving results_incoming section")
bad_managers = [
manager
for manager in self._ready_manager_queue
if time.time() - self._ready_manager_queue[manager]["last"]
> self.heartbeat_threshold
]
for manager in bad_managers:
logger.debug(
"[MAIN] Last: {} Current: {}".format(
self._ready_manager_queue[manager]["last"], time.time()
)
)
logger.warning(
"[MAIN] Too many heartbeats missed for manager {}".format(manager)
)
for tid in self._ready_manager_queue[manager]["tasks"]:
try:
raise ManagerLost(
manager, self._ready_manager_queue[manager]["hostname"]
)
except Exception:
result_package = {
"task_id": tid,
"exception": serialize_object(
RemoteExceptionWrapper(*sys.exc_info())
),
}
pkl_package = pickle.dumps(result_package)
self.results_outgoing.send(pkl_package)
logger.warning("[MAIN] Sent failure reports, unregistering manager")
self._ready_manager_queue.pop(manager, "None")
delta = time.time() - start
logger.info("Processed {} tasks in {} seconds".format(count, delta))
logger.warning("Exiting")
|
https://github.com/Parsl/parsl/issues/1128
|
Process Process-1:
Traceback (most recent call last):
File "/home/zzli/anaconda3/envs/funcx/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/home/zzli/anaconda3/envs/funcx/lib/python3.6/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "/home/zzli/anaconda3/envs/funcx/lib/python3.6/site-packages/parsl-0.8.0-py3.6.egg/parsl/executors/high_throughput/interchange.py", line 563, in starter
ic.start()
File "/home/zzli/anaconda3/envs/funcx/lib/python3.6/site-packages/parsl-0.8.0-py3.6.egg/parsl/executors/high_throughput/interchange.py", line 458, in start
tasks_inflight = len(self._ready_manager_queue[manager]['tasks'])
KeyError: b'7c51d275c32f'
|
KeyError
|
def remote_side_bash_executor(func, *args, **kwargs):
"""Execute the bash app type function and return the command line string.
This string is reformatted with the *args, and **kwargs
from call time.
"""
import os
import time
import subprocess
import logging
import parsl.app.errors as pe
logging.basicConfig(
filename="/tmp/bashexec.{0}.log".format(time.time()), level=logging.DEBUG
)
# start_t = time.time()
func_name = func.__name__
partial_cmdline = None
# Try to run the func to compose the commandline
try:
# Execute the func to get the commandline
partial_cmdline = func(*args, **kwargs)
# Reformat the commandline with current args and kwargs
executable = partial_cmdline.format(*args, **kwargs)
except AttributeError as e:
if partial_cmdline is not None:
raise pe.AppBadFormatting(
"App formatting failed for app '{}' with AttributeError: {}".format(
func_name, e
)
)
else:
raise pe.BashAppNoReturn(
"Bash app '{}' did not return a value, or returned none - with this exception: {}".format(
func_name, e
),
None,
)
except IndexError as e:
raise pe.AppBadFormatting(
"App formatting failed for app '{}' with IndexError: {}".format(
func_name, e
)
)
except Exception as e:
logging.error(
"Caught exception during formatting of app '{}': {}".format(func_name, e)
)
raise e
logging.debug("Executable: %s", executable)
# Updating stdout, stderr if values passed at call time.
def open_std_fd(fdname):
# fdname is 'stdout' or 'stderr'
stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
if stdfspec is None:
return None
elif isinstance(stdfspec, str):
fname = stdfspec
mode = "a+"
elif isinstance(stdfspec, tuple):
if len(stdfspec) != 2:
raise pe.BadStdStreamFile(
"std descriptor %s has incorrect tuple length %s"
% (fdname, len(stdfspec)),
TypeError("Bad Tuple Length"),
)
fname, mode = stdfspec
else:
raise pe.BadStdStreamFile(
"std descriptor %s has unexpected type %s"
% (fdname, str(type(stdfspec))),
TypeError("Bad Tuple Type"),
)
try:
if os.path.dirname(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
fd = open(fname, mode)
except Exception as e:
raise pe.BadStdStreamFile(fname, e)
return fd
std_out = open_std_fd("stdout")
std_err = open_std_fd("stderr")
timeout = kwargs.get("walltime")
if std_err is not None:
print(
"--> executable follows <--\n{}\n--> end executable <--".format(executable),
file=std_err,
)
returncode = None
try:
proc = subprocess.Popen(
executable,
stdout=std_out,
stderr=std_err,
shell=True,
executable="/bin/bash",
)
proc.wait(timeout=timeout)
returncode = proc.returncode
except subprocess.TimeoutExpired:
# print("Timeout")
raise pe.AppTimeout("[{}] App exceeded walltime: {}".format(func_name, timeout))
except Exception as e:
# print("Caught exception: ", e)
raise pe.AppException(
"[{}] App caught exception: {}".format(func_name, proc.returncode), e
)
if returncode != 0:
raise pe.AppFailure(
"[{}] App failed with exit code: {}".format(func_name, proc.returncode),
proc.returncode,
)
# TODO : Add support for globs here
missing = []
for outputfile in kwargs.get("outputs", []):
fpath = outputfile
if type(outputfile) != str:
fpath = outputfile.filepath
if not os.path.exists(fpath):
missing.extend([outputfile])
if missing:
raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)
# exec_duration = time.time() - start_t
return returncode
|
def remote_side_bash_executor(func, *args, **kwargs):
"""Execute the bash app type function and return the command line string.
This string is reformatted with the *args, and **kwargs
from call time.
"""
import os
import time
import subprocess
import logging
import parsl.app.errors as pe
logging.basicConfig(
filename="/tmp/bashexec.{0}.log".format(time.time()), level=logging.DEBUG
)
# start_t = time.time()
func_name = func.__name__
partial_cmdline = None
# Try to run the func to compose the commandline
try:
# Execute the func to get the commandline
partial_cmdline = func(*args, **kwargs)
# Reformat the commandline with current args and kwargs
executable = partial_cmdline.format(*args, **kwargs)
except AttributeError as e:
if partial_cmdline is not None:
raise pe.AppBadFormatting(
"App formatting failed for app '{}' with AttributeError: {}".format(
func_name, e
)
)
else:
raise pe.BashAppNoReturn(
"Bash app '{}' did not return a value, or returned none - with this exception: {}".format(
func_name, e
),
None,
)
except IndexError as e:
raise pe.AppBadFormatting(
"App formatting failed for app '{}' with IndexError: {}".format(
func_name, e
)
)
except Exception as e:
logging.error(
"Caught exception during formatting of app '{}': {}".format(func_name, e)
)
raise e
logging.debug("Executable: %s", executable)
# Updating stdout, stderr if values passed at call time.
def open_std_fd(fdname):
# fdname is 'stdout' or 'stderr'
stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
if stdfspec is None:
return None
elif isinstance(stdfspec, str):
fname = stdfspec
mode = "a+"
elif isinstance(stdfspec, tuple):
if len(stdfspec) != 2:
raise pe.BadStdStreamFile(
"std descriptor %s has incorrect tuple length %s"
% (fdname, len(stdfspec)),
TypeError("Bad Tuple Length"),
)
fname, mode = stdfspec
else:
raise pe.BadStdStreamFile(
"std descriptor %s has unexpected type %s"
% (fdname, str(type(stdfspec))),
TypeError("Bad Tuple Type"),
)
try:
fd = open(fname, mode)
if os.path.dirname(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
except Exception as e:
raise pe.BadStdStreamFile(fname, e)
return fd
std_out = open_std_fd("stdout")
std_err = open_std_fd("stderr")
timeout = kwargs.get("walltime")
if std_err is not None:
print(
"--> executable follows <--\n{}\n--> end executable <--".format(executable),
file=std_err,
)
returncode = None
try:
proc = subprocess.Popen(
executable,
stdout=std_out,
stderr=std_err,
shell=True,
executable="/bin/bash",
)
proc.wait(timeout=timeout)
returncode = proc.returncode
except subprocess.TimeoutExpired:
# print("Timeout")
raise pe.AppTimeout("[{}] App exceeded walltime: {}".format(func_name, timeout))
except Exception as e:
# print("Caught exception: ", e)
raise pe.AppException(
"[{}] App caught exception: {}".format(func_name, proc.returncode), e
)
if returncode != 0:
raise pe.AppFailure(
"[{}] App failed with exit code: {}".format(func_name, proc.returncode),
proc.returncode,
)
# TODO : Add support for globs here
missing = []
for outputfile in kwargs.get("outputs", []):
fpath = outputfile
if type(outputfile) != str:
fpath = outputfile.filepath
if not os.path.exists(fpath):
missing.extend([outputfile])
if missing:
raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)
# exec_duration = time.time() - start_t
return returncode
|
https://github.com/Parsl/parsl/issues/1001
|
2019-05-31 11:22:06 parsl.dataflow.dflow:251 [ERROR] Task 0 failed
Traceback (most recent call last):
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/dataflow/dflow.py", line 248, in handle_exec_update
res.reraise()
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/errors.py", line 163, in reraise
reraise(t, v, tb)
File "/home/yadu/.local/lib/python3.6/site-packages/six.py", line 692, in reraise
raise value.with_traceback(tb)
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/errors.py", line 172, in wrapper
return func(*args, **kwargs)
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/bash.py", line 79, in remote_side_bash_executor
std_out = open_std_fd('stdout')
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/bash.py", line 76, in open_std_fd
raise pe.BadStdStreamFile(fname, e)
parsl.app.errors.BadStdStreamFile: FilePath: [/home/yadu/src/parsl/parsl/tests/manual_tests/runinfo/050/task_logs/0000/task_0000_sleeper_bash.stdout] Exception: [Errno 2] No such file or directory: '/home/yadu/src/parsl/parsl/tests/manual_tests/runinfo/050/task_logs/0000/task_0000_sleeper_bash.stdout'
|
parsl.app.errors.BadStdStreamFile
|
def open_std_fd(fdname):
# fdname is 'stdout' or 'stderr'
stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
if stdfspec is None:
return None
elif isinstance(stdfspec, str):
fname = stdfspec
mode = "a+"
elif isinstance(stdfspec, tuple):
if len(stdfspec) != 2:
raise pe.BadStdStreamFile(
"std descriptor %s has incorrect tuple length %s"
% (fdname, len(stdfspec)),
TypeError("Bad Tuple Length"),
)
fname, mode = stdfspec
else:
raise pe.BadStdStreamFile(
"std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))),
TypeError("Bad Tuple Type"),
)
try:
if os.path.dirname(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
fd = open(fname, mode)
except Exception as e:
raise pe.BadStdStreamFile(fname, e)
return fd
|
def open_std_fd(fdname):
# fdname is 'stdout' or 'stderr'
stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
if stdfspec is None:
return None
elif isinstance(stdfspec, str):
fname = stdfspec
mode = "a+"
elif isinstance(stdfspec, tuple):
if len(stdfspec) != 2:
raise pe.BadStdStreamFile(
"std descriptor %s has incorrect tuple length %s"
% (fdname, len(stdfspec)),
TypeError("Bad Tuple Length"),
)
fname, mode = stdfspec
else:
raise pe.BadStdStreamFile(
"std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))),
TypeError("Bad Tuple Type"),
)
try:
fd = open(fname, mode)
if os.path.dirname(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
except Exception as e:
raise pe.BadStdStreamFile(fname, e)
return fd
|
https://github.com/Parsl/parsl/issues/1001
|
2019-05-31 11:22:06 parsl.dataflow.dflow:251 [ERROR] Task 0 failed
Traceback (most recent call last):
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/dataflow/dflow.py", line 248, in handle_exec_update
res.reraise()
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/errors.py", line 163, in reraise
reraise(t, v, tb)
File "/home/yadu/.local/lib/python3.6/site-packages/six.py", line 692, in reraise
raise value.with_traceback(tb)
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/errors.py", line 172, in wrapper
return func(*args, **kwargs)
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/bash.py", line 79, in remote_side_bash_executor
std_out = open_std_fd('stdout')
File "/home/yadu/miniconda3/envs/mpi_executor/lib/python3.6/site-packages/parsl/app/bash.py", line 76, in open_std_fd
raise pe.BadStdStreamFile(fname, e)
parsl.app.errors.BadStdStreamFile: FilePath: [/home/yadu/src/parsl/parsl/tests/manual_tests/runinfo/050/task_logs/0000/task_0000_sleeper_bash.stdout] Exception: [Errno 2] No such file or directory: '/home/yadu/src/parsl/parsl/tests/manual_tests/runinfo/050/task_logs/0000/task_0000_sleeper_bash.stdout'
|
parsl.app.errors.BadStdStreamFile
|
def resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_time_user",
label="CPU Time Distribution",
option="avg",
columns=20,
):
# E.g., psutil_process_time_user or psutil_process_memory_percent
min_range = min(df_resources[type].astype("float"))
max_range = max(df_resources[type].astype("float"))
time_step = (max_range - min_range) / columns
if min_range == max_range:
x_axis = [min_range]
else:
x_axis = []
for i in np.arange(min_range, max_range + time_step, time_step):
x_axis.append(i)
apps_dict = dict()
for i in range(len(df_task)):
row = df_task.iloc[i]
apps_dict[row["task_id"]] = []
def y_axis_setup():
items = [0] * len(x_axis)
for app, tasks in apps_dict.items():
if option == "avg":
task = (
df_resources[df_resources["task_id"] == app][type]
.astype("float")
.mean()
)
elif option == "max":
task = max(
df_resources[df_resources["task_id"] == app][type].astype("float")
)
for i in range(len(x_axis) - 1):
a = task >= x_axis[i]
b = task < x_axis[i + 1]
if a and b:
items[i] += 1
if task >= x_axis[-1]:
items[-1] += 1
return items
if "memory" not in type:
xaxis = dict(autorange=True, title="CPU user time (seconds)")
else:
xaxis = dict(autorange=True, title="Memory usage (bytes)")
fig = go.Figure(
data=[go.Bar(x=x_axis, y=y_axis_setup(), name="tasks")],
layout=go.Layout(
xaxis=xaxis, yaxis=dict(title="Tasks"), title=label + "(" + option + ")"
),
)
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
|
def resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_time_user",
label="CPU Time Distribution",
option="avg",
columns=20,
):
# E.g., psutil_process_time_user or psutil_process_memory_percent
min_range = min(df_resources[type].astype("float"))
max_range = max(df_resources[type].astype("float"))
time_step = (max_range - min_range) / columns
x_axis = []
for i in np.arange(min_range, max_range + time_step, time_step):
x_axis.append(i)
apps_dict = dict()
for i in range(len(df_task)):
row = df_task.iloc[i]
apps_dict[row["task_id"]] = []
def y_axis_setup():
items = []
for app, tasks in apps_dict.items():
tmp = []
if option == "avg":
task = (
df_resources[df_resources["task_id"] == app][type]
.astype("float")
.mean()
)
elif option == "max":
task = max(
df_resources[df_resources["task_id"] == app][type].astype("float")
)
for i in range(len(x_axis) - 1):
a = task >= x_axis[i]
b = task < x_axis[i + 1]
tmp.append(a & b)
items = np.sum([items, tmp], axis=0)
return items
if "memory" not in type:
xaxis = dict(autorange=True, title="CPU user time (seconds)")
else:
xaxis = dict(autorange=True, title="Memory usage (bytes)")
fig = go.Figure(
data=[go.Bar(x=x_axis[:-1], y=y_axis_setup(), name="tasks")],
layout=go.Layout(
xaxis=xaxis, yaxis=dict(title="Tasks"), title=label + "(" + option + ")"
),
)
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
|
https://github.com/Parsl/parsl/issues/926
|
[2019-05-03 18:18:01,362] ERROR in app: Exception on /workflow/9294c39d-a64d-4ad5-9ff8-4b8d757c1af1/resource_usage [GET]
Traceback (most recent call last):
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/awoodard/ci/viz_server/viz_server/views.py", line 145, in workflow_resources
df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg'),
File "/Users/awoodard/ci/viz_server/viz_server/plots/default/workflow_resource_plots.py", line 15, in resource_distribution_plot
for i in np.arange(min_range, max_range + time_step, time_step):
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def y_axis_setup():
items = [0] * len(x_axis)
for app, tasks in apps_dict.items():
if option == "avg":
task = (
df_resources[df_resources["task_id"] == app][type]
.astype("float")
.mean()
)
elif option == "max":
task = max(
df_resources[df_resources["task_id"] == app][type].astype("float")
)
for i in range(len(x_axis) - 1):
a = task >= x_axis[i]
b = task < x_axis[i + 1]
if a and b:
items[i] += 1
if task >= x_axis[-1]:
items[-1] += 1
return items
|
def y_axis_setup():
items = []
for app, tasks in apps_dict.items():
tmp = []
if option == "avg":
task = (
df_resources[df_resources["task_id"] == app][type]
.astype("float")
.mean()
)
elif option == "max":
task = max(
df_resources[df_resources["task_id"] == app][type].astype("float")
)
for i in range(len(x_axis) - 1):
a = task >= x_axis[i]
b = task < x_axis[i + 1]
tmp.append(a & b)
items = np.sum([items, tmp], axis=0)
return items
|
https://github.com/Parsl/parsl/issues/926
|
[2019-05-03 18:18:01,362] ERROR in app: Exception on /workflow/9294c39d-a64d-4ad5-9ff8-4b8d757c1af1/resource_usage [GET]
Traceback (most recent call last):
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/awoodard/ci/viz_server/viz_server/views.py", line 145, in workflow_resources
df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg'),
File "/Users/awoodard/ci/viz_server/viz_server/plots/default/workflow_resource_plots.py", line 15, in resource_distribution_plot
for i in np.arange(min_range, max_range + time_step, time_step):
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def resource_time_series(tasks, type="psutil_process_time_user", label="CPU user time"):
tasks["epoch_time"] = (
pd.to_datetime(tasks["timestamp"]) - pd.Timestamp("1970-01-01")
) // pd.Timedelta("1s")
step = int(tasks["resource_monitoring_interval"][0])
start = tasks["epoch_time"].min()
end = tasks["epoch_time"].max()
tasks["relative_time"] = tasks["epoch_time"] - start
if end != start:
bins = pd.cut(
tasks["relative_time"], range(0, end - start + 1, step), include_lowest=True
)
df = tasks.groupby(bins, as_index=False)[type].mean()
df["time"] = step * df.index
fig = go.Figure(
data=[
go.Scatter(
x=df["time"],
y=df[type],
)
],
layout=go.Layout(
xaxis=dict(autorange=True, title="Time (seconds)"),
yaxis=dict(title=label),
title=label,
),
)
else:
fig = go.Figure(
data=[
go.Scatter(
x=[0],
y=[tasks[type].mean()],
)
],
layout=go.Layout(
xaxis=dict(autorange=True, title="Time (seconds)"),
yaxis=dict(title=label),
title=label,
),
)
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
|
def resource_time_series(tasks, type="psutil_process_time_user", label="CPU user time"):
tasks["epoch_time"] = (
pd.to_datetime(tasks["timestamp"]) - pd.Timestamp("1970-01-01")
) // pd.Timedelta("1s")
step = int(tasks["resource_monitoring_interval"][0])
start = tasks["epoch_time"].min()
end = tasks["epoch_time"].max()
tasks["relative_time"] = tasks["epoch_time"] - start
bins = pd.cut(
tasks["relative_time"], range(0, end - start + 1, step), include_lowest=True
)
df = tasks.groupby(bins, as_index=False)[type].mean()
df["time"] = step * df.index
fig = go.Figure(
data=[
go.Scatter(
x=df["time"],
y=df[type],
)
],
layout=go.Layout(
xaxis=dict(autorange=True, title="Time (seconds)"),
yaxis=dict(title=label),
title=label,
),
)
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
|
https://github.com/Parsl/parsl/issues/926
|
[2019-05-03 18:18:01,362] ERROR in app: Exception on /workflow/9294c39d-a64d-4ad5-9ff8-4b8d757c1af1/resource_usage [GET]
Traceback (most recent call last):
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/awoodard/ci/viz_server/viz_server/views.py", line 145, in workflow_resources
df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg'),
File "/Users/awoodard/ci/viz_server/viz_server/plots/default/workflow_resource_plots.py", line 15, in resource_distribution_plot
for i in np.arange(min_range, max_range + time_step, time_step):
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def workflow_resources(workflow_id):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
if workflow_details is None:
return render_template(
"error.html", message="Workflow %s could not be found" % workflow_id
)
df_resources = pd.read_sql_query(
"SELECT * FROM resource WHERE run_id='%s'" % (workflow_id), db.engine
)
if df_resources.empty:
return render_template(
"error.html",
message="Workflow %s does not have any resource usage records."
% workflow_id,
)
df_task = pd.read_sql_query(
"SELECT * FROM task WHERE run_id='%s'" % (workflow_id), db.engine
)
df_task_resources = pd.read_sql_query(
"""
SELECT task_id, timestamp, resource_monitoring_interval,
psutil_process_cpu_percent, psutil_process_time_user,
psutil_process_memory_percent, psutil_process_memory_resident
from resource
where run_id = '%s'
"""
% (workflow_id),
db.engine,
)
return render_template(
"resource_usage.html",
workflow_details=workflow_details,
user_time_distribution_avg_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_time_user",
label="CPU Time Distribution",
option="avg",
),
user_time_distribution_max_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_time_user",
label="CPU Time Distribution",
option="max",
),
memory_usage_distribution_avg_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_memory_resident",
label="Memory Distribution",
option="avg",
),
memory_usage_distribution_max_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_memory_resident",
label="Memory Distribution",
option="max",
),
user_time_time_series=resource_time_series(
df_task_resources, type="psutil_process_time_user", label="CPU User Time"
),
cpu_percent_time_series=resource_time_series(
df_task_resources,
type="psutil_process_cpu_percent",
label="CPU Utilization",
),
memory_percent_time_series=resource_time_series(
df_task_resources,
type="psutil_process_memory_percent",
label="Memory Utilization",
),
memory_resident_time_series=resource_time_series(
df_task_resources,
type="psutil_process_memory_resident",
label="Memory Usage",
),
)
|
def workflow_resources(workflow_id):
workflow_details = Workflow.query.filter_by(run_id=workflow_id).first()
if workflow_details is None:
return render_template(
"error.html", message="Workflow %s could not be found" % workflow_id
)
df_resources = pd.read_sql_query(
"SELECT * FROM resource WHERE run_id='%s'" % (workflow_id), db.engine
)
df_task = pd.read_sql_query(
"SELECT * FROM task WHERE run_id='%s'" % (workflow_id), db.engine
)
df_task_resources = pd.read_sql_query(
"""
SELECT task_id, timestamp, resource_monitoring_interval,
psutil_process_cpu_percent, psutil_process_time_user,
psutil_process_memory_percent, psutil_process_memory_resident
from resource
where run_id = '%s'
"""
% (workflow_id),
db.engine,
)
return render_template(
"resource_usage.html",
workflow_details=workflow_details,
user_time_distribution_avg_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_time_user",
label="CPU Time Distribution",
option="avg",
),
user_time_distribution_max_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_time_user",
label="CPU Time Distribution",
option="max",
),
memory_usage_distribution_avg_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_memory_resident",
label="Memory Distribution",
option="avg",
),
memory_usage_distribution_max_plot=resource_distribution_plot(
df_resources,
df_task,
type="psutil_process_memory_resident",
label="Memory Distribution",
option="max",
),
user_time_time_series=resource_time_series(
df_task_resources, type="psutil_process_time_user", label="CPU User Time"
),
cpu_percent_time_series=resource_time_series(
df_task_resources,
type="psutil_process_cpu_percent",
label="CPU Utilization",
),
memory_percent_time_series=resource_time_series(
df_task_resources,
type="psutil_process_memory_percent",
label="Memory Utilization",
),
memory_resident_time_series=resource_time_series(
df_task_resources,
type="psutil_process_memory_resident",
label="Memory Usage",
),
)
|
https://github.com/Parsl/parsl/issues/926
|
[2019-05-03 18:18:01,362] ERROR in app: Exception on /workflow/9294c39d-a64d-4ad5-9ff8-4b8d757c1af1/resource_usage [GET]
Traceback (most recent call last):
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/awoodard/software/miniconda3/envs/parsl/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/awoodard/ci/viz_server/viz_server/views.py", line 145, in workflow_resources
df_resources, df_task, type='psutil_process_time_user', label='CPU Time Distribution', option='avg'),
File "/Users/awoodard/ci/viz_server/viz_server/plots/default/workflow_resource_plots.py", line 15, in resource_distribution_plot
for i in np.arange(min_range, max_range + time_step, time_step):
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderExceptions or its subclasses
"""
if job_ids:
self._status()
return [self.resources[jid]["status"] for jid in job_ids]
|
def status(self, job_ids):
"""Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderExceptions or its subclasses
"""
self._status()
return [self.resources[jid]["status"] for jid in job_ids]
|
https://github.com/Parsl/parsl/issues/196
|
Traceback (most recent call last):
File "/Users/awoodard/software/anaconda3/envs/parsl_py36/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/Users/awoodard/software/anaconda3/envs/parsl_py36/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/Users/awoodard/ci/parsl/parsl/dataflow/flow_control.py", line 121, in _wake_up_timer
self.make_callback(kind='timer')
File "/Users/awoodard/ci/parsl/parsl/dataflow/flow_control.py", line 141, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/Users/awoodard/ci/parsl/parsl/dataflow/strategy.py", line 162, in _strategy_simple
status = exc.status()
File "/Users/awoodard/ci/parsl/parsl/executors/ipp.py", line 253, in status
status = self.execution_provider.status(self.engines)
File "/Users/awoodard/ci/libsubmit/libsubmit/providers/cluster_provider.py", line 174, in status
self._status()
File "/Users/awoodard/ci/libsubmit/libsubmit/providers/slurm/slurm.py", line 141, in _status
self.resources[job_id]['status'] = status
KeyError: '41784475_20'
|
KeyError
|
def __init__(
self,
config=None,
executors=None,
lazyErrors=True,
appCache=True,
rundir=None,
retries=0,
checkpointFiles=None,
checkpointMode=None,
data_manager=None,
):
"""Initialize the DataFlowKernel.
Please note that keyword args passed to the DFK here will always override
options passed in via the config.
KWargs:
- config (dict) : A single data object encapsulating all config attributes
- executors (list of Executor objs): Optional, kept for (somewhat) backward compatibility with 0.2.0
- lazyErrors(bool) : Default=True, allow workflow to continue on app failures.
- appCache (bool) :Enable caching of apps
- rundir (str) : Path to run directory. Defaults to ./runinfo/runNNN
- retries(int): Default=0, Set the number of retry attempts in case of failure
- checkpointFiles (list of str): List of filepaths to checkpoint files
- checkpointMode (None, 'dfk_exit', 'task_exit', 'periodic'): Method to use.
- data_manager (DataManager): User created DataManager
Returns:
DataFlowKernel object
"""
# Create run dirs for this run
self.rundir = make_rundir(config=config, path=rundir)
parsl.set_file_logger("{}/parsl.log".format(self.rundir), level=logging.DEBUG)
logger.info("Parsl version: {}".format(get_version()))
logger.info("Libsubmit version: {}".format(libsubmit.__version__))
# Update config with defaults
self._config = update_config(config, self.rundir)
# Set the data manager
if data_manager:
self.data_manager = data_manager
else:
self.data_manager = DataManager(config=self._config)
# Start the anonymized usage tracker and send init msg
self.usage_tracker = UsageTracker(self)
self.usage_tracker.send_message()
# Load Memoizer with checkpoints before we start the run.
if checkpointFiles:
checkpoint_src = checkpointFiles
elif self._config and self._config["globals"]["checkpointFiles"]:
checkpoint_src = self._config["globals"]["checkpointFiles"]
else:
checkpoint_src = None
cpts = self.load_checkpoints(checkpoint_src)
# Initialize the memoizer
self.memoizer = Memoizer(self, memoize=appCache, checkpoint=cpts)
self.checkpointed_tasks = 0
self._checkpoint_timer = None
if self._config:
self._executors_managed = True
# Create the executors
epf = EPF()
self.executors = epf.make(self.rundir, self._config)
# set global vars from config
self.lazy_fail = self._config["globals"].get("lazyErrors", lazyErrors)
self.fail_retries = self._config["globals"].get("retries", retries)
self.flowcontrol = FlowControl(self, self._config)
self.checkpoint_mode = self._config["globals"].get(
"checkpointMode", checkpointMode
)
if self.checkpoint_mode == "periodic":
period = self._config["globals"].get("checkpointPeriod", "00:30:00")
try:
h, m, s = map(int, period.split(":"))
checkpoint_period = (h * 3600) + (m * 60) + s
self._checkpoint_timer = Timer(
self.checkpoint, interval=checkpoint_period
)
except Exception as e:
logger.error(
"invalid checkpointPeriod provided:{0} expected HH:MM:SS".format(
period
)
)
self._checkpoint_timer = Timer(self.checkpoint, interval=(30 * 60))
else:
self._executors_managed = False
self.fail_retries = retries
self.lazy_fail = lazyErrors
self.executors = {i: x for i, x in enumerate(executors)}
self.flowcontrol = FlowNoControl(self, None)
self.checkpoint_mode = checkpointMode
self.task_count = 0
self.fut_task_lookup = {}
self.tasks = {}
self.task_launch_lock = threading.Lock()
logger.debug("Using executors: {0}".format(self.executors))
atexit.register(self.cleanup)
|
def __init__(
self,
config=None,
executors=None,
lazyErrors=True,
appCache=True,
rundir=None,
retries=0,
checkpointFiles=None,
checkpointMode=None,
data_manager=None,
):
"""Initialize the DataFlowKernel.
Please note that keyword args passed to the DFK here will always override
options passed in via the config.
KWargs:
- config (dict) : A single data object encapsulating all config attributes
- executors (list of Executor objs): Optional, kept for (somewhat) backward compatibility with 0.2.0
- lazyErrors(bool) : Default=True, allow workflow to continue on app failures.
- appCache (bool) :Enable caching of apps
- rundir (str) : Path to run directory. Defaults to ./runinfo/runNNN
- retries(int): Default=0, Set the number of retry attempts in case of failure
- checkpointFiles (list of str): List of filepaths to checkpoint files
- checkpointMode (None, 'dfk_exit', 'task_exit', 'periodic'): Method to use.
- data_manager (DataManager): User created DataManager
Returns:
DataFlowKernel object
"""
# Create run dirs for this run
self.rundir = make_rundir(config=config, path=rundir)
parsl.set_file_logger("{}/parsl.log".format(self.rundir), level=logging.DEBUG)
logger.info("Parsl version: {}".format(get_version()))
logger.info("Libsubmit version: {}".format(libsubmit.__version__))
# Update config with defaults
self._config = update_config(config, self.rundir)
# Set the data manager
if data_manager:
self.data_manager = data_manager
else:
self.data_manager = DataManager(config=self._config)
# Start the anonymized usage tracker and send init msg
self.usage_tracker = UsageTracker(self)
self.usage_tracker.send_message()
# Load checkpoints if any
cpts = self.load_checkpoints(checkpointFiles)
# Initialize the memoizer
self.memoizer = Memoizer(self, memoize=appCache, checkpoint=cpts)
self.checkpointed_tasks = 0
self._checkpoint_timer = None
if self._config:
self._executors_managed = True
# Create the executors
epf = EPF()
self.executors = epf.make(self.rundir, self._config)
# set global vars from config
self.lazy_fail = self._config["globals"].get("lazyErrors", lazyErrors)
self.fail_retries = self._config["globals"].get("retries", retries)
self.flowcontrol = FlowControl(self, self._config)
self.checkpoint_mode = self._config["globals"].get(
"checkpointMode", checkpointMode
)
if self.checkpoint_mode == "periodic":
period = self._config["globals"].get("checkpointPeriod", "00:30:00")
try:
h, m, s = map(int, period.split(":"))
checkpoint_period = (h * 3600) + (m * 60) + s
self._checkpoint_timer = Timer(
self.checkpoint, interval=checkpoint_period
)
except Exception as e:
logger.error(
"invalid checkpointPeriod provided:{0} expected HH:MM:SS".format(
period
)
)
self._checkpoint_timer = Timer(self.checkpoint, interval=(30 * 60))
else:
self._executors_managed = False
self.fail_retries = retries
self.lazy_fail = lazyErrors
self.executors = {i: x for i, x in enumerate(executors)}
self.flowcontrol = FlowNoControl(self, None)
self.checkpoint_mode = checkpointMode
self.task_count = 0
self.fut_task_lookup = {}
self.tasks = {}
self.task_launch_lock = threading.Lock()
logger.debug("Using executors: {0}".format(self.executors))
atexit.register(self.cleanup)
|
https://github.com/Parsl/parsl/issues/234
|
Traceback (most recent call last):
File "test_regression_233.py", line 38, in <module>
test_checkpoint_availability()
File "test_regression_233.py", line 27, in test_checkpoint_availability
original = run_checkpointed([])
File "test_regression_233.py", line 18, in run_checkpointed
x = cached_rand(i)
File "/home/yadu/src/parsl/parsl/app/app_factory.py", line 76, in __call__
return app_obj(*args, **kwargs)
File "/home/yadu/src/parsl/parsl/app/python_app.py", line 46, in __call__
**kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 499, in submit
task_id, func, *new_args, **kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 322, in launch_task
exec_fu.add_done_callback(partial(self.handle_update, task_id))
File "/usr/lib/python3.5/concurrent/futures/_base.py", line 376, in add_done_callback
fn(self)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 218, in handle_update
self.checkpoint(tasks=[task_id])
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 617, in checkpoint
if self.tasks[task_id]['app_fu'].done() and \
AttributeError: 'NoneType' object has no attribute 'done'
|
AttributeError
|
def handle_update(self, task_id, future, memo_cbk=False):
"""This function is called only as a callback from a task being done.
Move done task from runnable -> done
Move newly doable tasks from pending -> runnable , and launch
Args:
task_id (string) : Task id which is a uuid string
future (Future) : The future object corresponding to the task which
makes this callback
KWargs:
memo_cbk(Bool) : Indicates that the call is coming from a memo update,
that does not require additional memo updates.
"""
final_state_flag = False
try:
res = future.result()
if isinstance(res, RemoteException):
res.reraise()
except Exception as e:
logger.exception("Task {} failed".format(task_id))
# We keep the history separately, since the future itself could be
# tossed.
self.tasks[task_id]["fail_history"].append(future._exception)
self.tasks[task_id]["fail_count"] += 1
if not self.lazy_fail:
logger.debug("Eager fail, skipping retry logic")
raise e
if self.tasks[task_id]["fail_count"] <= self.fail_retries:
logger.debug("Task {} marked for retry".format(task_id))
self.tasks[task_id]["status"] = States.pending
else:
logger.info(
"Task {} failed after {} retry attempts".format(
task_id, self.fail_retries
)
)
self.tasks[task_id]["status"] = States.failed
final_state_flag = True
else:
logger.info("Task {} completed".format(task_id))
self.tasks[task_id]["status"] = States.done
final_state_flag = True
if not memo_cbk and final_state_flag is True:
# Update the memoizer with the new result if this is not a
# result from a memo lookup and the task has reached a terminal state.
self.memoizer.update_memo(task_id, self.tasks[task_id], future)
if self.checkpoint_mode is "task_exit":
self.checkpoint(tasks=[task_id])
# Identify tasks that have resolved dependencies and launch
for tid in list(self.tasks):
# Skip all non-pending tasks
if self.tasks[tid]["status"] != States.pending:
continue
if self._count_deps(self.tasks[tid]["depends"], tid) == 0:
# We can now launch *task*
new_args, kwargs, exceptions = self.sanitize_and_wrap(
task_id, self.tasks[tid]["args"], self.tasks[tid]["kwargs"]
)
self.tasks[tid]["args"] = new_args
self.tasks[tid]["kwargs"] = kwargs
if not exceptions:
# There are no dependency errors
exec_fu = None
# Acquire a lock, retest the state, launch
with self.task_launch_lock:
if self.tasks[tid]["status"] == States.pending:
self.tasks[tid]["status"] = States.running
exec_fu = self.launch_task(
tid, self.tasks[tid]["func"], *new_args, **kwargs
)
if exec_fu:
self.tasks[task_id]["exec_fu"] = exec_fu
try:
self.tasks[tid]["app_fu"].update_parent(exec_fu)
self.tasks[tid]["exec_fu"] = exec_fu
except AttributeError as e:
logger.error(
"Task {}: Caught AttributeError at update_parent".format(
tid
)
)
raise e
else:
logger.info("Task {} deferred due to dependency failure".format(tid))
# Raise a dependency exception
self.tasks[tid]["status"] = States.dep_fail
try:
fu = Future()
fu.retries_left = 0
self.tasks[tid]["exec_fu"] = fu
self.tasks[tid]["app_fu"].update_parent(fu)
fu.set_exception(DependencyError(exceptions, tid, None))
except AttributeError as e:
logger.error("Task {} AttributeError at update_parent".format(tid))
raise e
return
|
def handle_update(self, task_id, future, memo_cbk=False):
"""This function is called only as a callback from a task being done.
Move done task from runnable -> done
Move newly doable tasks from pending -> runnable , and launch
Args:
task_id (string) : Task id which is a uuid string
future (Future) : The future object corresponding to the task which
makes this callback
KWargs:
memo_cbk(Bool) : Indicates that the call is coming from a memo update,
that does not require additional memo updates.
"""
# TODO : Remove, this check is redundant
final_state_flag = False
if future.done():
try:
future.result()
except Exception as e:
logger.exception("Task {} failed".format(task_id))
# We keep the history separately, since the future itself could be
# tossed.
self.tasks[task_id]["fail_history"].append(future._exception)
self.tasks[task_id]["fail_count"] += 1
if not self.lazy_fail:
logger.debug("Eager fail, skipping retry logic")
raise e
if self.tasks[task_id]["fail_count"] <= self.fail_retries:
logger.debug("Task {} marked for retry".format(task_id))
self.tasks[task_id]["status"] = States.pending
else:
logger.info(
"Task {} failed after {} retry attempts".format(
task_id, self.fail_retries
)
)
self.tasks[task_id]["status"] = States.failed
final_state_flag = True
else:
logger.info("Task {} completed".format(task_id))
self.tasks[task_id]["status"] = States.done
final_state_flag = True
if not memo_cbk and final_state_flag is True:
# Update the memoizer with the new result if this is not a
# result from a memo lookup and the task has reached a terminal state.
self.memoizer.update_memo(task_id, self.tasks[task_id], future)
if self.checkpoint_mode is "task_exit":
self.checkpoint()
logger.debug("Task {} checkpoint created at task exit".format(task_id))
# Identify tasks that have resolved dependencies and launch
for tid in list(self.tasks):
# Skip all non-pending tasks
if self.tasks[tid]["status"] != States.pending:
continue
if self._count_deps(self.tasks[tid]["depends"], tid) == 0:
# We can now launch *task*
new_args, kwargs, exceptions = self.sanitize_and_wrap(
task_id, self.tasks[tid]["args"], self.tasks[tid]["kwargs"]
)
self.tasks[tid]["args"] = new_args
self.tasks[tid]["kwargs"] = kwargs
if not exceptions:
# There are no dependency errors
exec_fu = None
# Acquire a lock, retest the state, launch
with self.task_launch_lock:
if self.tasks[tid]["status"] == States.pending:
self.tasks[tid]["status"] = States.running
exec_fu = self.launch_task(
tid, self.tasks[tid]["func"], *new_args, **kwargs
)
if exec_fu:
self.tasks[task_id]["exec_fu"] = exec_fu
try:
self.tasks[tid]["app_fu"].update_parent(exec_fu)
self.tasks[tid]["exec_fu"] = exec_fu
except AttributeError as e:
logger.error(
"Task {}: Caught AttributeError at update_parent".format(
tid
)
)
raise e
else:
logger.info("Task {} deferred due to dependency failure".format(tid))
# Raise a dependency exception
self.tasks[tid]["status"] = States.dep_fail
try:
fu = Future()
fu.retries_left = 0
self.tasks[tid]["exec_fu"] = fu
self.tasks[tid]["app_fu"].update_parent(fu)
fu.set_exception(DependencyError(exceptions, tid, None))
except AttributeError as e:
logger.error("Task {} AttributeError at update_parent".format(tid))
raise e
return
|
https://github.com/Parsl/parsl/issues/234
|
Traceback (most recent call last):
File "test_regression_233.py", line 38, in <module>
test_checkpoint_availability()
File "test_regression_233.py", line 27, in test_checkpoint_availability
original = run_checkpointed([])
File "test_regression_233.py", line 18, in run_checkpointed
x = cached_rand(i)
File "/home/yadu/src/parsl/parsl/app/app_factory.py", line 76, in __call__
return app_obj(*args, **kwargs)
File "/home/yadu/src/parsl/parsl/app/python_app.py", line 46, in __call__
**kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 499, in submit
task_id, func, *new_args, **kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 322, in launch_task
exec_fu.add_done_callback(partial(self.handle_update, task_id))
File "/usr/lib/python3.5/concurrent/futures/_base.py", line 376, in add_done_callback
fn(self)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 218, in handle_update
self.checkpoint(tasks=[task_id])
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 617, in checkpoint
if self.tasks[task_id]['app_fu'].done() and \
AttributeError: 'NoneType' object has no attribute 'done'
|
AttributeError
|
def launch_task(self, task_id, executable, *args, **kwargs):
"""Handle the actual submission of the task to the executor layer.
If the app task has the sites attributes not set (default=='all')
the task is launched on a randomly selected executor from the
list of executors. This behavior could later be updates to support
binding to sites based on user specified criteria.
If the app task specifies a particular set of sites, it will be
targetted at those specific sites.
Args:
task_id (uuid string) : A uuid string that uniquely identifies the task
executable (callable) : A callable object
args (list of positional args)
kwargs (arbitrary keyword arguments)
Returns:
Future that tracks the execution of the submitted executable
"""
hit, memo_fu = self.memoizer.check_memo(task_id, self.tasks[task_id])
if hit:
self.handle_update(task_id, memo_fu, memo_cbk=True)
return memo_fu
target_sites = self.tasks[task_id]["sites"]
executor = None
if isinstance(target_sites, str) and target_sites.lower() == "all":
# Pick a random site from the list
site, executor = random.choice(list(self.executors.items()))
elif isinstance(target_sites, list):
# Pick a random site from user specified list
try:
site = random.choice(target_sites)
executor = self.executors[site]
except Exception as e:
logger.error(
"Task {}: requests invalid site [{}]".format(task_id, target_sites)
)
else:
logger.error(
"App {} specifies invalid site option, expects str|list".format(
self.tasks[task_id]["func"].__name__
)
)
exec_fu = executor.submit(executable, *args, **kwargs)
self.tasks[task_id]["status"] = States.running
exec_fu.retries_left = self.fail_retries - self.tasks[task_id]["fail_count"]
exec_fu.add_done_callback(partial(self.handle_update, task_id))
logger.info("Task {} launched on site {}".format(task_id, site))
return exec_fu
|
def launch_task(self, task_id, executable, *args, **kwargs):
"""Handle the actual submission of the task to the executor layer.
If the app task has the sites attributes not set (default=='all')
the task is launched on a randomly selected executor from the
list of executors. This behavior could later be updates to support
binding to sites based on user specified criteria.
If the app task specifies a particular set of sites, it will be
targetted at those specific sites.
Args:
task_id (uuid string) : A uuid string that uniquely identifies the task
executable (callable) : A callable object
args (list of positional args)
kwargs (arbitrary keyword arguments)
Returns:
Future that tracks the execution of the submitted executable
"""
hit, memo_fu = self.memoizer.check_memo(task_id, self.tasks[task_id])
if hit:
self.handle_update(task_id, memo_fu, memo_cbk=True)
return memo_fu
target_sites = self.tasks[task_id]["sites"]
executor = None
if isinstance(target_sites, str) and target_sites.lower() == "all":
# Pick a random site from the list
site, executor = random.choice(list(self.executors.items()))
elif isinstance(target_sites, list):
# Pick a random site from user specified list
try:
site = random.choice(target_sites)
executor = self.executors[site]
except Exception as e:
logger.error(
"Task {}: requests invalid site [{}]".format(task_id, target_sites)
)
else:
logger.error(
"App {} specifies invalid site option, expects str|list".format(
self.tasks[task_id]["func"].__name__
)
)
exec_fu = executor.submit(executable, *args, **kwargs)
exec_fu.retries_left = self.fail_retries - self.tasks[task_id]["fail_count"]
exec_fu.add_done_callback(partial(self.handle_update, task_id))
logger.info("Task {} launched on site {}".format(task_id, site))
return exec_fu
|
https://github.com/Parsl/parsl/issues/234
|
Traceback (most recent call last):
File "test_regression_233.py", line 38, in <module>
test_checkpoint_availability()
File "test_regression_233.py", line 27, in test_checkpoint_availability
original = run_checkpointed([])
File "test_regression_233.py", line 18, in run_checkpointed
x = cached_rand(i)
File "/home/yadu/src/parsl/parsl/app/app_factory.py", line 76, in __call__
return app_obj(*args, **kwargs)
File "/home/yadu/src/parsl/parsl/app/python_app.py", line 46, in __call__
**kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 499, in submit
task_id, func, *new_args, **kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 322, in launch_task
exec_fu.add_done_callback(partial(self.handle_update, task_id))
File "/usr/lib/python3.5/concurrent/futures/_base.py", line 376, in add_done_callback
fn(self)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 218, in handle_update
self.checkpoint(tasks=[task_id])
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 617, in checkpoint
if self.tasks[task_id]['app_fu'].done() and \
AttributeError: 'NoneType' object has no attribute 'done'
|
AttributeError
|
def submit(self, func, *args, parsl_sites="all", fn_hash=None, cache=False, **kwargs):
"""Add task to the dataflow system.
>>> IF all deps are met :
>>> send to the runnable queue and launch the task
>>> ELSE:
>>> post the task in the pending queue
Args:
- func : A function object
- *args : Args to the function
KWargs :
- parsl_sites (List|String) : List of sites this call could go to.
Default='all'
- fn_hash (Str) : Hash of the function and inputs
Default=None
- cache (Bool) : To enable memoization or not
- kwargs (dict) : Rest of the kwargs to the fn passed as dict.
Returns:
(AppFuture) [DataFutures,]
"""
task_id = self.task_count
self.task_count += 1
# Get the dep count and a list of dependencies for the task
dep_cnt, depends = self._count_all_deps(task_id, args, kwargs)
task_def = {
"depends": depends,
"sites": parsl_sites,
"func": func,
"func_name": func.__name__,
"args": args,
"kwargs": kwargs,
"fn_hash": fn_hash,
"memoize": cache,
"callback": None,
"dep_cnt": dep_cnt,
"exec_fu": None,
"checkpoint": None,
"fail_count": 0,
"fail_history": [],
"env": None,
"status": States.unsched,
"app_fu": None,
}
if task_id in self.tasks:
raise DuplicateTaskError("Task {0} in pending list".format(task_id))
else:
self.tasks[task_id] = task_def
# Extract stdout and stderr to pass to AppFuture:
task_stdout = kwargs.get("stdout", None)
task_stderr = kwargs.get("stderr", None)
logger.info(
"Task {} submitted for App {}, waiting on tasks {}".format(
task_id, task_def["func_name"], [fu.tid for fu in depends]
)
)
# Handle three cases here:
# No pending deps
# - But has failures -> dep_fail
# - No failures -> running
# Has pending deps -> pending
if dep_cnt == 0:
new_args, kwargs, exceptions = self.sanitize_and_wrap(task_id, args, kwargs)
self.tasks[task_id]["args"] = new_args
self.tasks[task_id]["kwargs"] = kwargs
if not exceptions:
self.tasks[task_id]["exec_fu"] = self.launch_task(
task_id, func, *new_args, **kwargs
)
self.tasks[task_id]["app_fu"] = AppFuture(
self.tasks[task_id]["exec_fu"],
tid=task_id,
stdout=task_stdout,
stderr=task_stderr,
)
logger.debug(
"Task {} launched with AppFut:{}".format(task_id, task_def["app_fu"])
)
else:
self.tasks[task_id]["exec_fu"] = None
app_fu = AppFuture(
self.tasks[task_id]["exec_fu"],
tid=task_id,
stdout=task_stdout,
stderr=task_stderr,
)
app_fu.set_exception(
DependencyError(exceptions, "Failures in input dependencies", None)
)
self.tasks[task_id]["app_fu"] = app_fu
self.tasks[task_id]["status"] = States.dep_fail
logger.debug(
"Task {} failed due to failure in parent task(s):{}".format(
task_id, task_def["app_fu"]
)
)
else:
# Send to pending, create the AppFuture with no parent and have it set
# when an executor future is available.
self.tasks[task_id]["app_fu"] = AppFuture(
None, tid=task_id, stdout=task_stdout, stderr=task_stderr
)
self.tasks[task_id]["status"] = States.pending
logger.debug(
"Task {} launched with AppFut:{}".format(task_id, task_def["app_fu"])
)
return task_def["app_fu"]
|
def submit(self, func, *args, parsl_sites="all", fn_hash=None, cache=False, **kwargs):
"""Add task to the dataflow system.
>>> IF all deps are met :
>>> send to the runnable queue and launch the task
>>> ELSE:
>>> post the task in the pending queue
Args:
- func : A function object
- *args : Args to the function
KWargs :
- parsl_sites (List|String) : List of sites this call could go to.
Default='all'
- fn_hash (Str) : Hash of the function and inputs
Default=None
- cache (Bool) : To enable memoization or not
- kwargs (dict) : Rest of the kwargs to the fn passed as dict.
Returns:
(AppFuture) [DataFutures,]
"""
task_id = self.task_count
self.task_count += 1
# Get the dep count and a list of dependencies for the task
dep_cnt, depends = self._count_all_deps(task_id, args, kwargs)
task_def = {
"depends": depends,
"sites": parsl_sites,
"func": func,
"func_name": func.__name__,
"args": args,
"kwargs": kwargs,
"fn_hash": fn_hash,
"memoize": cache,
"callback": None,
"dep_cnt": dep_cnt,
"exec_fu": None,
"checkpoint": None,
"fail_count": 0,
"fail_history": [],
"env": None,
"status": States.unsched,
"app_fu": None,
}
if task_id in self.tasks:
raise DuplicateTaskError("Task {0} in pending list".format(task_id))
else:
self.tasks[task_id] = task_def
# Extract stdout and stderr to pass to AppFuture:
task_stdout = kwargs.get("stdout", None)
task_stderr = kwargs.get("stderr", None)
logger.info(
"Task {} submitted for App {}, waiting on tasks {}".format(
task_id, task_def["func_name"], [fu.tid for fu in depends]
)
)
# Handle three cases here:
# No pending deps
# - But has failures -> dep_fail
# - No failures -> running
# Has pending deps -> pending
if dep_cnt == 0:
new_args, kwargs, exceptions = self.sanitize_and_wrap(task_id, args, kwargs)
self.tasks[task_id]["args"] = new_args
self.tasks[task_id]["kwargs"] = kwargs
if not exceptions:
self.tasks[task_id]["exec_fu"] = self.launch_task(
task_id, func, *new_args, **kwargs
)
self.tasks[task_id]["app_fu"] = AppFuture(
self.tasks[task_id]["exec_fu"],
tid=task_id,
stdout=task_stdout,
stderr=task_stderr,
)
self.tasks[task_id]["status"] = States.running
logger.debug(
"Task {} launched with AppFut:{}".format(task_id, task_def["app_fu"])
)
else:
self.tasks[task_id]["exec_fu"] = None
app_fu = AppFuture(
self.tasks[task_id]["exec_fu"],
tid=task_id,
stdout=task_stdout,
stderr=task_stderr,
)
app_fu.set_exception(
DependencyError(exceptions, "Failures in input dependencies", None)
)
self.tasks[task_id]["app_fu"] = app_fu
self.tasks[task_id]["status"] = States.dep_fail
logger.debug(
"Task {} failed due to failure in parent task(s):{}".format(
task_id, task_def["app_fu"]
)
)
else:
# Send to pending, create the AppFuture with no parent and have it set
# when an executor future is available.
self.tasks[task_id]["app_fu"] = AppFuture(
None, tid=task_id, stdout=task_stdout, stderr=task_stderr
)
self.tasks[task_id]["status"] = States.pending
logger.debug(
"Task {} launched with AppFut:{}".format(task_id, task_def["app_fu"])
)
return task_def["app_fu"]
|
https://github.com/Parsl/parsl/issues/234
|
Traceback (most recent call last):
File "test_regression_233.py", line 38, in <module>
test_checkpoint_availability()
File "test_regression_233.py", line 27, in test_checkpoint_availability
original = run_checkpointed([])
File "test_regression_233.py", line 18, in run_checkpointed
x = cached_rand(i)
File "/home/yadu/src/parsl/parsl/app/app_factory.py", line 76, in __call__
return app_obj(*args, **kwargs)
File "/home/yadu/src/parsl/parsl/app/python_app.py", line 46, in __call__
**kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 499, in submit
task_id, func, *new_args, **kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 322, in launch_task
exec_fu.add_done_callback(partial(self.handle_update, task_id))
File "/usr/lib/python3.5/concurrent/futures/_base.py", line 376, in add_done_callback
fn(self)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 218, in handle_update
self.checkpoint(tasks=[task_id])
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 617, in checkpoint
if self.tasks[task_id]['app_fu'].done() and \
AttributeError: 'NoneType' object has no attribute 'done'
|
AttributeError
|
def checkpoint(self, tasks=None):
"""Checkpoint the dfk incrementally to a checkpoint file.
When called, every task that has been completed yet not
checkpointed is checkpointed to a file.
Kwargs:
- tasks (List of task ids) : List of task ids to checkpoint. Default=None
if set to None, we iterate over all tasks held by the DFK.
.. note::
Checkpointing only works if memoization is enabled
Returns:
Checkpoint dir if checkpoints were written successfully.
By default the checkpoints are written to the RUNDIR of the current
run under RUNDIR/checkpoints/{tasks.pkl, dfk.pkl}
"""
checkpoint_queue = None
if tasks:
checkpoint_queue = tasks
else:
checkpoint_queue = self.tasks
checkpoint_dir = "{0}/checkpoint".format(self.rundir)
checkpoint_dfk = checkpoint_dir + "/dfk.pkl"
checkpoint_tasks = checkpoint_dir + "/tasks.pkl"
if not os.path.exists(checkpoint_dir):
try:
os.makedirs(checkpoint_dir)
except FileExistsError as e:
pass
with open(checkpoint_dfk, "wb") as f:
state = {
"config": self.config,
"rundir": self.rundir,
"task_count": self.task_count,
}
pickle.dump(state, f)
count = 0
with open(checkpoint_tasks, "ab") as f:
for task_id in checkpoint_queue:
if (
not self.tasks[task_id]["checkpoint"]
and self.tasks[task_id]["status"] == States.done
):
hashsum = self.tasks[task_id]["hashsum"]
if not hashsum:
continue
t = {"hash": hashsum, "exception": None, "result": None}
try:
# Asking for the result will raise an exception if
# the app had failed. Should we even checkpoint these?
# TODO : Resolve this question ?
r = self.memoizer.hash_lookup(hashsum).result()
except Exception as e:
t["exception"] = e
else:
t["result"] = r
# We are using pickle here since pickle dumps to a file in 'ab'
# mode behave like a incremental log.
pickle.dump(t, f)
count += 1
self.tasks[task_id]["checkpoint"] = True
logger.debug("Task {} checkpointed".format(task_id))
self.checkpointed_tasks += count
if count == 0:
if self.checkpointed_tasks == 0:
logger.warn("No tasks checkpointed, please ensure caching is enabled")
else:
logger.debug("No tasks checkpointed")
else:
logger.info("Done checkpointing {} tasks".format(count))
return checkpoint_dir
|
def checkpoint(self):
"""Checkpoint the dfk incrementally to a checkpoint file.
When called, every task that has been completed yet not
checkpointed is checkpointed to a file.
.. note::
Checkpointing only works if memoization is enabled
Returns:
Checkpoint dir if checkpoints were written successfully.
By default the checkpoints are written to the RUNDIR of the current
run under RUNDIR/checkpoints/{tasks.pkl, dfk.pkl}
"""
logger.info("Checkpointing.. ")
checkpoint_dir = "{0}/checkpoint".format(self.rundir)
checkpoint_dfk = checkpoint_dir + "/dfk.pkl"
checkpoint_tasks = checkpoint_dir + "/tasks.pkl"
if not os.path.exists(checkpoint_dir):
try:
os.makedirs(checkpoint_dir)
except FileExistsError as e:
pass
with open(checkpoint_dfk, "wb") as f:
state = {
"config": self.config,
"rundir": self.rundir,
"task_count": self.task_count,
}
pickle.dump(state, f)
count = 0
with open(checkpoint_tasks, "ab") as f:
for task_id in self.tasks:
if (
self.tasks[task_id]["app_fu"].done()
and not self.tasks[task_id]["checkpoint"]
):
hashsum = self.tasks[task_id]["hashsum"]
if not hashsum:
continue
t = {"hash": hashsum, "exception": None, "result": None}
try:
# Asking for the result will raise an exception if
# the app had failed. Should we even checkpoint these?
# TODO : Resolve this question ?
r = self.memoizer.hash_lookup(hashsum).result()
except Exception as e:
t["exception"] = e
else:
t["result"] = r
# We are using pickle here since pickle dumps to a file in 'ab'
# mode behave like a incremental log.
pickle.dump(t, f)
count += 1
self.tasks[task_id]["checkpoint"] = True
logger.debug("Task {} checkpointed".format(task_id))
self.checkpointed_tasks += count
if count == 0:
if self.checkpointed_tasks == 0:
logger.warn("No tasks checkpointed, please ensure caching is enabled")
else:
logger.debug("No tasks checkpointed")
else:
logger.info("Done checkpointing {} tasks".format(count))
return checkpoint_dir
|
https://github.com/Parsl/parsl/issues/234
|
Traceback (most recent call last):
File "test_regression_233.py", line 38, in <module>
test_checkpoint_availability()
File "test_regression_233.py", line 27, in test_checkpoint_availability
original = run_checkpointed([])
File "test_regression_233.py", line 18, in run_checkpointed
x = cached_rand(i)
File "/home/yadu/src/parsl/parsl/app/app_factory.py", line 76, in __call__
return app_obj(*args, **kwargs)
File "/home/yadu/src/parsl/parsl/app/python_app.py", line 46, in __call__
**kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 499, in submit
task_id, func, *new_args, **kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 322, in launch_task
exec_fu.add_done_callback(partial(self.handle_update, task_id))
File "/usr/lib/python3.5/concurrent/futures/_base.py", line 376, in add_done_callback
fn(self)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 218, in handle_update
self.checkpoint(tasks=[task_id])
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 617, in checkpoint
if self.tasks[task_id]['app_fu'].done() and \
AttributeError: 'NoneType' object has no attribute 'done'
|
AttributeError
|
def _load_checkpoints(self, checkpointDirs):
"""Load a checkpoint file into a lookup table.
The data being loaded from the pickle file mostly contains input
attributes of the task: func, args, kwargs, env...
To simplify the check of whether the exact task has been completed
in the checkpoint, we hash these input params and use it as the key
for the memoized lookup table.
Args:
- checkpointDirs (list) : List of filepaths to checkpoints
Eg. ['runinfo/001', 'runinfo/002']
Returns:
- memoized_lookup_table (dict)
"""
memo_lookup_table = {}
for checkpoint_dir in checkpointDirs:
logger.info("Loading checkpoints from {}".format(checkpoint_dir))
checkpoint_file = os.path.join(checkpoint_dir, "tasks.pkl")
try:
with open(checkpoint_file, "rb") as f:
while True:
try:
data = pickle.load(f)
# Copy and hash only the input attributes
memo_fu = Future()
if data["exception"]:
memo_fu.set_exception(data["exception"])
else:
memo_fu.set_result(data["result"])
memo_lookup_table[data["hash"]] = memo_fu
except EOFError:
# Done with the checkpoint file
break
except FileNotFoundError:
reason = "Checkpoint file was not found: {}".format(checkpoint_file)
logger.error(reason)
raise BadCheckpoint(reason)
except Exception as e:
reason = "Failed to load Checkpoint: {}".format(checkpoint_file)
logger.error(reason)
raise BadCheckpoint(reason)
logger.info(
"Completed loading checkpoint:{0} with {1} tasks".format(
checkpoint_file, len(memo_lookup_table.keys())
)
)
return memo_lookup_table
|
def _load_checkpoints(self, checkpointDirs):
"""Load a checkpoint file into a lookup table.
The data being loaded from the pickle file mostly contains input
attributes of the task: func, args, kwargs, env...
To simplify the check of whether the exact task has been completed
in the checkpoint, we hash these input params and use it as the key
for the memoized lookup table.
Args:
- checkpointDirs (list) : List of filepaths to checkpoints
Eg. ['runinfo/001', 'runinfo/002']
Returns:
- memoized_lookup_table (dict)
"""
memo_lookup_table = {}
for checkpoint_dir in checkpointDirs:
checkpoint_file = os.path.join(checkpoint_dir, "tasks.pkl")
try:
with open(checkpoint_file, "rb") as f:
while True:
try:
data = pickle.load(f)
# Copy and hash only the input attributes
memo_fu = Future()
if data["exception"]:
memo_fu.set_exception(data["exception"])
else:
memo_fu.set_result(data["result"])
memo_lookup_table[data["hash"]] = memo_fu
except EOFError:
# Done with the checkpoint file
break
except FileNotFoundError:
reason = "Checkpoint file was not found: {}".format(checkpoint_file)
logger.error(reason)
raise BadCheckpoint(reason)
except Exception as e:
reason = "Failed to load Checkpoint: {}".format(checkpoint_file)
logger.error(reason)
raise BadCheckpoint(reason)
logger.info(
"Completed loading checkpoint:{0} with {1} tasks".format(
checkpoint_file, len(memo_lookup_table.keys())
)
)
return memo_lookup_table
|
https://github.com/Parsl/parsl/issues/234
|
Traceback (most recent call last):
File "test_regression_233.py", line 38, in <module>
test_checkpoint_availability()
File "test_regression_233.py", line 27, in test_checkpoint_availability
original = run_checkpointed([])
File "test_regression_233.py", line 18, in run_checkpointed
x = cached_rand(i)
File "/home/yadu/src/parsl/parsl/app/app_factory.py", line 76, in __call__
return app_obj(*args, **kwargs)
File "/home/yadu/src/parsl/parsl/app/python_app.py", line 46, in __call__
**kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 499, in submit
task_id, func, *new_args, **kwargs)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 322, in launch_task
exec_fu.add_done_callback(partial(self.handle_update, task_id))
File "/usr/lib/python3.5/concurrent/futures/_base.py", line 376, in add_done_callback
fn(self)
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 218, in handle_update
self.checkpoint(tasks=[task_id])
File "/home/yadu/src/parsl/parsl/dataflow/dflow.py", line 617, in checkpoint
if self.tasks[task_id]['app_fu'].done() and \
AttributeError: 'NoneType' object has no attribute 'done'
|
AttributeError
|
def __repr__(self):
# The DataFuture could be wrapping an AppFuture whose parent is a Future
# check to find the top level parent
if isinstance(self.parent, AppFuture):
parent = self.parent.parent
else:
parent = self.parent
if parent:
with parent._condition:
if parent._state == FINISHED:
if parent._exception:
return "<%s at %#x state=%s raised %s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
parent._exception.__class__.__name__,
)
else:
return "<%s at %#x state=%s returned %s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
self.filepath + "_file",
)
return "<%s at %#x state=%s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
)
else:
return "<%s at %#x state=%s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
)
|
def __repr__(self):
parent = self.parent.parent
if parent:
with parent._condition:
if parent._state == FINISHED:
if parent._exception:
return "<%s at %#x state=%s raised %s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
parent._exception.__class__.__name__,
)
else:
return "<%s at %#x state=%s returned %s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
self.filepath + "_file",
)
return "<%s at %#x state=%s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[parent._state],
)
else:
return "<%s at %#x state=%s>" % (
self.__class__.__name__,
id(self),
_STATE_TO_DESCRIPTION_MAP[self._state],
)
|
https://github.com/Parsl/parsl/issues/164
|
In [9]: from parsl import *
In [10]: dfk = DataFlowKernel(config=config)
In [11]: unsorted_file = File("globus://037f054a-15cf-11e8-b611-0ac6873fc732/unsorted.txt")
In [12]: unsorted_file.stage_in()
DEBUG:parsl.app.futures:Creating DataFuture with parent : None
DEBUG:parsl.app.futures:Filepath : /Users/awoodard/unsorted.txt
Out[12]: ---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~/software/anaconda3/envs/parsl_py36/lib/python3.6/site-packages/IPython/core/formatters.py in __call__(self, obj)
700 type_pprinters=self.type_printers,
701 deferred_pprinters=self.deferred_printers)
--> 702 printer.pretty(obj)
703 printer.flush()
704 return stream.getvalue()
~/software/anaconda3/envs/parsl_py36/lib/python3.6/site-packages/IPython/lib/pretty.py in pretty(self, obj)
393 if callable(meth):
394 return meth(obj, self, cycle)
--> 395 return _default_pprint(obj, self, cycle)
396 finally:
397 self.end_group()
~/software/anaconda3/envs/parsl_py36/lib/python3.6/site-packages/IPython/lib/pretty.py in _default_pprint(obj, p, cycle)
508 if _safe_getattr(klass, '__repr__', None) is not object.__repr__:
509 # A user-provided repr. Find newlines and replace them with p.break_()
--> 510 _repr_pprint(obj, p, cycle)
511 return
512 p.begin_group(1, '<')
~/software/anaconda3/envs/parsl_py36/lib/python3.6/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
699 """A pprint that just redirects to the normal repr function."""
700 # Find newlines and replace them with p.break_()
--> 701 output = repr(obj)
702 for idx,output_line in enumerate(output.splitlines()):
703 if idx:
~/ci/parsl/parsl/app/futures.py in __repr__(self)
182 def __repr__(self):
183
--> 184 parent = self.parent.parent
185 if parent:
186 with parent._condition:
AttributeError: 'Future' object has no attribute 'parent'
|
AttributeError
|
def _upload_dependencies_to_object_store(
self, runtime_configuration, pipeline_name, operation
):
operation_artifact_archive = self._get_dependency_archive_name(operation)
cos_directory = pipeline_name
# upload operation dependencies to object store
try:
t0 = time.time()
dependency_archive_path = self._generate_dependency_archive(operation)
self.log_pipeline_info(
pipeline_name,
f"generated dependency archive: {dependency_archive_path}",
operation_name=operation.name,
duration=(time.time() - t0),
)
cos_client = CosClient(config=runtime_configuration)
t0 = time.time()
cos_client.upload_file_to_dir(
dir=cos_directory,
file_name=operation_artifact_archive,
file_path=dependency_archive_path,
)
self.log_pipeline_info(
pipeline_name,
f"uploaded dependency archive to: {cos_directory}/{operation_artifact_archive}",
operation_name=operation.name,
duration=(time.time() - t0),
)
except FileNotFoundError as ex:
self.log.error(
"Dependencies were not found building archive for operation: {}".format(
operation.name
),
exc_info=True,
)
raise FileNotFoundError(
"Node '{}' referenced dependencies that were not found: {}".format(
operation.name, ex
)
) from ex
except MaxRetryError as ex:
cos_endpoint = runtime_configuration.metadata.get("cos_endpoint")
self.log.error(
"Connection was refused when attempting to connect to : {}".format(
cos_endpoint
),
exc_info=True,
)
raise RuntimeError(
"Connection was refused when attempting to upload artifacts to : '{}'. Please "
"check your object storage settings. ".format(cos_endpoint)
) from ex
except BaseException as ex:
self.log.error(
"Error uploading artifacts to object storage for operation: {}".format(
operation.name
),
exc_info=True,
)
raise ex from ex
|
def _upload_dependencies_to_object_store(
self, runtime_configuration, pipeline_name, operation
):
operation_artifact_archive = self._get_dependency_archive_name(operation)
cos_directory = pipeline_name
# upload operation dependencies to object store
try:
t0 = time.time()
dependency_archive_path = self._generate_dependency_archive(operation)
self.log_pipeline_info(
pipeline_name,
f"generated dependency archive: {dependency_archive_path}",
operation_name=operation.name,
duration=(time.time() - t0),
)
cos_client = CosClient(config=runtime_configuration)
t0 = time.time()
cos_client.upload_file_to_dir(
dir=cos_directory,
file_name=operation_artifact_archive,
file_path=dependency_archive_path,
)
self.log_pipeline_info(
pipeline_name,
f"uploaded dependency archive to: {cos_directory}/{operation_artifact_archive}",
operation_name=operation.name,
duration=(time.time() - t0),
)
except FileNotFoundError as ex:
self.log.error(
"Dependencies were not found building archive for operation: {}".format(
operation.name
),
exc_info=True,
)
raise FileNotFoundError(
"Node '{}' referenced dependencies that were not found: {}".format(
operation.name, ex
)
)
except BaseException as ex:
self.log.error(
"Error uploading artifacts to object storage for operation: {}".format(
operation.name
),
exc_info=True,
)
raise ex from ex
|
https://github.com/elyra-ai/elyra/issues/1376
|
[I 2021-03-04 17:28:22.928 ServerApp] Creating pipeline definition as a .py file
[I 2021-03-04 17:28:22.949 ServerApp] airflow 'hello_world_apache_airflow-0304172822' - processing pipeline dependencies to: https://minio-service.kubernetes:9000 bucket: airflow-pipeline-artifacts folder: hello_world_apache_airflow-0304172822
[I 2021-03-04 17:28:22.949 ServerApp] airflow 'hello_world_apache_airflow-0304172822':'load_data' - processing operation dependencies for id: a1de79c3-2be4-4fc3-ab86-4959b5291252
[I 2021-03-04 17:28:22.951 ServerApp] airflow 'hello_world_apache_airflow-0304172822':'load_data' - generated dependency archive: /var/folders/h8/l3qh8v192rv_vvggc6wyphzc0000gn/T/elyra/load_data-a1de79c3-2be4-4fc3-ab86-4959b5291252.tar.gz (0.002 secs)
Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51ee6b950>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known')': /airflow-pipeline-artifacts?location=
Retrying (Retry(total=3, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51eed9a10>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known')': /airflow-pipeline-artifacts?location=
Retrying (Retry(total=2, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51ee9e710>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known')': /airflow-pipeline-artifacts?location=
Retrying (Retry(total=1, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51ee9ef90>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known')': /airflow-pipeline-artifacts?location=
Retrying (Retry(total=0, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51ee99310>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known')': /airflow-pipeline-artifacts?location=
[E 2021-03-04 17:28:28.964 ServerApp] Error uploading artifacts to object storage for operation: load_data
Traceback (most recent call last):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connection.py", line 170, in _new_conn
(self._dns_host, self.port), self.timeout, **extra_kw
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/util/connection.py", line 73, in create_connection
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/socket.py", line 752, in getaddrinfo
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
socket.gaierror: [Errno 8] nodename nor servname provided, or not known
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 706, in urlopen
chunked=chunked,
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 382, in _make_request
self._validate_conn(conn)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 1010, in _validate_conn
conn.connect()
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connection.py", line 353, in connect
conn = self._new_conn()
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connection.py", line 182, in _new_conn
self, "Failed to establish a new connection: %s" % e
urllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPSConnection object at 0x7fd51ee36210>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 218, in _upload_dependencies_to_object_store
cos_client = CosClient(config=runtime_configuration)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/util/cos.py", line 41, in __init__
self.client = self.__initialize_object_store()
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/util/cos.py", line 53, in __initialize_object_store
if not self.client.bucket_exists(self.bucket):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 404, in bucket_exists
self._url_open('HEAD', bucket_name=bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2189, in _url_open
region = self._get_bucket_region(bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2067, in _get_bucket_region
region = self._get_bucket_location(bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2102, in _get_bucket_location
headers=headers)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/poolmanager.py", line 375, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
[Previous line repeated 2 more times]
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 756, in urlopen
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/util/retry.py", line 573, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='minio-service.kubernetes', port=9000): Max retries exceeded with url: /airflow-pipeline-artifacts?location= (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51ee36210>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known'))
[E 2021-03-04 17:28:28.972 ServerApp] Uncaught exception POST /elyra/pipeline/export?1614907702924 (::1)
HTTPServerRequest(protocol='http', host='localhost:8888', method='POST', uri='/elyra/pipeline/export?1614907702924', version='HTTP/1.1', remote_ip='::1')
Traceback (most recent call last):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/tornado/web.py", line 1704, in _execute
result = await result
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/handlers.py", line 54, in post
pipeline_overwrite
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 86, in export
None, processor.export, pipeline, pipeline_export_format, pipeline_export_path, overwrite)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_airflow.py", line 123, in export
pipeline_name=pipeline_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_airflow.py", line 256, in create_pipeline_file
notebook_ops = self._cc_pipeline(pipeline, pipeline_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_airflow.py", line 219, in _cc_pipeline
operation)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 238, in _upload_dependencies_to_object_store
raise ex from ex
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 218, in _upload_dependencies_to_object_store
cos_client = CosClient(config=runtime_configuration)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/util/cos.py", line 41, in __init__
self.client = self.__initialize_object_store()
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/util/cos.py", line 53, in __initialize_object_store
if not self.client.bucket_exists(self.bucket):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 404, in bucket_exists
self._url_open('HEAD', bucket_name=bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2189, in _url_open
region = self._get_bucket_region(bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2067, in _get_bucket_region
region = self._get_bucket_location(bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2102, in _get_bucket_location
headers=headers)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/poolmanager.py", line 375, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
[Previous line repeated 2 more times]
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 756, in urlopen
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/util/retry.py", line 573, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='minio-service.kubernetes', port=9000): Max retries exceeded with url: /airflow-pipeline-artifacts?location= (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51ee36210>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known'))
[E 2021-03-04 17:28:28.975 ServerApp] Uncaught exception in write_error
Traceback (most recent call last):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/tornado/web.py", line 1704, in _execute
result = await result
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/handlers.py", line 54, in post
pipeline_overwrite
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 86, in export
None, processor.export, pipeline, pipeline_export_format, pipeline_export_path, overwrite)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_airflow.py", line 123, in export
pipeline_name=pipeline_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_airflow.py", line 256, in create_pipeline_file
notebook_ops = self._cc_pipeline(pipeline, pipeline_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_airflow.py", line 219, in _cc_pipeline
operation)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 238, in _upload_dependencies_to_object_store
raise ex from ex
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 218, in _upload_dependencies_to_object_store
cos_client = CosClient(config=runtime_configuration)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/util/cos.py", line 41, in __init__
self.client = self.__initialize_object_store()
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/util/cos.py", line 53, in __initialize_object_store
if not self.client.bucket_exists(self.bucket):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 404, in bucket_exists
self._url_open('HEAD', bucket_name=bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2189, in _url_open
region = self._get_bucket_region(bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2067, in _get_bucket_region
region = self._get_bucket_location(bucket_name)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/minio/api.py", line 2102, in _get_bucket_location
headers=headers)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/poolmanager.py", line 375, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 796, in urlopen
**response_kw
[Previous line repeated 2 more times]
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/connectionpool.py", line 756, in urlopen
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/urllib3/util/retry.py", line 573, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='minio-service.kubernetes', port=9000): Max retries exceeded with url: /airflow-pipeline-artifacts?location= (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7fd51ee36210>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/tornado/web.py", line 1217, in send_error
self.write_error(status_code, **kwargs)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/util/http.py", line 75, in write_error
self.set_status(status_code, reason=reply['reason'])
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/tornado/web.py", line 358, in set_status
self._reason = escape.native_str(reason)
File "/opt/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/tornado/escape.py", line 228, in to_unicode
raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
TypeError: Expected bytes, unicode, or None; got <class 'urllib3.exceptions.NewConnectionError'>
|
urllib3.exceptions.NewConnectionError
|
def process(self, operation: Operation):
filepath = self.get_valid_filepath(operation.filename)
file_dir = os.path.dirname(filepath)
file_name = os.path.basename(filepath)
self.log.debug(f"Processing python script: {filepath}")
argv = ["python3", filepath, "--PYTHONHOME", file_dir]
envs = os.environ # Make sure this process's env is "available" in subprocess
envs.update(operation.env_vars_as_dict())
t0 = time.time()
try:
run(argv, cwd=file_dir, env=envs, check=True)
except Exception as ex:
raise RuntimeError(f"Internal error executing {filepath}: {ex}") from ex
t1 = time.time()
duration = t1 - t0
self.log.debug(f"Execution of {file_name} took {duration:.3f} secs.")
|
def process(self, operation: Operation):
filepath = self.get_valid_filepath(operation.filename)
file_dir = os.path.dirname(filepath)
file_name = os.path.basename(filepath)
self.log.debug(f"Processing python script: {filepath}")
argv = ["python3", filepath, "--PYTHONHOME", file_dir]
envs = operation.env_vars_as_dict()
t0 = time.time()
try:
run(argv, cwd=file_dir, env=envs, check=True)
except Exception as ex:
raise RuntimeError(f"Internal error executing {filepath}: {ex}") from ex
t1 = time.time()
duration = t1 - t0
self.log.debug(f"Execution of {file_name} took {duration:.3f} secs.")
|
https://github.com/elyra-ai/elyra/issues/1042
|
[I 10:35:22.335 LabApp] local 'request' - processing pipeline
Traceback (most recent call last):
File "/Users/patti/meetups/l_a_l_11_09/test_workspace/my-examples-repo/pipelines/hello_world/request.py", line 1, in <module>
import requests
ModuleNotFoundError: No module named 'requests'
[E 10:35:22.403 LabApp] Uncaught exception POST /elyra/pipeline/schedule?1604687722331 (::1)
HTTPServerRequest(protocol='http', host='localhost:8888', method='POST', uri='/elyra/pipeline/schedule?1604687722331', version='HTTP/1.1', remote_ip='::1')
Traceback (most recent call last):
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 220, in process
run(argv, cwd=file_dir, env=envs, check=True)
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/subprocess.py", line 512, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['python3', '/Users/patti/meetups/l_a_l_11_09/test_workspace/my-examples-repo/pipelines/hello_world/request.py', '--PYTHONHOME', '/Users/patti/meetups/l_a_l_11_09/test_workspace/my-examples-repo/pipelines/hello_world']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 74, in process
operation_processor.process(operation)
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 222, in process
raise RuntimeError(f'Internal error executing {filepath}: {ex}') from ex
RuntimeError: Internal error executing /Users/patti/meetups/l_a_l_11_09/test_workspace/my-examples-repo/pipelines/hello_world/request.py: Command '['python3', '/Users/patti/meetups/l_a_l_11_09/test_workspace/my-examples-repo/pipelines/hello_world/request.py', '--PYTHONHOME', '/Users/patti/meetups/l_a_l_11_09/test_workspace/my-examples-repo/pipelines/hello_world']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/tornado/web.py", line 1703, in _execute
result = await result
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/handlers.py", line 89, in post
response = await PipelineProcessorManager.instance().process(pipeline)
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 70, in process
res = await asyncio.get_event_loop().run_in_executor(None, processor.process, pipeline)
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/Users/patti/anaconda3/envs/elyra_ga/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 79, in process
raise RuntimeError(f'Error processing operation {operation.name}.') from ex
RuntimeError: Error processing operation request.
[E 10:35:22.407 LabApp] {
"Host": "localhost:8888",
"Connection": "keep-alive",
"Content-Length": "1871",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
"X-Xsrftoken": "2|d32b6c13|a2d7c54da22962e3437ecfb28af5d2ec|1602537750",
"Authorization": "token d0664a26a6f3c6a2143fd6d62f67f1d720cfacd56bd84f5b",
"Content-Type": "text/plain;charset=UTF-8",
"Accept": "*/*",
"Origin": "http://localhost:8888",
"Sec-Fetch-Site": "same-origin",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Dest": "empty",
"Referer": "http://localhost:8888/lab/workspaces/auto-v",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US,en;q=0.9,de;q=0.8",
"Cookie": "_xsrf=2|d32b6c13|a2d7c54da22962e3437ecfb28af5d2ec|1602537750; username-localhost-8889=\"2|1:0|10:1604624344|23:username-localhost-8889|44:ZWQ2OGI0ZGU3ZmU4NGZjNjg1OTQ4N2QzYzAwZTU3Njg=|8a9048391f96af4e758d57e781becdcd13b85f8be60b3b491314c46a1751ac16\"; username-localhost-8888=\"2|1:0|10:1604687721|23:username-localhost-8888|44:MmU1Nzg0M2JmZWNhNDI1NzhjYjRjNTBlN2UwNDE0ODI=|c819c59b00df1be5e939a9e642b1d056562cd6115a6f5d343faf505eaed1da6b\""
}
[E 10:35:22.407 LabApp] 500 POST /elyra/pipeline/schedule?1604687722331 (::1) 73.43ms referer=http://localhost:8888/lab/workspaces/auto-v
|
ModuleNotFoundError
|
def env_vars_as_dict(self, logger: Optional[object] = None) -> Dict:
"""Operation stores environment variables in a list of name=value pairs, while
subprocess.run() requires a dictionary - so we must convert. If no envs are
configured on the Operation, the existing env is returned, otherwise envs
configured on the Operation are overlayed on the existing env.
"""
envs = {}
for nv in self.env_vars:
if len(nv) > 0:
nv_pair = nv.split("=")
if len(nv_pair) == 2:
envs[nv_pair[0]] = nv_pair[1]
else:
if logger:
logger.warning(
f"Could not process environment variable entry `{nv}`, skipping..."
)
else:
print(
f"Could not process environment variable entry `{nv}`, skipping..."
)
return envs
|
def env_vars_as_dict(self, logger: Optional[object] = None) -> Dict:
"""Operation stores environment variables in a list of name=value pairs, while
subprocess.run() requires a dictionary - so we must convert. If no envs are
configured on the Operation, the existing env is returned, otherwise envs
configured on the Operation are overlayed on the existing env.
"""
envs = os.environ.copy()
for nv in self.env_vars:
if len(nv) > 0:
nv_pair = nv.split("=")
if len(nv_pair) == 2:
envs[nv_pair[0]] = nv_pair[1]
else:
if logger:
logger.warning(
f"Could not process environment variable entry `{nv}`, skipping..."
)
else:
print(
f"Could not process environment variable entry `{nv}`, skipping..."
)
return envs
|
https://github.com/elyra-ai/elyra/issues/961
|
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 151, in process
subprocess.run(argv, cwd=file_dir, env=envs, check=True)
File "/opt/conda/lib/python3.7/subprocess.py", line 512, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['papermill', '/home/jovyan/work/Untitled.ipynb', '/home/jovyan/work/Untitled.ipynb', '--cwd', '/home/jovyan/work']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 70, in process
operation_processor.process(operation)
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 154, in process
raise RuntimeError(f'Internal error executing {filepath}') from ex
RuntimeError: Internal error executing /home/jovyan/work/Untitled.ipynb
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/tornado/web.py", line 1703, in _execute
result = await result
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/handlers.py", line 89, in post
response = await PipelineProcessorManager.instance().process(pipeline)
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 69, in process
res = await asyncio.get_event_loop().run_in_executor(None, processor.process, pipeline)
File "/opt/conda/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 72, in process
raise RuntimeError(f'Error processing operation {operation.name}.') from ex
RuntimeError: Error processing operation Untitled.
|
subprocess.CalledProcessError
|
def process(self, operation: Operation):
raise NotImplementedError
|
def process(self, operation: Operation):
filepath = get_absolute_path(self._root_dir, operation.filename)
if not os.path.exists(filepath):
raise FileNotFoundError(f"Could not find {filepath}")
if not os.path.isfile(filepath):
raise ValueError(f"Not a file: {filepath}")
self.log.debug(f"Processing: {filepath}")
file_dir = os.path.dirname(filepath)
argv = self._create_execute_command(filepath, file_dir)
envs = operation.env_vars_as_dict(self.log)
try:
run(argv, cwd=file_dir, env=envs, check=True)
except Exception as ex:
raise RuntimeError(f"Internal error executing {filepath}: {ex}") from ex
|
https://github.com/elyra-ai/elyra/issues/961
|
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 151, in process
subprocess.run(argv, cwd=file_dir, env=envs, check=True)
File "/opt/conda/lib/python3.7/subprocess.py", line 512, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['papermill', '/home/jovyan/work/Untitled.ipynb', '/home/jovyan/work/Untitled.ipynb', '--cwd', '/home/jovyan/work']' returned non-zero exit status 1.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 70, in process
operation_processor.process(operation)
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 154, in process
raise RuntimeError(f'Internal error executing {filepath}') from ex
RuntimeError: Internal error executing /home/jovyan/work/Untitled.ipynb
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/tornado/web.py", line 1703, in _execute
result = await result
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/handlers.py", line 89, in post
response = await PipelineProcessorManager.instance().process(pipeline)
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor.py", line 69, in process
res = await asyncio.get_event_loop().run_in_executor(None, processor.process, pipeline)
File "/opt/conda/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/opt/conda/lib/python3.7/site-packages/elyra/pipeline/processor_local.py", line 72, in process
raise RuntimeError(f'Error processing operation {operation.name}.') from ex
RuntimeError: Error processing operation Untitled.
|
subprocess.CalledProcessError
|
def start(self):
include_invalid = not self.valid_only
try:
runtimes = self.metadata_manager.get_all_metadata_summary(
include_invalid=include_invalid
)
except KeyError:
runtimes = None
if not runtimes:
print(
"No metadata available for external runtimes at: '{}'".format(
self.metadata_manager.get_metadata_location
)
)
return
if self.json_output:
[
print(
"Runtime: {} {}\n{}".format(
rt.name,
"**INVALID**" if rt.reason and len(rt.reason) > 0 else "",
rt.to_json(),
)
)
for rt in runtimes
]
else:
sorted_runtimes = sorted(runtimes, key=lambda runtime: runtime.name)
# pad to width of longest runtime name
max_name_len = 0
max_resource_len = 0
for runtime in sorted_runtimes:
max_name_len = max(len(runtime.name), max_name_len)
max_resource_len = max(len(runtime.resource), max_resource_len)
print("Available metadata for external runtimes:")
for runtime in sorted_runtimes:
invalid = ""
if runtime.reason and len(runtime.reason) > 0:
invalid = "**INVALID** ({})".format(runtime.reason)
print(
" %s %s %s"
% (
runtime.name.ljust(max_name_len),
runtime.resource.ljust(max_resource_len),
invalid,
)
)
|
def start(self):
include_invalid = not self.valid_only
runtimes = self.metadata_manager.get_all_metadata_summary(
include_invalid=include_invalid
)
if not runtimes:
print(
"No metadata available for external runtimes at : '{}'".format(
self.metadata_manager.get_metadata_location
)
)
return
if self.json_output:
[
print(
"Runtime: {} {}\n{}".format(
rt.name,
"**INVALID**" if rt.reason and len(rt.reason) > 0 else "",
rt.to_json(),
)
)
for rt in runtimes
]
else:
sorted_runtimes = sorted(runtimes, key=lambda runtime: runtime.name)
# pad to width of longest runtime name
max_name_len = 0
max_resource_len = 0
for runtime in sorted_runtimes:
max_name_len = max(len(runtime.name), max_name_len)
max_resource_len = max(len(runtime.resource), max_resource_len)
print("Available metadata for external runtimes:")
for runtime in sorted_runtimes:
invalid = ""
if runtime.reason and len(runtime.reason) > 0:
invalid = "**INVALID** ({})".format(runtime.reason)
print(
" %s %s %s"
% (
runtime.name.ljust(max_name_len),
runtime.resource.ljust(max_resource_len),
invalid,
)
)
|
https://github.com/elyra-ai/elyra/issues/317
|
C:\Users\Administrator\Downloads>jupyter runtimes list
Traceback (most recent call last):
File "c:\programdata\anaconda3\envs\elyra\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\programdata\anaconda3\envs\elyra\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\envs\elyra\Scripts\jupyter-runtimes.EXE\__main__.py", line 7, in <module>
File "c:\programdata\anaconda3\envs\elyra\lib\site-packages\traitlets\config\application.py", line 664, in launch_instance
app.start()
File "c:\programdata\anaconda3\envs\elyra\lib\site-packages\elyra\metadata\runtime.py", line 282, in start
return self.subapp.start()
File "c:\programdata\anaconda3\envs\elyra\lib\site-packages\elyra\metadata\runtime.py", line 67, in start
runtimes = self.metadata_manager.get_all_metadata_summary(include_invalid=include_invalid)
File "c:\programdata\anaconda3\envs\elyra\lib\site-packages\elyra\metadata\metadata.py", line 99, in get_all_metadata_summary
return self.metadata_store.get_all_metadata_summary(include_invalid=include_invalid)
File "c:\programdata\anaconda3\envs\elyra\lib\site-packages\elyra\metadata\metadata.py", line 178, in get_all_metadata_summary
metadata_list = self._load_metadata_resources(include_invalid=include_invalid)
File "c:\programdata\anaconda3\envs\elyra\lib\site-packages\elyra\metadata\metadata.py", line 291, in _load_metadata_resources
raise KeyError("Namespace '{}' was not found!".format(self.namespace))
KeyError: "Namespace 'runtimes' was not found!"
|
KeyError
|
def start(self):
self._validate_parameters()
# init with required, conditionally add optional # TODO - drive from metadata? Will need better
metadata = dict(
api_endpoint=self.api_endpoint,
cos_endpoint=self.cos_endpoint,
cos_username=self.cos_username,
cos_password=self.cos_password,
cos_bucket=self.cos_bucket,
)
runtime = Runtime(
schema_name=self.schema_name,
name=self.name,
display_name=self.display_name,
metadata=metadata,
)
ex_msg = None
resource = None
try:
resource = self.metadata_manager.add(self.name, runtime, replace=self.replace)
except Exception as ex:
ex_msg = str(ex)
if resource:
print(
"Metadata for {} runtime '{}' has been written to: {}".format(
self.schema_name, self.name, resource
)
)
else:
if ex_msg:
self._log_and_exit(
"The following exception occurred while saving metadata '{}' for {} runtime: {}".format(
self.name, self.schema_name, ex_msg
),
display_help=True,
)
else:
self._log_and_exit(
"A failure occurred while saving metadata '{}' for {} runtime. Check log output.".format(
self.name, self.schema_name
),
display_help=True,
)
|
def start(self):
self._validate_parameters()
# init with required, conditionally add optional # TODO - drive from metadata? Will need better
metadata = dict(
api_endpoint=self.api_endpoint,
cos_endpoint=self.cos_endpoint,
cos_bucket=self.cos_bucket,
)
if self.cos_username:
metadata["cos_username"] = self.cos_username
if self.cos_password:
metadata["cos_password"] = self.cos_password
runtime = Runtime(
schema_name=self.schema_name,
name=self.name,
display_name=self.display_name,
metadata=metadata,
)
ex_msg = None
resource = None
try:
resource = self.metadata_manager.add(self.name, runtime, replace=self.replace)
except Exception as ex:
ex_msg = str(ex)
if resource:
print(
"Metadata for {} runtime '{}' has been written to: {}".format(
self.schema_name, self.name, resource
)
)
else:
if ex_msg:
self._log_and_exit(
"The following exception occurred while saving metadata '{}' for {} runtime: {}".format(
self.name, self.schema_name, ex_msg
),
display_help=True,
)
else:
self._log_and_exit(
"A failure occurred while saving metadata '{}' for {} runtime. Check log output.".format(
self.name, self.schema_name
),
display_help=True,
)
|
https://github.com/elyra-ai/elyra/issues/219
|
[E 14:28:56.246 LabApp] Uncaught exception POST /scheduler?1574720936239 (::1)
HTTPServerRequest(protocol='http', host='localhost:8888', method='POST', uri='/scheduler?1574720936239', version='HTTP/1.1', remote_ip='::1')
Traceback (most recent call last):
File "/opt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1697, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/opt/anaconda3/lib/python3.6/site-packages/ai_workspace/scheduler/handler.py", line 67, in post
cos_username = runtime_configuration.metadata['cos_username']
KeyError: 'cos_username'
|
KeyError
|
def _validate_parameters(self):
self._confirm_required("name", self.name)
self._confirm_required("display_name", self.display_name)
self._confirm_required("api_endpoint", self.api_endpoint)
self._confirm_required("cos_endpoint", self.cos_endpoint)
self._confirm_required("cos_username", self.cos_username)
self._confirm_required("cos_password", self.cos_password)
self._confirm_required("cos_bucket", self.cos_bucket)
|
def _validate_parameters(self):
self._confirm_required("name", self.name)
self._confirm_required("display_name", self.display_name)
self._confirm_required("api_endpoint", self.api_endpoint)
self._confirm_required("cos_endpoint", self.cos_endpoint)
self._confirm_required("cos_bucket", self.cos_bucket)
|
https://github.com/elyra-ai/elyra/issues/219
|
[E 14:28:56.246 LabApp] Uncaught exception POST /scheduler?1574720936239 (::1)
HTTPServerRequest(protocol='http', host='localhost:8888', method='POST', uri='/scheduler?1574720936239', version='HTTP/1.1', remote_ip='::1')
Traceback (most recent call last):
File "/opt/anaconda3/lib/python3.6/site-packages/tornado/web.py", line 1697, in _execute
result = method(*self.path_args, **self.path_kwargs)
File "/opt/anaconda3/lib/python3.6/site-packages/ai_workspace/scheduler/handler.py", line 67, in post
cos_username = runtime_configuration.metadata['cos_username']
KeyError: 'cos_username'
|
KeyError
|
def traverse(self, path, response=None, validated_hook=None):
"""Traverse the object space
The REQUEST must already have a PARENTS item with at least one
object in it. This is typically the root object.
"""
request = self
request_get = request.get
if response is None:
response = self.response
# remember path for later use
browser_path = path
# Cleanup the path list
if path[:1] == "/":
path = path[1:]
if path[-1:] == "/":
path = path[:-1]
clean = []
for item in path.split("/"):
# Make sure that certain things that dont make sense
# cannot be traversed.
if item in ("REQUEST", "aq_self", "aq_base"):
return response.notFoundError(path)
if not item or item == ".":
continue
elif item == "..":
del clean[-1]
else:
clean.append(item)
path = clean
# How did this request come in? (HTTP GET, PUT, POST, etc.)
method = request_get("REQUEST_METHOD", "GET").upper()
# Probably a browser
no_acquire_flag = 0
if method in ("GET", "HEAD", "POST", "PURGE") and not is_xmlrpc_response(response):
# index_html is still the default method, only any object can
# override it by implementing its own __browser_default__ method
method = "index_html"
elif self.maybe_webdav_client:
# Probably a WebDAV client.
no_acquire_flag = 1
URL = request["URL"]
parents = request["PARENTS"]
object = parents[-1]
del parents[:]
self.roles = getRoles(None, None, object, UNSPECIFIED_ROLES)
# if the top object has a __bobo_traverse__ method, then use it
# to possibly traverse to an alternate top-level object.
if hasattr(object, "__bobo_traverse__"):
try:
new_object = object.__bobo_traverse__(request)
if new_object is not None:
object = new_object
self.roles = getRoles(None, None, object, UNSPECIFIED_ROLES)
except Exception:
pass
if not path and not method:
return response.forbiddenError(self["URL"])
# Traverse the URL to find the object:
if hasattr(object, "__of__"):
# Try to bind the top-level object to the request
# This is how you get 'self.REQUEST'
object = object.__of__(RequestContainer(REQUEST=request))
parents.append(object)
steps = self.steps
self._steps = _steps = list(map(quote, steps))
path.reverse()
request["TraversalRequestNameStack"] = request.path = path
request["ACTUAL_URL"] = request["URL"] + quote(browser_path)
# Set the posttraverse for duration of the traversal here
self._post_traverse = post_traverse = []
entry_name = ""
try:
# We build parents in the wrong order, so we
# need to make sure we reverse it when we're done.
while 1:
bpth = getattr(object, "__before_publishing_traverse__", None)
if bpth is not None:
bpth(object, self)
path = request.path = request["TraversalRequestNameStack"]
# Check for method:
if path:
entry_name = path.pop()
else:
# If we have reached the end of the path, we look to see
# if we can find IBrowserPublisher.browserDefault. If so,
# we call it to let the object tell us how to publish it.
# BrowserDefault returns the object to be published
# (usually self) and a sequence of names to traverse to
# find the method to be published.
# This is webdav support. The last object in the path
# should not be acquired. Instead, a NullResource should
# be given if it doesn't exist:
if (
no_acquire_flag
and hasattr(object, "aq_base")
and not hasattr(object, "__bobo_traverse__")
):
if object.__parent__ is not aq_inner(object).__parent__:
from webdav.NullResource import NullResource
object = NullResource(parents[-2], object.getId(), self).__of__(
parents[-2]
)
if IBrowserPublisher.providedBy(object):
adapter = object
else:
adapter = queryMultiAdapter((object, self), IBrowserPublisher)
if adapter is None:
# Zope2 doesn't set up its own adapters in a lot
# of cases so we will just use a default adapter.
adapter = DefaultPublishTraverse(object, self)
object, default_path = adapter.browserDefault(self)
if default_path:
request._hacked_path = 1
if len(default_path) > 1:
path = list(default_path)
method = path.pop()
request["TraversalRequestNameStack"] = path
continue
else:
entry_name = default_path[0]
elif (
method
and hasattr(object, method)
and entry_name != method
and getattr(object, method) is not None
):
request._hacked_path = 1
entry_name = method
method = "index_html"
else:
if hasattr(object, "__call__"):
self.roles = getRoles(
object, "__call__", object.__call__, self.roles
)
if request._hacked_path:
i = URL.rfind("/")
if i > 0:
response.setBase(URL[:i])
break
step = quote(entry_name)
_steps.append(step)
request["URL"] = URL = "%s/%s" % (request["URL"], step)
try:
subobject = self.traverseName(object, entry_name)
if hasattr(object, "__bobo_traverse__") or hasattr(object, entry_name):
check_name = entry_name
else:
check_name = None
self.roles = getRoles(object, check_name, subobject, self.roles)
object = subobject
# traverseName() might raise ZTK's NotFound
except (KeyError, AttributeError, ztkNotFound):
if response.debug_mode:
return response.debugError("Cannot locate object at: %s" % URL)
else:
return response.notFoundError(URL)
except Forbidden as e:
if self.response.debug_mode:
return response.debugError(e.args)
else:
return response.forbiddenError(entry_name)
parents.append(object)
steps.append(entry_name)
finally:
parents.reverse()
# Note - no_acquire_flag is necessary to support
# things like DAV. We have to make sure
# that the target object is not acquired
# if the request_method is other than GET
# or POST. Otherwise, you could never use
# PUT to add a new object named 'test' if
# an object 'test' existed above it in the
# hierarchy -- you'd always get the
# existing object :(
if (
no_acquire_flag
and hasattr(parents[1], "aq_base")
and not hasattr(parents[1], "__bobo_traverse__")
):
base = aq_base(parents[1])
if not hasattr(base, entry_name):
try:
if entry_name not in base:
raise AttributeError(entry_name)
except TypeError:
raise AttributeError(entry_name)
# After traversal post traversal hooks aren't available anymore
del self._post_traverse
request["PUBLISHED"] = parents.pop(0)
# Do authorization checks
user = groups = None
i = 0
if 1: # Always perform authentication.
last_parent_index = len(parents)
if hasattr(object, "__allow_groups__"):
groups = object.__allow_groups__
inext = 0
else:
inext = None
for i in range(last_parent_index):
if hasattr(parents[i], "__allow_groups__"):
groups = parents[i].__allow_groups__
inext = i + 1
break
if inext is not None:
i = inext
v = getattr(groups, "validate", old_validation)
auth = request._auth
if v is old_validation and self.roles is UNSPECIFIED_ROLES:
# No roles, so if we have a named group, get roles from
# group keys
if hasattr(groups, "keys"):
self.roles = list(groups.keys())
else:
try:
groups = groups()
except Exception:
pass
try:
self.roles = list(groups.keys())
except Exception:
pass
if groups is None:
# Public group, hack structures to get it to validate
self.roles = None
auth = ""
if v is old_validation:
user = old_validation(groups, request, auth, self.roles)
elif self.roles is UNSPECIFIED_ROLES:
user = v(request, auth)
else:
user = v(request, auth, self.roles)
while user is None and i < last_parent_index:
parent = parents[i]
i = i + 1
if hasattr(parent, "__allow_groups__"):
groups = parent.__allow_groups__
else:
continue
if hasattr(groups, "validate"):
v = groups.validate
else:
v = old_validation
if v is old_validation:
user = old_validation(groups, request, auth, self.roles)
elif self.roles is UNSPECIFIED_ROLES:
user = v(request, auth)
else:
user = v(request, auth, self.roles)
if user is None and self.roles != UNSPECIFIED_ROLES:
response.unauthorized()
if user is not None:
if validated_hook is not None:
validated_hook(self, user)
request["AUTHENTICATED_USER"] = user
request["AUTHENTICATION_PATH"] = "/".join(steps[:-i])
# Remove http request method from the URL.
request["URL"] = URL
# Run post traversal hooks
if post_traverse:
result = exec_callables(post_traverse)
if result is not None:
object = result
return object
|
def traverse(self, path, response=None, validated_hook=None):
"""Traverse the object space
The REQUEST must already have a PARENTS item with at least one
object in it. This is typically the root object.
"""
request = self
request_get = request.get
if response is None:
response = self.response
# remember path for later use
browser_path = path
# Cleanup the path list
if path[:1] == "/":
path = path[1:]
if path[-1:] == "/":
path = path[:-1]
clean = []
for item in path.split("/"):
# Make sure that certain things that dont make sense
# cannot be traversed.
if item in ("REQUEST", "aq_self", "aq_base"):
return response.notFoundError(path)
if not item or item == ".":
continue
elif item == "..":
del clean[-1]
else:
clean.append(item)
path = clean
# How did this request come in? (HTTP GET, PUT, POST, etc.)
method = request_get("REQUEST_METHOD", "GET").upper()
# Probably a browser
no_acquire_flag = 0
if method in ("GET", "POST", "PURGE") and not is_xmlrpc_response(response):
# index_html is still the default method, only any object can
# override it by implementing its own __browser_default__ method
method = "index_html"
elif self.maybe_webdav_client:
# Probably a WebDAV client.
no_acquire_flag = 1
URL = request["URL"]
parents = request["PARENTS"]
object = parents[-1]
del parents[:]
self.roles = getRoles(None, None, object, UNSPECIFIED_ROLES)
# if the top object has a __bobo_traverse__ method, then use it
# to possibly traverse to an alternate top-level object.
if hasattr(object, "__bobo_traverse__"):
try:
new_object = object.__bobo_traverse__(request)
if new_object is not None:
object = new_object
self.roles = getRoles(None, None, object, UNSPECIFIED_ROLES)
except Exception:
pass
if not path and not method:
return response.forbiddenError(self["URL"])
# Traverse the URL to find the object:
if hasattr(object, "__of__"):
# Try to bind the top-level object to the request
# This is how you get 'self.REQUEST'
object = object.__of__(RequestContainer(REQUEST=request))
parents.append(object)
steps = self.steps
self._steps = _steps = list(map(quote, steps))
path.reverse()
request["TraversalRequestNameStack"] = request.path = path
request["ACTUAL_URL"] = request["URL"] + quote(browser_path)
# Set the posttraverse for duration of the traversal here
self._post_traverse = post_traverse = []
entry_name = ""
try:
# We build parents in the wrong order, so we
# need to make sure we reverse it when we're done.
while 1:
bpth = getattr(object, "__before_publishing_traverse__", None)
if bpth is not None:
bpth(object, self)
path = request.path = request["TraversalRequestNameStack"]
# Check for method:
if path:
entry_name = path.pop()
else:
# If we have reached the end of the path, we look to see
# if we can find IBrowserPublisher.browserDefault. If so,
# we call it to let the object tell us how to publish it.
# BrowserDefault returns the object to be published
# (usually self) and a sequence of names to traverse to
# find the method to be published.
# This is webdav support. The last object in the path
# should not be acquired. Instead, a NullResource should
# be given if it doesn't exist:
if (
no_acquire_flag
and hasattr(object, "aq_base")
and not hasattr(object, "__bobo_traverse__")
):
if object.__parent__ is not aq_inner(object).__parent__:
from webdav.NullResource import NullResource
object = NullResource(parents[-2], object.getId(), self).__of__(
parents[-2]
)
if IBrowserPublisher.providedBy(object):
adapter = object
else:
adapter = queryMultiAdapter((object, self), IBrowserPublisher)
if adapter is None:
# Zope2 doesn't set up its own adapters in a lot
# of cases so we will just use a default adapter.
adapter = DefaultPublishTraverse(object, self)
object, default_path = adapter.browserDefault(self)
if default_path:
request._hacked_path = 1
if len(default_path) > 1:
path = list(default_path)
method = path.pop()
request["TraversalRequestNameStack"] = path
continue
else:
entry_name = default_path[0]
elif (
method
and hasattr(object, method)
and entry_name != method
and getattr(object, method) is not None
):
request._hacked_path = 1
entry_name = method
method = "index_html"
else:
if hasattr(object, "__call__"):
self.roles = getRoles(
object, "__call__", object.__call__, self.roles
)
if request._hacked_path:
i = URL.rfind("/")
if i > 0:
response.setBase(URL[:i])
break
step = quote(entry_name)
_steps.append(step)
request["URL"] = URL = "%s/%s" % (request["URL"], step)
try:
subobject = self.traverseName(object, entry_name)
if hasattr(object, "__bobo_traverse__") or hasattr(object, entry_name):
check_name = entry_name
else:
check_name = None
self.roles = getRoles(object, check_name, subobject, self.roles)
object = subobject
# traverseName() might raise ZTK's NotFound
except (KeyError, AttributeError, ztkNotFound):
if response.debug_mode:
return response.debugError("Cannot locate object at: %s" % URL)
else:
return response.notFoundError(URL)
except Forbidden as e:
if self.response.debug_mode:
return response.debugError(e.args)
else:
return response.forbiddenError(entry_name)
parents.append(object)
steps.append(entry_name)
finally:
parents.reverse()
# Note - no_acquire_flag is necessary to support
# things like DAV. We have to make sure
# that the target object is not acquired
# if the request_method is other than GET
# or POST. Otherwise, you could never use
# PUT to add a new object named 'test' if
# an object 'test' existed above it in the
# hierarchy -- you'd always get the
# existing object :(
if (
no_acquire_flag
and hasattr(parents[1], "aq_base")
and not hasattr(parents[1], "__bobo_traverse__")
):
base = aq_base(parents[1])
if not hasattr(base, entry_name):
try:
if entry_name not in base:
raise AttributeError(entry_name)
except TypeError:
raise AttributeError(entry_name)
# After traversal post traversal hooks aren't available anymore
del self._post_traverse
request["PUBLISHED"] = parents.pop(0)
# Do authorization checks
user = groups = None
i = 0
if 1: # Always perform authentication.
last_parent_index = len(parents)
if hasattr(object, "__allow_groups__"):
groups = object.__allow_groups__
inext = 0
else:
inext = None
for i in range(last_parent_index):
if hasattr(parents[i], "__allow_groups__"):
groups = parents[i].__allow_groups__
inext = i + 1
break
if inext is not None:
i = inext
v = getattr(groups, "validate", old_validation)
auth = request._auth
if v is old_validation and self.roles is UNSPECIFIED_ROLES:
# No roles, so if we have a named group, get roles from
# group keys
if hasattr(groups, "keys"):
self.roles = list(groups.keys())
else:
try:
groups = groups()
except Exception:
pass
try:
self.roles = list(groups.keys())
except Exception:
pass
if groups is None:
# Public group, hack structures to get it to validate
self.roles = None
auth = ""
if v is old_validation:
user = old_validation(groups, request, auth, self.roles)
elif self.roles is UNSPECIFIED_ROLES:
user = v(request, auth)
else:
user = v(request, auth, self.roles)
while user is None and i < last_parent_index:
parent = parents[i]
i = i + 1
if hasattr(parent, "__allow_groups__"):
groups = parent.__allow_groups__
else:
continue
if hasattr(groups, "validate"):
v = groups.validate
else:
v = old_validation
if v is old_validation:
user = old_validation(groups, request, auth, self.roles)
elif self.roles is UNSPECIFIED_ROLES:
user = v(request, auth)
else:
user = v(request, auth, self.roles)
if user is None and self.roles != UNSPECIFIED_ROLES:
response.unauthorized()
if user is not None:
if validated_hook is not None:
validated_hook(self, user)
request["AUTHENTICATED_USER"] = user
request["AUTHENTICATION_PATH"] = "/".join(steps[:-i])
# Remove http request method from the URL.
request["URL"] = URL
# Run post traversal hooks
if post_traverse:
result = exec_callables(post_traverse)
if result is not None:
object = result
return object
|
https://github.com/zopefoundation/Zope/issues/816
|
Traceback (most recent call last):
File "/Users/jens/src/.eggs/waitress-1.4.3-py3.7.egg/waitress/channel.py", line 349, in service
task.service()
File "/Users/jens/src/.eggs/waitress-1.4.3-py3.7.egg/waitress/task.py", line 169, in service
self.execute()
File "/Users/jens/src/.eggs/waitress-1.4.3-py3.7.egg/waitress/task.py", line 439, in execute
app_iter = self.channel.server.application(environ, start_response)
File "/Users/jens/src/.eggs/Paste-3.4.0-py3.7.egg/paste/translogger.py", line 69, in __call__
return self.application(environ, replacement_start_response)
File "/Users/jens/src/zope/Zope/src/ZPublisher/httpexceptions.py", line 30, in __call__
return self.application(environ, start_response)
File "/Users/jens/src/zope/Zope/src/ZPublisher/WSGIPublisher.py", line 359, in publish_module
response = _publish(request, new_mod_info)
File "/Users/jens/src/zope/Zope/src/ZPublisher/WSGIPublisher.py", line 250, in publish
obj = request.traverse(path, validated_hook=validate_user)
File "/Users/jens/src/zope/Zope/src/ZPublisher/BaseRequest.py", line 564, in traverse
raise AttributeError(entry_name)
AttributeError: @@absolute_url
|
AttributeError
|
def manage_DAVget(self):
"""Gets the document source or file data.
This implementation is a last resort fallback. The subclass should
override this method to provide a more appropriate implementation.
Using PrincipiaSearchSource, if it exists. It is one of the few shared
interfaces still around in common Zope content objects.
"""
if getattr(aq_base(self), "PrincipiaSearchSource", None) is not None:
return self.PrincipiaSearchSource()
# If it doesn't exist, give up.
return ""
|
def manage_DAVget(self):
"""Gets the document source"""
# The default implementation calls PrincipiaSearchSource
return self.PrincipiaSearchSource()
|
https://github.com/zopefoundation/Zope/issues/799
|
2020-03-05 12:08:22 ERROR [waitress:357][waitress] Exception while serving /foo/m
Traceback (most recent call last):
File "/home/ajung/src/zope/lib/python3.7/site-packages/waitress/channel.py", line 349, in service
task.service()
File "/home/ajung/src/zope/lib/python3.7/site-packages/waitress/task.py", line 169, in service
self.execute()
File "/home/ajung/src/zope/lib/python3.7/site-packages/waitress/task.py", line 439, in execute
app_iter = self.channel.server.application(environ, start_response)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/httpexceptions.py", line 30, in __call__
return self.application(environ, start_response)
File "/home/ajung/src/zope/lib/python3.7/site-packages/paste/translogger.py", line 69, in __call__
return self.application(environ, replacement_start_response)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/WSGIPublisher.py", line 364, in publish_module
response = _publish(request, new_mod_info)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/WSGIPublisher.py", line 267, in publish
bind=1)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/mapply.py", line 85, in mapply
return debug(object, args, context)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/WSGIPublisher.py", line 68, in call_object
return obj(*args)
File "/home/ajung/src/zope/lib/python3.7/site-packages/webdav/Resource.py", line 680, in manage_DAVget
return self.PrincipiaSearchSource()
AttributeError: 'RequestContainer' object has no attribute 'PrincipiaSearchSource'
|
AttributeError
|
def xml_escape(value):
if not isinstance(value, (str, bytes)):
value = str(value)
if not isinstance(value, str):
value = value.decode("utf-8")
return xmltools_escape(value)
|
def xml_escape(value):
if not isinstance(value, (str, bytes)):
value = str(value)
if not isinstance(value, str):
value = value.decode("utf-8")
value = xmltools_escape(value)
return value.encode("utf-8")
|
https://github.com/zopefoundation/Zope/issues/799
|
2020-03-05 12:08:22 ERROR [waitress:357][waitress] Exception while serving /foo/m
Traceback (most recent call last):
File "/home/ajung/src/zope/lib/python3.7/site-packages/waitress/channel.py", line 349, in service
task.service()
File "/home/ajung/src/zope/lib/python3.7/site-packages/waitress/task.py", line 169, in service
self.execute()
File "/home/ajung/src/zope/lib/python3.7/site-packages/waitress/task.py", line 439, in execute
app_iter = self.channel.server.application(environ, start_response)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/httpexceptions.py", line 30, in __call__
return self.application(environ, start_response)
File "/home/ajung/src/zope/lib/python3.7/site-packages/paste/translogger.py", line 69, in __call__
return self.application(environ, replacement_start_response)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/WSGIPublisher.py", line 364, in publish_module
response = _publish(request, new_mod_info)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/WSGIPublisher.py", line 267, in publish
bind=1)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/mapply.py", line 85, in mapply
return debug(object, args, context)
File "/home/ajung/src/zope/lib/python3.7/site-packages/ZPublisher/WSGIPublisher.py", line 68, in call_object
return obj(*args)
File "/home/ajung/src/zope/lib/python3.7/site-packages/webdav/Resource.py", line 680, in manage_DAVget
return self.PrincipiaSearchSource()
AttributeError: 'RequestContainer' object has no attribute 'PrincipiaSearchSource'
|
AttributeError
|
def publish_module(
environ,
start_response,
_publish=publish, # only for testing
_response=None,
_response_factory=WSGIResponse,
_request=None,
_request_factory=WSGIRequest,
_module_name="Zope2",
):
module_info = get_module_info(_module_name)
result = ()
path_info = environ.get("PATH_INFO")
if path_info:
# BIG Comment, see discussion at
# https://github.com/zopefoundation/Zope/issues/575
#
# The WSGI server automatically treats headers, including the
# PATH_INFO, as latin-1 encoded bytestrings, according to PEP-3333. As
# this causes headache I try to show the steps a URI takes in WebOb,
# which is similar in other wsgi server implementations.
# UTF-8 URL-encoded object-id 'täst':
# http://localhost/t%C3%A4st
# unquote('/t%C3%A4st'.decode('ascii')) results in utf-8 encoded bytes
# b'/t\xc3\xa4st'
# b'/t\xc3\xa4st'.decode('latin-1') latin-1 decoding due to PEP-3333
# '/täst'
# We now have a latin-1 decoded text, which was actually utf-8 encoded.
# To reverse this we have to encode with latin-1 first.
path_info = path_info.encode("latin-1")
# So we can now decode with the right (utf-8) encoding to get text.
# This encode/decode two-step with different encodings works because
# of the way PEP-3333 restricts the type of string allowable for
# request and response metadata. The allowed characters match up in
# both latin-1 and utf-8.
path_info = path_info.decode("utf-8")
environ["PATH_INFO"] = path_info
with closing(BytesIO()) as stdout, closing(BytesIO()) as stderr:
new_response = (
_response
if _response is not None
else _response_factory(stdout=stdout, stderr=stderr)
)
new_response._http_version = environ["SERVER_PROTOCOL"].split("/")[1]
new_response._server_version = environ.get("SERVER_SOFTWARE")
new_request = (
_request
if _request is not None
else _request_factory(environ["wsgi.input"], environ, new_response)
)
for i in range(getattr(new_request, "retry_max_count", 3) + 1):
request = new_request
response = new_response
setRequest(request)
try:
with load_app(module_info) as new_mod_info:
with transaction_pubevents(request, response):
response = _publish(request, new_mod_info)
user = getSecurityManager().getUser()
if user is not None and user.getUserName() != "Anonymous User":
environ["REMOTE_USER"] = user.getUserName()
break
except TransientError:
if request.supports_retry():
request.delay_retry() # Insert a time delay
new_request = request.retry()
new_response = new_request.response
else:
raise
finally:
request.close()
clearRequest()
# Start the WSGI server response
status, headers = response.finalize()
start_response(status, headers)
if isinstance(response.body, _FILE_TYPES) or IUnboundStreamIterator.providedBy(
response.body
):
if "wsgi.file_wrapper" in environ:
result = environ["wsgi.file_wrapper"](response.body)
else:
result = response.body
else:
# If somebody used response.write, that data will be in the
# response.stdout BytesIO, so we put that before the body.
result = (response.stdout.getvalue(), response.body)
for func in response.after_list:
func()
# Return the result body iterable.
return result
|
def publish_module(
environ,
start_response,
_publish=publish, # only for testing
_response=None,
_response_factory=WSGIResponse,
_request=None,
_request_factory=WSGIRequest,
_module_name="Zope2",
):
module_info = get_module_info(_module_name)
result = ()
path_info = environ.get("PATH_INFO")
if path_info:
# BIG Comment, see discussion at
# https://github.com/zopefoundation/Zope/issues/575
#
# The WSGI server automatically treats headers, including the
# PATH_INFO, as latin-1 encoded bytestrings, according to PEP-3333. As
# this causes headache I try to show the steps a URI takes in WebOb,
# which is similar in other wsgi server implementations.
# UTF-8 URL-encoded object-id 'täst':
# http://localhost/t%C3%A4st
# unquote('/t%C3%A4st'.decode('ascii')) results in utf-8 encoded bytes
# b'/t\xc3\xa4st'
# b'/t\xc3\xa4st'.decode('latin-1') latin-1 decoding due to PEP-3333
# '/täst'
# We now have a latin-1 decoded text, which was actually utf-8 encoded.
# To reverse this we have to encode with latin-1 first.
path_info = path_info.encode("latin-1")
# So we can now decode with the right (utf-8) encoding to get text.
# This encode/decode two-step with different encodings works because
# of the way PEP-3333 restricts the type of string allowable for
# request and response metadata. The allowed characters match up in
# both latin-1 and utf-8.
path_info = path_info.decode("utf-8")
environ["PATH_INFO"] = path_info
with closing(BytesIO()) as stdout, closing(BytesIO()) as stderr:
new_response = (
_response
if _response is not None
else _response_factory(stdout=stdout, stderr=stderr)
)
new_response._http_version = environ["SERVER_PROTOCOL"].split("/")[1]
new_response._server_version = environ.get("SERVER_SOFTWARE")
new_request = (
_request
if _request is not None
else _request_factory(environ["wsgi.input"], environ, new_response)
)
for i in range(getattr(new_request, "retry_max_count", 3) + 1):
request = new_request
response = new_response
setRequest(request)
try:
with load_app(module_info) as new_mod_info:
with transaction_pubevents(request, response):
response = _publish(request, new_mod_info)
user = getSecurityManager().getUser()
if user is not None and user.getUserName() != "Anonymous User":
environ["REMOTE_USER"] = user.getUserName()
break
except TransientError:
if request.supports_retry():
request.delay_retry() # Insert a time delay
new_request = request.retry()
new_response = new_request.response
else:
raise
finally:
request.close()
clearRequest()
# Start the WSGI server response
status, headers = response.finalize()
start_response(status, headers)
if isinstance(response.body, _FILE_TYPES) or IUnboundStreamIterator.providedBy(
response.body
):
if "wsgi.file_wrapper" in environ:
result = environ["wsgi.file_wrapper"](response.body)
else:
result = response.body
else:
# If somebody used response.write, that data will be in the
# response.stdout BytesIO, so we put that before the body.
result = (response.stdout.getvalue(), response.body)
for func in response.after_list:
func()
# Return the result body iterable.
return result
|
https://github.com/zopefoundation/Zope/issues/726
|
2019-11-05 11:07:59 ERROR [ZODB.Connection:787][waitress] Shouldn't load state for AccessControl.users.User 0x04 when the connection is closed
Traceback (most recent call last):
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/ZODB/Connection.py", line 785, in setstate
raise ConnectionStateError(msg)
ZODB.POSException.ConnectionStateError: Shouldn't load state for AccessControl.users.User 0x04 when the connection is closed
2019-11-05 11:07:59 ERROR [waitress:363][waitress] Exception while serving /Control_Panel/Database/main/manage_minimize
Traceback (most recent call last):
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/waitress/channel.py", line 356, in service
task.service()
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/waitress/task.py", line 172, in service
self.execute()
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/waitress/task.py", line 440, in execute
app_iter = self.channel.server.application(environ, start_response)
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/src/zope/src/ZPublisher/httpexceptions.py", line 30, in __call__
return self.application(environ, start_response)
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/paste/translogger.py", line 69, in __call__
return self.application(environ, replacement_start_response)
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/src/zope/src/ZPublisher/WSGIPublisher.py", line 335, in publish_module
if user is not None and user.getUserName() != 'Anonymous User':
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/ZODB/Connection.py", line 785, in setstate
raise ConnectionStateError(msg)
ZODB.POSException.ConnectionStateError: Shouldn't load state for AccessControl.users.User 0x04 when the connection is closed
|
ZODB.POSException.ConnectionStateError
|
def publish_module(
environ,
start_response,
_publish=publish, # only for testing
_response=None,
_response_factory=WSGIResponse,
_request=None,
_request_factory=WSGIRequest,
_module_name="Zope2",
):
module_info = get_module_info(_module_name)
result = ()
path_info = environ.get("PATH_INFO")
if path_info and PY3:
# BIG Comment, see discussion at
# https://github.com/zopefoundation/Zope/issues/575
#
# The WSGI server automatically treats headers, including the
# PATH_INFO, as latin-1 encoded bytestrings, according to PEP-3333. As
# this causes headache I try to show the steps a URI takes in WebOb,
# which is similar in other wsgi server implementations.
# UTF-8 URL-encoded object-id 'täst':
# http://localhost/t%C3%A4st
# unquote('/t%C3%A4st'.decode('ascii')) results in utf-8 encoded bytes
# b'/t\xc3\xa4st'
# b'/t\xc3\xa4st'.decode('latin-1') latin-1 decoding due to PEP-3333
# '/täst'
# We now have a latin-1 decoded text, which was actually utf-8 encoded.
# To reverse this we have to encode with latin-1 first.
path_info = path_info.encode("latin-1")
# So we can now decode with the right (utf-8) encoding to get text.
# This encode/decode two-step with different encodings works because
# of the way PEP-3333 restricts the type of string allowable for
# request and response metadata. The allowed characters match up in
# both latin-1 and utf-8.
path_info = path_info.decode("utf-8")
environ["PATH_INFO"] = path_info
with closing(BytesIO()) as stdout, closing(BytesIO()) as stderr:
new_response = (
_response
if _response is not None
else _response_factory(stdout=stdout, stderr=stderr)
)
new_response._http_version = environ["SERVER_PROTOCOL"].split("/")[1]
new_response._server_version = environ.get("SERVER_SOFTWARE")
new_request = (
_request
if _request is not None
else _request_factory(environ["wsgi.input"], environ, new_response)
)
for i in range(getattr(new_request, "retry_max_count", 3) + 1):
request = new_request
response = new_response
setRequest(request)
try:
with load_app(module_info) as new_mod_info:
with transaction_pubevents(request, response):
response = _publish(request, new_mod_info)
user = getSecurityManager().getUser()
if user is not None and user.getUserName() != "Anonymous User":
environ["REMOTE_USER"] = user.getUserName()
break
except TransientError:
if request.supports_retry():
new_request = request.retry()
new_response = new_request.response
else:
raise
finally:
request.close()
clearRequest()
# Start the WSGI server response
status, headers = response.finalize()
start_response(status, headers)
if isinstance(response.body, _FILE_TYPES) or IUnboundStreamIterator.providedBy(
response.body
):
if "wsgi.file_wrapper" in environ:
result = environ["wsgi.file_wrapper"](response.body)
else:
result = response.body
else:
# If somebody used response.write, that data will be in the
# response.stdout BytesIO, so we put that before the body.
result = (response.stdout.getvalue(), response.body)
for func in response.after_list:
func()
# Return the result body iterable.
return result
|
def publish_module(
environ,
start_response,
_publish=publish, # only for testing
_response=None,
_response_factory=WSGIResponse,
_request=None,
_request_factory=WSGIRequest,
_module_name="Zope2",
):
module_info = get_module_info(_module_name)
result = ()
path_info = environ.get("PATH_INFO")
if path_info and PY3:
# BIG Comment, see discussion at
# https://github.com/zopefoundation/Zope/issues/575
#
# The WSGI server automatically treats headers, including the
# PATH_INFO, as latin-1 encoded bytestrings, according to PEP-3333. As
# this causes headache I try to show the steps a URI takes in WebOb,
# which is similar in other wsgi server implementations.
# UTF-8 URL-encoded object-id 'täst':
# http://localhost/t%C3%A4st
# unquote('/t%C3%A4st'.decode('ascii')) results in utf-8 encoded bytes
# b'/t\xc3\xa4st'
# b'/t\xc3\xa4st'.decode('latin-1') latin-1 decoding due to PEP-3333
# '/täst'
# We now have a latin-1 decoded text, which was actually utf-8 encoded.
# To reverse this we have to encode with latin-1 first.
path_info = path_info.encode("latin-1")
# So we can now decode with the right (utf-8) encoding to get text.
# This encode/decode two-step with different encodings works because
# of the way PEP-3333 restricts the type of string allowable for
# request and response metadata. The allowed characters match up in
# both latin-1 and utf-8.
path_info = path_info.decode("utf-8")
environ["PATH_INFO"] = path_info
with closing(BytesIO()) as stdout, closing(BytesIO()) as stderr:
new_response = (
_response
if _response is not None
else _response_factory(stdout=stdout, stderr=stderr)
)
new_response._http_version = environ["SERVER_PROTOCOL"].split("/")[1]
new_response._server_version = environ.get("SERVER_SOFTWARE")
new_request = (
_request
if _request is not None
else _request_factory(environ["wsgi.input"], environ, new_response)
)
for i in range(getattr(new_request, "retry_max_count", 3) + 1):
request = new_request
response = new_response
setRequest(request)
try:
with load_app(module_info) as new_mod_info:
with transaction_pubevents(request, response):
response = _publish(request, new_mod_info)
user = getSecurityManager().getUser()
if user is not None and user.getUserName() != "Anonymous User":
environ["REMOTE_USER"] = user.getUserName()
break
except TransientError:
if request.supports_retry():
new_request = request.retry()
new_response = new_request.response
else:
raise
finally:
request.close()
clearRequest()
# Start the WSGI server response
status, headers = response.finalize()
start_response(status, headers)
if isinstance(response.body, _FILE_TYPES) or IUnboundStreamIterator.providedBy(
response.body
):
if "wsgi.file_wrapper" in environ:
result = environ["wsgi.file_wrapper"](response.body)
else:
result = response.body
else:
# If somebody used response.write, that data will be in the
# response.stdout BytesIO, so we put that before the body.
result = (response.stdout.getvalue(), response.body)
for func in response.after_list:
func()
# Return the result body iterable.
return result
|
https://github.com/zopefoundation/Zope/issues/726
|
2019-11-05 11:07:59 ERROR [ZODB.Connection:787][waitress] Shouldn't load state for AccessControl.users.User 0x04 when the connection is closed
Traceback (most recent call last):
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/ZODB/Connection.py", line 785, in setstate
raise ConnectionStateError(msg)
ZODB.POSException.ConnectionStateError: Shouldn't load state for AccessControl.users.User 0x04 when the connection is closed
2019-11-05 11:07:59 ERROR [waitress:363][waitress] Exception while serving /Control_Panel/Database/main/manage_minimize
Traceback (most recent call last):
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/waitress/channel.py", line 356, in service
task.service()
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/waitress/task.py", line 172, in service
self.execute()
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/waitress/task.py", line 440, in execute
app_iter = self.channel.server.application(environ, start_response)
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/src/zope/src/ZPublisher/httpexceptions.py", line 30, in __call__
return self.application(environ, start_response)
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/paste/translogger.py", line 69, in __call__
return self.application(environ, replacement_start_response)
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/src/zope/src/ZPublisher/WSGIPublisher.py", line 335, in publish_module
if user is not None and user.getUserName() != 'Anonymous User':
File "/Users/dwt/.virtualenvs/tmp-274bc27ca19363/lib/python3.7/site-packages/ZODB/Connection.py", line 785, in setstate
raise ConnectionStateError(msg)
ZODB.POSException.ConnectionStateError: Shouldn't load state for AccessControl.users.User 0x04 when the connection is closed
|
ZODB.POSException.ConnectionStateError
|
def redirect(self, location, status=302, lock=0):
"""Cause a redirection without raising an error"""
if isinstance(location, HTTPRedirection):
status = location.getStatus()
location = location.headers["Location"]
if PY2 and isinstance(location, text_type):
location = location.encode(self.charset)
elif PY3 and isinstance(location, binary_type):
location = location.decode(self.charset)
# To be entirely correct, we must make sure that all non-ASCII
# characters in the path part are quoted correctly. This is required
# as we now allow non-ASCII IDs
parsed = list(urlparse(location))
parsed[2] = quote(parsed[2])
location = urlunparse(parsed)
self.setStatus(status, lock=lock)
self.setHeader("Location", location)
return location
|
def redirect(self, location, status=302, lock=0):
"""Cause a redirection without raising an error"""
if isinstance(location, HTTPRedirection):
status = location.getStatus()
location = str(location)
self.setStatus(status, lock=lock)
self.setHeader("Location", location)
return location
|
https://github.com/zopefoundation/Zope/issues/435
|
2018-12-18 11:37:44,424 ERROR [waitress:341][waitress] Exception when serving /Plone2/s3/view/ääää
Traceback (most recent call last):
File "/home/ajung/src/xmldirector.connector/eggs/waitress-1.1.0-py3.7.egg/waitress/channel.py", line 338, in service
task.service()
File "/home/ajung/src/xmldirector.connector/eggs/waitress-1.1.0-py3.7.egg/waitress/task.py", line 169, in service
self.execute()
File "/home/ajung/src/xmldirector.connector/eggs/waitress-1.1.0-py3.7.egg/waitress/task.py", line 399, in execute
app_iter = self.channel.server.application(env, start_response)
File "/home/ajung/src/xmldirector.connector/eggs/Zope-4.0b7-py3.7.egg/ZPublisher/httpexceptions.py", line 30, in __call__
return self.application(environ, start_response)
File "/home/ajung/src/xmldirector.connector/eggs/Zope-4.0b7-py3.7.egg/ZPublisher/WSGIPublisher.py", line 251, in publish_module
path_info = path_info.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe4 in position 16: invalid continuation byte
|
UnicodeDecodeError
|
def default_zpublisher_encoding(value):
# This is a bit clunky but necessary :-(
# These modules are imported during the configuration process
# so a module-level call to getConfiguration in any of them
# results in getting config data structure without the necessary
# value in it.
if PY2:
# unicode is not acceptable as encoding in HTTP headers:
value = str(value)
from ZPublisher import Converters, HTTPRequest, HTTPResponse
Converters.default_encoding = value
HTTPRequest.default_encoding = value
HTTPRequest.HTTPRequest.charset = value
HTTPResponse.default_encoding = value
HTTPResponse.HTTPBaseResponse.charset = value
return value
|
def default_zpublisher_encoding(value):
# This is a bit clunky but necessary :-(
# These modules are imported during the configuration process
# so a module-level call to getConfiguration in any of them
# results in getting config data structure without the necessary
# value in it.
from ZPublisher import Converters, HTTPRequest, HTTPResponse
Converters.default_encoding = value
HTTPRequest.default_encoding = value
HTTPRequest.HTTPRequest.charset = value
HTTPResponse.default_encoding = value
HTTPResponse.HTTPBaseResponse.charset = value
return value
|
https://github.com/zopefoundation/Zope/issues/308
|
Traceback (most recent call last):
File ".../waitress/channel.py", line 338, in service
task.service()
File ".../waitress/task.py", line 169, in service
self.execute()
File ".../waitress/task.py", line 399, in execute
app_iter = self.channel.server.application(env, start_response)
File ".../ZPublisher/httpexceptions.py", line 30, in __call__
return self.application(environ, start_response)
File ".../ZPublisher/WSGIPublisher.py", line 286, in publish_module
start_response(status, headers)
File ".../waitress/task.py", line 375, in start_response
'Header value %r is not a string in %r' % (v, (k, v))
AssertionError: Header value u'text/html; charset=utf-8' is not a string in ('Content-Type', u'text/html; charset=utf-8')
|
AssertionError
|
def install_product(
app, product_dir, product_name, meta_types, folder_permissions, raise_exc=None
):
if not _is_package(product_dir, product_name):
return
__traceback_info__ = product_name
global_dict = globals()
product = __import__(
"Products.%s" % product_name, global_dict, global_dict, ("__doc__",)
)
# Install items into the misc_ namespace, used by products
# and the framework itself to store common static resources
# like icon images.
misc_ = pgetattr(product, "misc_", {})
if misc_:
if isinstance(misc_, dict):
misc_ = Misc_(product_name, misc_)
setattr(Application.misc_, product_name, misc_)
productObject = FactoryDispatcher.Product(product_name)
context = ProductContext(productObject, None, product)
# Look for an 'initialize' method in the product.
initmethod = pgetattr(product, "initialize", None)
if initmethod is not None:
initmethod(context)
|
def install_product(
app, product_dir, product_name, meta_types, folder_permissions, raise_exc=None
):
if not _is_package(product_dir, product_name):
return
__traceback_info__ = product_name
global_dict = globals()
product = __import__(
"Products.%s" % product_name, global_dict, global_dict, ("__doc__",)
)
# Install items into the misc_ namespace, used by products
# and the framework itself to store common static resources
# like icon images.
misc_ = pgetattr(product, "misc_", {})
if misc_:
if isinstance(misc_, dict):
misc_ = Misc_(product_name, misc_)
Application.misc_.__dict__[product_name] = misc_
productObject = FactoryDispatcher.Product(product_name)
context = ProductContext(productObject, None, product)
# Look for an 'initialize' method in the product.
initmethod = pgetattr(product, "initialize", None)
if initmethod is not None:
initmethod(context)
|
https://github.com/zopefoundation/Zope/issues/187
|
Running plone.app.testing.layers.PloneFixture tests:
Tear down Testing.ZopeTestCase.layer.ZopeLite in 0.000 seconds.
Set up plone.testing.zca.LayerCleanup in 0.000 seconds.
Set up plone.testing.z2.Startup in 0.058 seconds.
Set up plone.app.testing.layers.PloneFixture Traceback (most recent call last):
File "/home/jenkins/.buildout/eggs/zope.testrunner-4.7.0-py2.7.egg/zope/testrunner/runner.py", line 400, in run_layer
setup_layer(options, layer, setup_layers)
File "/home/jenkins/.buildout/eggs/zope.testrunner-4.7.0-py2.7.egg/zope/testrunner/runner.py", line 713, in setup_layer
layer.setUp()
File "/home/jenkins/workspace/plip-zope4/src/plone.app.testing/plone/app/testing/layers.py", line 107, in setUp
self.setUpProducts(app)
File "/home/jenkins/workspace/plip-zope4/src/plone.app.testing/plone/app/testing/layers.py", line 186, in setUpProducts
z2.installProduct(app, p)
File "/home/jenkins/workspace/plip-zope4/src/plone.testing/src/plone/testing/z2.py", line 68, in installProduct
raise_exc=1)
File "/home/jenkins/workspace/plip-zope4/src/Zope/src/OFS/Application.py", line 396, in install_product
Application.misc_.__dict__[product_name] = misc_
- __traceback_info__: ExternalEditor
TypeError: 'dictproxy' object does not support item assignment
|
TypeError
|
def processInputs(
self,
# "static" variables that we want to be local for speed
SEQUENCE=1,
DEFAULT=2,
RECORD=4,
RECORDS=8,
REC=12, # RECORD | RECORDS
EMPTY=16,
CONVERTED=32,
hasattr=hasattr,
getattr=getattr,
setattr=setattr,
):
"""Process request inputs
We need to delay input parsing so that it is done under
publisher control for error handling purposes.
"""
response = self.response
environ = self.environ
method = environ.get("REQUEST_METHOD", "GET")
if method != "GET":
fp = self.stdin
else:
fp = None
form = self.form
other = self.other
taintedform = self.taintedform
# If 'QUERY_STRING' is not present in environ
# FieldStorage will try to get it from sys.argv[1]
# which is not what we need.
if "QUERY_STRING" not in environ:
environ["QUERY_STRING"] = ""
meth = None
fs = ZopeFieldStorage(fp=fp, environ=environ, keep_blank_values=1)
# Keep a reference to the FieldStorage. Otherwise it's
# __del__ method is called too early and closing FieldStorage.file.
self._hold(fs)
if not hasattr(fs, "list") or fs.list is None:
if "HTTP_SOAPACTION" in environ:
# Stash XML request for interpretation by a SOAP-aware view
other["SOAPXML"] = fs.value
elif (
xmlrpc is not None
and method == "POST"
and (
"content-type" in fs.headers
and "text/xml" in fs.headers["content-type"]
)
):
# Ye haaa, XML-RPC!
meth, self.args = xmlrpc.parse_input(fs.value)
response = xmlrpc.response(response)
other["RESPONSE"] = self.response = response
self.maybe_webdav_client = 0
else:
self._file = fs.file
else:
fslist = fs.list
tuple_items = {}
defaults = {}
tainteddefaults = {}
converter = None
for item in fslist:
isFileUpload = 0
key = item.name
if key is None:
continue
if (
hasattr(item, "file")
and hasattr(item, "filename")
and hasattr(item, "headers")
):
if item.file and (
item.filename is not None
# RFC 1867 says that all fields get a content-type.
# or 'content-type' in map(lower, item.headers.keys())
):
item = FileUpload(item)
isFileUpload = 1
else:
item = item.value
flags = 0
character_encoding = ""
# Variables for potentially unsafe values.
tainted = None
converter_type = None
# Loop through the different types and set
# the appropriate flags
# We'll search from the back to the front.
# We'll do the search in two steps. First, we'll
# do a string search, and then we'll check it with
# a re search.
l = key.rfind(":")
if l >= 0:
mo = search_type(key, l)
if mo:
l = mo.start(0)
else:
l = -1
while l >= 0:
type_name = key[l + 1 :]
key = key[:l]
c = get_converter(type_name, None)
if c is not None:
converter = c
converter_type = type_name
flags = flags | CONVERTED
elif type_name == "list":
flags = flags | SEQUENCE
elif type_name == "tuple":
tuple_items[key] = 1
flags = flags | SEQUENCE
elif type_name == "method" or type_name == "action":
if l:
meth = key
else:
meth = item
elif type_name == "default_method" or type_name == "default_action":
if not meth:
if l:
meth = key
else:
meth = item
elif type_name == "default":
flags = flags | DEFAULT
elif type_name == "record":
flags = flags | RECORD
elif type_name == "records":
flags = flags | RECORDS
elif type_name == "ignore_empty":
if not item:
flags = flags | EMPTY
elif has_codec(type_name):
character_encoding = type_name
l = key.rfind(":")
if l < 0:
break
mo = search_type(key, l)
if mo:
l = mo.start(0)
else:
l = -1
# Filter out special names from form:
if key in isCGI_NAMEs or key.startswith("HTTP_"):
continue
# If the key is tainted, mark it so as well.
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
if flags:
# skip over empty fields
if flags & EMPTY:
continue
# Split the key and its attribute
if flags & REC:
key = key.split(".")
key, attr = ".".join(key[:-1]), key[-1]
# Update the tainted_key if necessary
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
# Attributes cannot hold a <.
if "<" in attr:
raise ValueError(
"%s is not a valid record attribute name"
% escape(attr, True)
)
# defer conversion
if flags & CONVERTED:
try:
if character_encoding:
# We have a string with a specified character
# encoding. This gets passed to the converter
# either as unicode, if it can handle it, or
# crunched back down to utf-8 if it can not.
if isinstance(item, binary_type):
item = text_type(item, character_encoding)
if hasattr(converter, "convert_unicode"):
item = converter.convert_unicode(item)
else:
item = converter(item.encode(default_encoding))
else:
item = converter(item)
# Flag potentially unsafe values
if converter_type in (
"string",
"required",
"text",
"ustring",
"utext",
):
if not isFileUpload and "<" in item:
tainted = TaintedString(item)
elif converter_type in ("tokens", "lines", "utokens", "ulines"):
is_tainted = 0
tainted = item[:]
for i in range(len(tainted)):
if "<" in tainted[i]:
is_tainted = 1
tainted[i] = TaintedString(tainted[i])
if not is_tainted:
tainted = None
except Exception:
if not item and not (flags & DEFAULT) and key in defaults:
item = defaults[key]
if flags & RECORD:
item = getattr(item, attr)
if flags & RECORDS:
item = getattr(item[-1], attr)
if tainted_key in tainteddefaults:
tainted = tainteddefaults[tainted_key]
if flags & RECORD:
tainted = getattr(tainted, attr)
if flags & RECORDS:
tainted = getattr(tainted[-1], attr)
else:
raise
elif not isFileUpload and "<" in item:
# Flag potentially unsafe values
tainted = TaintedString(item)
# If the key is tainted, we need to store stuff in the
# tainted dict as well, even if the value is safe.
if "<" in tainted_key and tainted is None:
tainted = item
# Determine which dictionary to use
if flags & DEFAULT:
mapping_object = defaults
tainted_mapping = tainteddefaults
else:
mapping_object = form
tainted_mapping = taintedform
# Insert in dictionary
if key in mapping_object:
if flags & RECORDS:
# Get the list and the last record
# in the list. reclist is mutable.
reclist = mapping_object[key]
x = reclist[-1]
if tainted:
# Store a tainted copy as well
if tainted_key not in tainted_mapping:
tainted_mapping[tainted_key] = deepcopy(reclist)
treclist = tainted_mapping[tainted_key]
lastrecord = treclist[-1]
if not hasattr(lastrecord, attr):
if flags & SEQUENCE:
tainted = [tainted]
setattr(lastrecord, attr, tainted)
else:
if flags & SEQUENCE:
getattr(lastrecord, attr).append(tainted)
else:
newrec = record()
setattr(newrec, attr, tainted)
treclist.append(newrec)
elif tainted_key in tainted_mapping:
# If we already put a tainted value into this
# recordset, we need to make sure the whole
# recordset is built.
treclist = tainted_mapping[tainted_key]
lastrecord = treclist[-1]
copyitem = item
if not hasattr(lastrecord, attr):
if flags & SEQUENCE:
copyitem = [copyitem]
setattr(lastrecord, attr, copyitem)
else:
if flags & SEQUENCE:
getattr(lastrecord, attr).append(copyitem)
else:
newrec = record()
setattr(newrec, attr, copyitem)
treclist.append(newrec)
if not hasattr(x, attr):
# If the attribute does not
# exist, setit
if flags & SEQUENCE:
item = [item]
setattr(x, attr, item)
else:
if flags & SEQUENCE:
# If the attribute is a
# sequence, append the item
# to the existing attribute
y = getattr(x, attr)
y.append(item)
setattr(x, attr, y)
else:
# Create a new record and add
# it to the list
n = record()
setattr(n, attr, item)
mapping_object[key].append(n)
elif flags & RECORD:
b = mapping_object[key]
if flags & SEQUENCE:
item = [item]
if not hasattr(b, attr):
# if it does not have the
# attribute, set it
setattr(b, attr, item)
else:
# it has the attribute so
# append the item to it
setattr(b, attr, getattr(b, attr) + item)
else:
# it is not a sequence so
# set the attribute
setattr(b, attr, item)
# Store a tainted copy as well if necessary
if tainted:
if tainted_key not in tainted_mapping:
tainted_mapping[tainted_key] = deepcopy(
mapping_object[key]
)
b = tainted_mapping[tainted_key]
if flags & SEQUENCE:
seq = getattr(b, attr, [])
seq.append(tainted)
setattr(b, attr, seq)
else:
setattr(b, attr, tainted)
elif tainted_key in tainted_mapping:
# If we already put a tainted value into this
# record, we need to make sure the whole record
# is built.
b = tainted_mapping[tainted_key]
if flags & SEQUENCE:
seq = getattr(b, attr, [])
seq.append(item)
setattr(b, attr, seq)
else:
setattr(b, attr, item)
else:
# it is not a record or list of records
found = mapping_object[key]
if tainted:
# Store a tainted version if necessary
if tainted_key not in tainted_mapping:
copied = deepcopy(found)
if isinstance(copied, list):
tainted_mapping[tainted_key] = copied
else:
tainted_mapping[tainted_key] = [copied]
tainted_mapping[tainted_key].append(tainted)
elif tainted_key in tainted_mapping:
# We may already have encountered a tainted
# value for this key, and the tainted_mapping
# needs to hold all the values.
tfound = tainted_mapping[tainted_key]
if isinstance(tfound, list):
tainted_mapping[tainted_key].append(item)
else:
tainted_mapping[tainted_key] = [tfound, item]
if isinstance(found, list):
found.append(item)
else:
found = [found, item]
mapping_object[key] = found
else:
# The dictionary does not have the key
if flags & RECORDS:
# Create a new record, set its attribute
# and put it in the dictionary as a list
a = record()
if flags & SEQUENCE:
item = [item]
setattr(a, attr, item)
mapping_object[key] = [a]
if tainted:
# Store a tainted copy if necessary
a = record()
if flags & SEQUENCE:
tainted = [tainted]
setattr(a, attr, tainted)
tainted_mapping[tainted_key] = [a]
elif flags & RECORD:
# Create a new record, set its attribute
# and put it in the dictionary
if flags & SEQUENCE:
item = [item]
r = mapping_object[key] = record()
setattr(r, attr, item)
if tainted:
# Store a tainted copy if necessary
if flags & SEQUENCE:
tainted = [tainted]
r = tainted_mapping[tainted_key] = record()
setattr(r, attr, tainted)
else:
# it is not a record or list of records
if flags & SEQUENCE:
item = [item]
mapping_object[key] = item
if tainted:
# Store a tainted copy if necessary
if flags & SEQUENCE:
tainted = [tainted]
tainted_mapping[tainted_key] = tainted
else:
# This branch is for case when no type was specified.
mapping_object = form
if not isFileUpload and "<" in item:
tainted = TaintedString(item)
elif "<" in key:
tainted = item
# Insert in dictionary
if key in mapping_object:
# it is not a record or list of records
found = mapping_object[key]
if tainted:
# Store a tainted version if necessary
if tainted_key not in taintedform:
copied = deepcopy(found)
if isinstance(copied, list):
taintedform[tainted_key] = copied
else:
taintedform[tainted_key] = [copied]
elif not isinstance(taintedform[tainted_key], list):
taintedform[tainted_key] = [taintedform[tainted_key]]
taintedform[tainted_key].append(tainted)
elif tainted_key in taintedform:
# We may already have encountered a tainted value
# for this key, and the taintedform needs to hold
# all the values.
tfound = taintedform[tainted_key]
if isinstance(tfound, list):
taintedform[tainted_key].append(item)
else:
taintedform[tainted_key] = [tfound, item]
if isinstance(found, list):
found.append(item)
else:
found = [found, item]
mapping_object[key] = found
else:
mapping_object[key] = item
if tainted:
taintedform[tainted_key] = tainted
# insert defaults into form dictionary
if defaults:
for key, value in defaults.items():
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
if key not in form:
# if the form does not have the key,
# set the default
form[key] = value
if tainted_key in tainteddefaults:
taintedform[tainted_key] = tainteddefaults[tainted_key]
else:
# The form has the key
tdefault = tainteddefaults.get(tainted_key, value)
if isinstance(value, record):
# if the key is mapped to a record, get the
# record
r = form[key]
# First deal with tainted defaults.
if tainted_key in taintedform:
tainted = taintedform[tainted_key]
for k, v in tdefault.__dict__.items():
if not hasattr(tainted, k):
setattr(tainted, k, v)
elif tainted_key in tainteddefaults:
# Find out if any of the tainted default
# attributes needs to be copied over.
missesdefault = 0
for k, v in tdefault.__dict__.items():
if not hasattr(r, k):
missesdefault = 1
break
if missesdefault:
tainted = deepcopy(r)
for k, v in tdefault.__dict__.items():
if not hasattr(tainted, k):
setattr(tainted, k, v)
taintedform[tainted_key] = tainted
for k, v in value.__dict__.items():
# loop through the attributes and value
# in the default dictionary
if not hasattr(r, k):
# if the form dictionary doesn't have
# the attribute, set it to the default
setattr(r, k, v)
form[key] = r
elif isinstance(value, list):
# the default value is a list
l = form[key]
if not isinstance(l, list):
l = [l]
# First deal with tainted copies
if tainted_key in taintedform:
tainted = taintedform[tainted_key]
if not isinstance(tainted, list):
tainted = [tainted]
for defitem in tdefault:
if isinstance(defitem, record):
for k, v in defitem.__dict__.items():
for origitem in tainted:
if not hasattr(origitem, k):
setattr(origitem, k, v)
else:
if defitem not in tainted:
tainted.append(defitem)
taintedform[tainted_key] = tainted
elif tainted_key in tainteddefaults:
missesdefault = 0
for defitem in tdefault:
if isinstance(defitem, record):
try:
for k, v in defitem.__dict__.items():
for origitem in l:
if not hasattr(origitem, k):
missesdefault = 1
raise NestedLoopExit
except NestedLoopExit:
break
else:
if defitem not in l:
missesdefault = 1
break
if missesdefault:
tainted = deepcopy(l)
for defitem in tdefault:
if isinstance(defitem, record):
for k, v in defitem.__dict__.items():
for origitem in tainted:
if not hasattr(origitem, k):
setattr(origitem, k, v)
else:
if defitem not in tainted:
tainted.append(defitem)
taintedform[tainted_key] = tainted
for x in value:
# for each x in the list
if isinstance(x, record):
# if the x is a record
for k, v in x.__dict__.items():
# loop through each
# attribute and value in
# the record
for y in l:
# loop through each
# record in the form
# list if it doesn't
# have the attributes
# in the default
# dictionary, set them
if not hasattr(y, k):
setattr(y, k, v)
else:
# x is not a record
if x not in l:
l.append(x)
form[key] = l
else:
# The form has the key, the key is not mapped
# to a record or sequence so do nothing
pass
# Convert to tuples
if tuple_items:
for key in tuple_items.keys():
# Split the key and get the attr
k = key.split(".")
k, attr = ".".join(k[:-1]), k[-1]
a = attr
new = ""
# remove any type_names in the attr
while not a == "":
a = a.split(":")
a, new = ":".join(a[:-1]), a[-1]
attr = new
if k in form:
# If the form has the split key get its value
tainted_split_key = k
if "<" in k:
tainted_split_key = TaintedString(k)
item = form[k]
if isinstance(item, record):
# if the value is mapped to a record, check if it
# has the attribute, if it has it, convert it to
# a tuple and set it
if hasattr(item, attr):
value = tuple(getattr(item, attr))
setattr(item, attr, value)
else:
# It is mapped to a list of records
for x in item:
# loop through the records
if hasattr(x, attr):
# If the record has the attribute
# convert it to a tuple and set it
value = tuple(getattr(x, attr))
setattr(x, attr, value)
# Do the same for the tainted counterpart
if tainted_split_key in taintedform:
tainted = taintedform[tainted_split_key]
if isinstance(item, record):
seq = tuple(getattr(tainted, attr))
setattr(tainted, attr, seq)
else:
for trec in tainted:
if hasattr(trec, attr):
seq = getattr(trec, attr)
seq = tuple(seq)
setattr(trec, attr, seq)
else:
# the form does not have the split key
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
if key in form:
# if it has the original key, get the item
# convert it to a tuple
item = form[key]
item = tuple(form[key])
form[key] = item
if tainted_key in taintedform:
tainted = tuple(taintedform[tainted_key])
taintedform[tainted_key] = tainted
if meth:
if "PATH_INFO" in environ:
path = environ["PATH_INFO"]
while path[-1:] == "/":
path = path[:-1]
else:
path = ""
other["PATH_INFO"] = path = "%s/%s" % (path, meth)
self._hacked_path = 1
|
def processInputs(
self,
# "static" variables that we want to be local for speed
SEQUENCE=1,
DEFAULT=2,
RECORD=4,
RECORDS=8,
REC=12, # RECORD | RECORDS
EMPTY=16,
CONVERTED=32,
hasattr=hasattr,
getattr=getattr,
setattr=setattr,
):
"""Process request inputs
We need to delay input parsing so that it is done under
publisher control for error handling purposes.
"""
response = self.response
environ = self.environ
method = environ.get("REQUEST_METHOD", "GET")
if method != "GET":
fp = self.stdin
else:
fp = None
form = self.form
other = self.other
taintedform = self.taintedform
# If 'QUERY_STRING' is not present in environ
# FieldStorage will try to get it from sys.argv[1]
# which is not what we need.
if "QUERY_STRING" not in environ:
environ["QUERY_STRING"] = ""
meth = None
fs = ZopeFieldStorage(fp=fp, environ=environ, keep_blank_values=1)
if not hasattr(fs, "list") or fs.list is None:
if "HTTP_SOAPACTION" in environ:
# Stash XML request for interpretation by a SOAP-aware view
other["SOAPXML"] = fs.value
elif (
xmlrpc is not None
and method == "POST"
and (
"content-type" in fs.headers
and "text/xml" in fs.headers["content-type"]
)
):
# Ye haaa, XML-RPC!
meth, self.args = xmlrpc.parse_input(fs.value)
response = xmlrpc.response(response)
other["RESPONSE"] = self.response = response
self.maybe_webdav_client = 0
else:
self._file = fs.file
else:
fslist = fs.list
tuple_items = {}
defaults = {}
tainteddefaults = {}
converter = None
for item in fslist:
isFileUpload = 0
key = item.name
if key is None:
continue
if (
hasattr(item, "file")
and hasattr(item, "filename")
and hasattr(item, "headers")
):
if item.file and (
item.filename is not None
# RFC 1867 says that all fields get a content-type.
# or 'content-type' in map(lower, item.headers.keys())
):
item = FileUpload(item)
isFileUpload = 1
else:
item = item.value
flags = 0
character_encoding = ""
# Variables for potentially unsafe values.
tainted = None
converter_type = None
# Loop through the different types and set
# the appropriate flags
# We'll search from the back to the front.
# We'll do the search in two steps. First, we'll
# do a string search, and then we'll check it with
# a re search.
l = key.rfind(":")
if l >= 0:
mo = search_type(key, l)
if mo:
l = mo.start(0)
else:
l = -1
while l >= 0:
type_name = key[l + 1 :]
key = key[:l]
c = get_converter(type_name, None)
if c is not None:
converter = c
converter_type = type_name
flags = flags | CONVERTED
elif type_name == "list":
flags = flags | SEQUENCE
elif type_name == "tuple":
tuple_items[key] = 1
flags = flags | SEQUENCE
elif type_name == "method" or type_name == "action":
if l:
meth = key
else:
meth = item
elif type_name == "default_method" or type_name == "default_action":
if not meth:
if l:
meth = key
else:
meth = item
elif type_name == "default":
flags = flags | DEFAULT
elif type_name == "record":
flags = flags | RECORD
elif type_name == "records":
flags = flags | RECORDS
elif type_name == "ignore_empty":
if not item:
flags = flags | EMPTY
elif has_codec(type_name):
character_encoding = type_name
l = key.rfind(":")
if l < 0:
break
mo = search_type(key, l)
if mo:
l = mo.start(0)
else:
l = -1
# Filter out special names from form:
if key in isCGI_NAMEs or key.startswith("HTTP_"):
continue
# If the key is tainted, mark it so as well.
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
if flags:
# skip over empty fields
if flags & EMPTY:
continue
# Split the key and its attribute
if flags & REC:
key = key.split(".")
key, attr = ".".join(key[:-1]), key[-1]
# Update the tainted_key if necessary
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
# Attributes cannot hold a <.
if "<" in attr:
raise ValueError(
"%s is not a valid record attribute name"
% escape(attr, True)
)
# defer conversion
if flags & CONVERTED:
try:
if character_encoding:
# We have a string with a specified character
# encoding. This gets passed to the converter
# either as unicode, if it can handle it, or
# crunched back down to utf-8 if it can not.
if isinstance(item, binary_type):
item = text_type(item, character_encoding)
if hasattr(converter, "convert_unicode"):
item = converter.convert_unicode(item)
else:
item = converter(item.encode(default_encoding))
else:
item = converter(item)
# Flag potentially unsafe values
if converter_type in (
"string",
"required",
"text",
"ustring",
"utext",
):
if not isFileUpload and "<" in item:
tainted = TaintedString(item)
elif converter_type in ("tokens", "lines", "utokens", "ulines"):
is_tainted = 0
tainted = item[:]
for i in range(len(tainted)):
if "<" in tainted[i]:
is_tainted = 1
tainted[i] = TaintedString(tainted[i])
if not is_tainted:
tainted = None
except Exception:
if not item and not (flags & DEFAULT) and key in defaults:
item = defaults[key]
if flags & RECORD:
item = getattr(item, attr)
if flags & RECORDS:
item = getattr(item[-1], attr)
if tainted_key in tainteddefaults:
tainted = tainteddefaults[tainted_key]
if flags & RECORD:
tainted = getattr(tainted, attr)
if flags & RECORDS:
tainted = getattr(tainted[-1], attr)
else:
raise
elif not isFileUpload and "<" in item:
# Flag potentially unsafe values
tainted = TaintedString(item)
# If the key is tainted, we need to store stuff in the
# tainted dict as well, even if the value is safe.
if "<" in tainted_key and tainted is None:
tainted = item
# Determine which dictionary to use
if flags & DEFAULT:
mapping_object = defaults
tainted_mapping = tainteddefaults
else:
mapping_object = form
tainted_mapping = taintedform
# Insert in dictionary
if key in mapping_object:
if flags & RECORDS:
# Get the list and the last record
# in the list. reclist is mutable.
reclist = mapping_object[key]
x = reclist[-1]
if tainted:
# Store a tainted copy as well
if tainted_key not in tainted_mapping:
tainted_mapping[tainted_key] = deepcopy(reclist)
treclist = tainted_mapping[tainted_key]
lastrecord = treclist[-1]
if not hasattr(lastrecord, attr):
if flags & SEQUENCE:
tainted = [tainted]
setattr(lastrecord, attr, tainted)
else:
if flags & SEQUENCE:
getattr(lastrecord, attr).append(tainted)
else:
newrec = record()
setattr(newrec, attr, tainted)
treclist.append(newrec)
elif tainted_key in tainted_mapping:
# If we already put a tainted value into this
# recordset, we need to make sure the whole
# recordset is built.
treclist = tainted_mapping[tainted_key]
lastrecord = treclist[-1]
copyitem = item
if not hasattr(lastrecord, attr):
if flags & SEQUENCE:
copyitem = [copyitem]
setattr(lastrecord, attr, copyitem)
else:
if flags & SEQUENCE:
getattr(lastrecord, attr).append(copyitem)
else:
newrec = record()
setattr(newrec, attr, copyitem)
treclist.append(newrec)
if not hasattr(x, attr):
# If the attribute does not
# exist, setit
if flags & SEQUENCE:
item = [item]
setattr(x, attr, item)
else:
if flags & SEQUENCE:
# If the attribute is a
# sequence, append the item
# to the existing attribute
y = getattr(x, attr)
y.append(item)
setattr(x, attr, y)
else:
# Create a new record and add
# it to the list
n = record()
setattr(n, attr, item)
mapping_object[key].append(n)
elif flags & RECORD:
b = mapping_object[key]
if flags & SEQUENCE:
item = [item]
if not hasattr(b, attr):
# if it does not have the
# attribute, set it
setattr(b, attr, item)
else:
# it has the attribute so
# append the item to it
setattr(b, attr, getattr(b, attr) + item)
else:
# it is not a sequence so
# set the attribute
setattr(b, attr, item)
# Store a tainted copy as well if necessary
if tainted:
if tainted_key not in tainted_mapping:
tainted_mapping[tainted_key] = deepcopy(
mapping_object[key]
)
b = tainted_mapping[tainted_key]
if flags & SEQUENCE:
seq = getattr(b, attr, [])
seq.append(tainted)
setattr(b, attr, seq)
else:
setattr(b, attr, tainted)
elif tainted_key in tainted_mapping:
# If we already put a tainted value into this
# record, we need to make sure the whole record
# is built.
b = tainted_mapping[tainted_key]
if flags & SEQUENCE:
seq = getattr(b, attr, [])
seq.append(item)
setattr(b, attr, seq)
else:
setattr(b, attr, item)
else:
# it is not a record or list of records
found = mapping_object[key]
if tainted:
# Store a tainted version if necessary
if tainted_key not in tainted_mapping:
copied = deepcopy(found)
if isinstance(copied, list):
tainted_mapping[tainted_key] = copied
else:
tainted_mapping[tainted_key] = [copied]
tainted_mapping[tainted_key].append(tainted)
elif tainted_key in tainted_mapping:
# We may already have encountered a tainted
# value for this key, and the tainted_mapping
# needs to hold all the values.
tfound = tainted_mapping[tainted_key]
if isinstance(tfound, list):
tainted_mapping[tainted_key].append(item)
else:
tainted_mapping[tainted_key] = [tfound, item]
if isinstance(found, list):
found.append(item)
else:
found = [found, item]
mapping_object[key] = found
else:
# The dictionary does not have the key
if flags & RECORDS:
# Create a new record, set its attribute
# and put it in the dictionary as a list
a = record()
if flags & SEQUENCE:
item = [item]
setattr(a, attr, item)
mapping_object[key] = [a]
if tainted:
# Store a tainted copy if necessary
a = record()
if flags & SEQUENCE:
tainted = [tainted]
setattr(a, attr, tainted)
tainted_mapping[tainted_key] = [a]
elif flags & RECORD:
# Create a new record, set its attribute
# and put it in the dictionary
if flags & SEQUENCE:
item = [item]
r = mapping_object[key] = record()
setattr(r, attr, item)
if tainted:
# Store a tainted copy if necessary
if flags & SEQUENCE:
tainted = [tainted]
r = tainted_mapping[tainted_key] = record()
setattr(r, attr, tainted)
else:
# it is not a record or list of records
if flags & SEQUENCE:
item = [item]
mapping_object[key] = item
if tainted:
# Store a tainted copy if necessary
if flags & SEQUENCE:
tainted = [tainted]
tainted_mapping[tainted_key] = tainted
else:
# This branch is for case when no type was specified.
mapping_object = form
if not isFileUpload and "<" in item:
tainted = TaintedString(item)
elif "<" in key:
tainted = item
# Insert in dictionary
if key in mapping_object:
# it is not a record or list of records
found = mapping_object[key]
if tainted:
# Store a tainted version if necessary
if tainted_key not in taintedform:
copied = deepcopy(found)
if isinstance(copied, list):
taintedform[tainted_key] = copied
else:
taintedform[tainted_key] = [copied]
elif not isinstance(taintedform[tainted_key], list):
taintedform[tainted_key] = [taintedform[tainted_key]]
taintedform[tainted_key].append(tainted)
elif tainted_key in taintedform:
# We may already have encountered a tainted value
# for this key, and the taintedform needs to hold
# all the values.
tfound = taintedform[tainted_key]
if isinstance(tfound, list):
taintedform[tainted_key].append(item)
else:
taintedform[tainted_key] = [tfound, item]
if isinstance(found, list):
found.append(item)
else:
found = [found, item]
mapping_object[key] = found
else:
mapping_object[key] = item
if tainted:
taintedform[tainted_key] = tainted
# insert defaults into form dictionary
if defaults:
for key, value in defaults.items():
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
if key not in form:
# if the form does not have the key,
# set the default
form[key] = value
if tainted_key in tainteddefaults:
taintedform[tainted_key] = tainteddefaults[tainted_key]
else:
# The form has the key
tdefault = tainteddefaults.get(tainted_key, value)
if isinstance(value, record):
# if the key is mapped to a record, get the
# record
r = form[key]
# First deal with tainted defaults.
if tainted_key in taintedform:
tainted = taintedform[tainted_key]
for k, v in tdefault.__dict__.items():
if not hasattr(tainted, k):
setattr(tainted, k, v)
elif tainted_key in tainteddefaults:
# Find out if any of the tainted default
# attributes needs to be copied over.
missesdefault = 0
for k, v in tdefault.__dict__.items():
if not hasattr(r, k):
missesdefault = 1
break
if missesdefault:
tainted = deepcopy(r)
for k, v in tdefault.__dict__.items():
if not hasattr(tainted, k):
setattr(tainted, k, v)
taintedform[tainted_key] = tainted
for k, v in value.__dict__.items():
# loop through the attributes and value
# in the default dictionary
if not hasattr(r, k):
# if the form dictionary doesn't have
# the attribute, set it to the default
setattr(r, k, v)
form[key] = r
elif isinstance(value, list):
# the default value is a list
l = form[key]
if not isinstance(l, list):
l = [l]
# First deal with tainted copies
if tainted_key in taintedform:
tainted = taintedform[tainted_key]
if not isinstance(tainted, list):
tainted = [tainted]
for defitem in tdefault:
if isinstance(defitem, record):
for k, v in defitem.__dict__.items():
for origitem in tainted:
if not hasattr(origitem, k):
setattr(origitem, k, v)
else:
if defitem not in tainted:
tainted.append(defitem)
taintedform[tainted_key] = tainted
elif tainted_key in tainteddefaults:
missesdefault = 0
for defitem in tdefault:
if isinstance(defitem, record):
try:
for k, v in defitem.__dict__.items():
for origitem in l:
if not hasattr(origitem, k):
missesdefault = 1
raise NestedLoopExit
except NestedLoopExit:
break
else:
if defitem not in l:
missesdefault = 1
break
if missesdefault:
tainted = deepcopy(l)
for defitem in tdefault:
if isinstance(defitem, record):
for k, v in defitem.__dict__.items():
for origitem in tainted:
if not hasattr(origitem, k):
setattr(origitem, k, v)
else:
if defitem not in tainted:
tainted.append(defitem)
taintedform[tainted_key] = tainted
for x in value:
# for each x in the list
if isinstance(x, record):
# if the x is a record
for k, v in x.__dict__.items():
# loop through each
# attribute and value in
# the record
for y in l:
# loop through each
# record in the form
# list if it doesn't
# have the attributes
# in the default
# dictionary, set them
if not hasattr(y, k):
setattr(y, k, v)
else:
# x is not a record
if x not in l:
l.append(x)
form[key] = l
else:
# The form has the key, the key is not mapped
# to a record or sequence so do nothing
pass
# Convert to tuples
if tuple_items:
for key in tuple_items.keys():
# Split the key and get the attr
k = key.split(".")
k, attr = ".".join(k[:-1]), k[-1]
a = attr
new = ""
# remove any type_names in the attr
while not a == "":
a = a.split(":")
a, new = ":".join(a[:-1]), a[-1]
attr = new
if k in form:
# If the form has the split key get its value
tainted_split_key = k
if "<" in k:
tainted_split_key = TaintedString(k)
item = form[k]
if isinstance(item, record):
# if the value is mapped to a record, check if it
# has the attribute, if it has it, convert it to
# a tuple and set it
if hasattr(item, attr):
value = tuple(getattr(item, attr))
setattr(item, attr, value)
else:
# It is mapped to a list of records
for x in item:
# loop through the records
if hasattr(x, attr):
# If the record has the attribute
# convert it to a tuple and set it
value = tuple(getattr(x, attr))
setattr(x, attr, value)
# Do the same for the tainted counterpart
if tainted_split_key in taintedform:
tainted = taintedform[tainted_split_key]
if isinstance(item, record):
seq = tuple(getattr(tainted, attr))
setattr(tainted, attr, seq)
else:
for trec in tainted:
if hasattr(trec, attr):
seq = getattr(trec, attr)
seq = tuple(seq)
setattr(trec, attr, seq)
else:
# the form does not have the split key
tainted_key = key
if "<" in key:
tainted_key = TaintedString(key)
if key in form:
# if it has the original key, get the item
# convert it to a tuple
item = form[key]
item = tuple(form[key])
form[key] = item
if tainted_key in taintedform:
tainted = tuple(taintedform[tainted_key])
taintedform[tainted_key] = tainted
if meth:
if "PATH_INFO" in environ:
path = environ["PATH_INFO"]
while path[-1:] == "/":
path = path[:-1]
else:
path = ""
other["PATH_INFO"] = path = "%s/%s" % (path, meth)
self._hacked_path = 1
|
https://github.com/zopefoundation/Zope/issues/148
|
Traceback (most recent call last):
File "/tmp/z2py36/lib/python3.6/site-packages/waitress/channel.py", line 338, in service
task.service()
File "/tmp/z2py36/lib/python3.6/site-packages/waitress/task.py", line 169, in service
self.execute()
File "/tmp/z2py36/lib/python3.6/site-packages/waitress/task.py", line 399, in execute
app_iter = self.channel.server.application(env, start_response)
File "/tmp/z2py36/lib/python3.6/site-packages/ZPublisher/httpexceptions.py", line 33, in __call__
return self.application(environ, start_response)
File "/tmp/z2py36/lib/python3.6/site-packages/ZPublisher/WSGIPublisher.py", line 238, in publish_module
request, response, module_info, _publish=_publish)
File "/tmp/z2py36/lib/python3.6/site-packages/ZPublisher/WSGIPublisher.py", line 211, in _publish_response
reraise(exc.__class__, exc, sys.exc_info()[2])
File "/tmp/z2py36/lib/python3.6/site-packages/six.py", line 686, in reraise
raise value
File "/tmp/z2py36/lib/python3.6/site-packages/ZPublisher/WSGIPublisher.py", line 172, in _publish_response
response = _publish(request, module_info)
File "/tmp/z2py36/lib/python3.6/site-packages/ZPublisher/WSGIPublisher.py", line 162, in publish
bind=1)
File "/tmp/z2py36/lib/python3.6/site-packages/ZPublisher/mapply.py", line 85, in mapply
return debug(object, args, context)
File "/tmp/z2py36/lib/python3.6/site-packages/ZPublisher/WSGIPublisher.py", line 57, in call_object
return obj(*args)
File "/tmp/z2py36/lib/python3.6/site-packages/OFS/Image.py", line 83, in manage_addFile
newFile.manage_upload(file)
File "/tmp/z2py36/lib/python3.6/site-packages/OFS/Image.py", line 506, in manage_upload
data, size = self._read_data(file)
File "/tmp/z2py36/lib/python3.6/site-packages/OFS/Image.py", line 554, in _read_data
seek(0, 2)
File "/tmp/z2py36/lib/python3.6/tempfile.py", line 483, in func_wrapper
return func(*args, **kwargs)
ValueError: seek of closed file
|
ValueError
|
def run_feed(
self, feed=None, download=False, ignoreFirst=False, force=False, readout=True
):
"""Run the query for one URI and apply filters"""
self.shutdown = False
if not feed:
return "No such feed"
newlinks = []
new_downloads = []
# Preparations, get options
try:
feeds = config.get_rss()[feed]
except KeyError:
logging.error(T('Incorrect RSS feed description "%s"'), feed)
logging.info("Traceback: ", exc_info=True)
return T('Incorrect RSS feed description "%s"') % feed
uris = feeds.uri()
defCat = feeds.cat()
if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
defCat = None
defPP = feeds.pp()
if not notdefault(defPP):
defPP = None
defScript = feeds.script()
if not notdefault(defScript):
defScript = None
defPrio = feeds.priority()
if not notdefault(defPrio):
defPrio = None
# Preparations, convert filters to regex's
regexes = []
reTypes = []
reCats = []
rePPs = []
rePrios = []
reScripts = []
reEnabled = []
for feed_filter in feeds.filters():
reCat = feed_filter[0]
if defCat in ("", "*"):
reCat = None
reCats.append(reCat)
rePPs.append(feed_filter[1])
reScripts.append(feed_filter[2])
reTypes.append(feed_filter[3])
if feed_filter[3] in ("<", ">", "F", "S"):
regexes.append(feed_filter[4])
else:
regexes.append(convert_filter(feed_filter[4]))
rePrios.append(feed_filter[5])
reEnabled.append(feed_filter[6] != "0")
regcount = len(regexes)
# Set first if this is the very first scan of this URI
first = (feed not in self.jobs) and ignoreFirst
# Add SABnzbd's custom User Agent
feedparser.USER_AGENT = "SABnzbd/%s" % sabnzbd.__version__
# Read the RSS feed
msg = ""
entries = []
if readout:
all_entries = []
for uri in uris:
# Reset parsing message for each feed
msg = ""
feed_parsed = {}
uri = uri.replace(" ", "%20").replace("feed://", "http://")
logging.debug("Running feedparser on %s", uri)
try:
feed_parsed = feedparser.parse(uri)
except Exception as feedparser_exc:
# Feedparser 5 would catch all errors, while 6 just throws them back at us
feed_parsed["bozo_exception"] = feedparser_exc
logging.debug("Finished parsing %s", uri)
status = feed_parsed.get("status", 999)
if status in (401, 402, 403):
msg = T("Do not have valid authentication for feed %s") % uri
elif 500 <= status <= 599:
msg = T(
"Server side error (server code %s); could not get %s on %s"
) % (status, feed, uri)
entries = feed_parsed.get("entries", [])
if not entries and "feed" in feed_parsed and "error" in feed_parsed["feed"]:
msg = T("Failed to retrieve RSS from %s: %s") % (
uri,
feed_parsed["feed"]["error"],
)
# Exception was thrown
if "bozo_exception" in feed_parsed and not entries:
msg = str(feed_parsed["bozo_exception"])
if "CERTIFICATE_VERIFY_FAILED" in msg:
msg = T(
"Server %s uses an untrusted HTTPS certificate"
) % get_base_url(uri)
msg += " - https://sabnzbd.org/certificate-errors"
elif (
"href" in feed_parsed
and feed_parsed["href"] != uri
and "login" in feed_parsed["href"]
):
# Redirect to login page!
msg = T("Do not have valid authentication for feed %s") % uri
else:
msg = T("Failed to retrieve RSS from %s: %s") % (uri, msg)
if msg:
# We need to escape any "%20" that could be in the warning due to the URL's
logging.warning_helpful(urllib.parse.unquote(msg))
elif not entries:
msg = T("RSS Feed %s was empty") % uri
logging.info(msg)
all_entries.extend(entries)
entries = all_entries
# In case of a new feed
if feed not in self.jobs:
self.jobs[feed] = {}
jobs = self.jobs[feed]
# Error in readout or now new readout
if readout:
if not entries:
return msg
else:
entries = jobs
# Filter out valid new links
for entry in entries:
if self.shutdown:
return
if readout:
try:
link, infourl, category, size, age, season, episode = _get_link(entry)
except (AttributeError, IndexError):
logging.info(T("Incompatible feed") + " " + uri)
logging.info("Traceback: ", exc_info=True)
return T("Incompatible feed")
title = entry.title
# If there's multiple feeds, remove the duplicates based on title and size
if len(uris) > 1:
skip_job = False
for job_link, job in jobs.items():
# Allow 5% size deviation because indexers might have small differences for same release
if (
job.get("title") == title
and link != job_link
and (job.get("size") * 0.95) < size < (job.get("size") * 1.05)
):
logging.info("Ignoring job %s from other feed", title)
skip_job = True
break
if skip_job:
continue
else:
link = entry
infourl = jobs[link].get("infourl", "")
category = jobs[link].get("orgcat", "")
if category in ("", "*"):
category = None
title = jobs[link].get("title", "")
size = jobs[link].get("size", 0)
age = jobs[link].get("age")
season = jobs[link].get("season", 0)
episode = jobs[link].get("episode", 0)
if link:
# Make sure spaces are quoted in the URL
link = link.strip().replace(" ", "%20")
newlinks.append(link)
if link in jobs:
jobstat = jobs[link].get("status", " ")[0]
else:
jobstat = "N"
if jobstat in "NGB" or (jobstat == "X" and readout):
# Match this title against all filters
logging.debug("Trying title %s", title)
result = False
myCat = defCat
myPP = defPP
myScript = defScript
myPrio = defPrio
n = 0
if ("F" in reTypes or "S" in reTypes) and (not season or not episode):
season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
# Match against all filters until an positive or negative match
logging.debug("Size %s", size)
for n in range(regcount):
if reEnabled[n]:
if category and reTypes[n] == "C":
found = re.search(regexes[n], category)
if not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == "<" and size and from_units(regexes[n]) < size
):
# "Size at most" : too large
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == ">" and size and from_units(regexes[n]) > size
):
# "Size at least" : too small
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif reTypes[n] == "F" and not ep_match(
season, episode, regexes[n]
):
# "Starting from SxxEyy", too early episode
logging.debug("Filter requirement match on rule %d", n)
result = False
break
elif (
reTypes[n] == "S"
and season
and episode
and ep_match(season, episode, regexes[n], title)
):
logging.debug("Filter matched on rule %d", n)
result = True
break
else:
if regexes[n]:
found = re.search(regexes[n], title)
else:
found = False
if reTypes[n] == "M" and not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
if found and reTypes[n] == "A":
logging.debug("Filter matched on rule %d", n)
result = True
break
if found and reTypes[n] == "R":
logging.debug("Filter rejected on rule %d", n)
result = False
break
if len(reCats):
if not result and defCat:
# Apply Feed-category on non-matched items
myCat = defCat
elif result and notdefault(reCats[n]):
# Use the matched info
myCat = reCats[n]
elif category and not defCat:
# No result and no Feed-category
myCat = cat_convert(category)
if myCat:
myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
else:
myCat = catPP = catScript = catPrio = None
if notdefault(rePPs[n]):
myPP = rePPs[n]
elif not (reCats[n] or category):
myPP = catPP
if notdefault(reScripts[n]):
myScript = reScripts[n]
elif not (notdefault(reCats[n]) or category):
myScript = catScript
if rePrios[n] not in (str(DEFAULT_PRIORITY), ""):
myPrio = rePrios[n]
elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
myPrio = catPrio
if cfg.no_dupes() and self.check_duplicate(title):
if cfg.no_dupes() == 1:
# Dupe-detection: Discard
logging.info("Ignoring duplicate job %s", title)
continue
elif cfg.no_dupes() == 3:
# Dupe-detection: Fail
# We accept it so the Queue can send it to the History
logging.info("Found duplicate job %s", title)
else:
# Dupe-detection: Pause
myPrio = DUP_PRIORITY
act = download and not first
if link in jobs:
act = act and not jobs[link].get("status", "").endswith("*")
act = act or force
star = first or jobs[link].get("status", "").endswith("*")
else:
star = first
if result:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"G",
category,
myCat,
myPP,
myScript,
act,
star,
priority=myPrio,
rule=n,
)
if act:
new_downloads.append(title)
else:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"B",
category,
myCat,
myPP,
myScript,
False,
star,
priority=myPrio,
rule=n,
)
# Send email if wanted and not "forced"
if new_downloads and cfg.email_rss() and not force:
emailer.rss_mail(feed, new_downloads)
remove_obsolete(jobs, newlinks)
return msg
|
def run_feed(
self, feed=None, download=False, ignoreFirst=False, force=False, readout=True
):
"""Run the query for one URI and apply filters"""
self.shutdown = False
if not feed:
return "No such feed"
newlinks = []
new_downloads = []
# Preparations, get options
try:
feeds = config.get_rss()[feed]
except KeyError:
logging.error(T('Incorrect RSS feed description "%s"'), feed)
logging.info("Traceback: ", exc_info=True)
return T('Incorrect RSS feed description "%s"') % feed
uris = feeds.uri()
defCat = feeds.cat()
if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
defCat = None
defPP = feeds.pp()
if not notdefault(defPP):
defPP = None
defScript = feeds.script()
if not notdefault(defScript):
defScript = None
defPrio = feeds.priority()
if not notdefault(defPrio):
defPrio = None
# Preparations, convert filters to regex's
regexes = []
reTypes = []
reCats = []
rePPs = []
rePrios = []
reScripts = []
reEnabled = []
for feed_filter in feeds.filters():
reCat = feed_filter[0]
if defCat in ("", "*"):
reCat = None
reCats.append(reCat)
rePPs.append(feed_filter[1])
reScripts.append(feed_filter[2])
reTypes.append(feed_filter[3])
if feed_filter[3] in ("<", ">", "F", "S"):
regexes.append(feed_filter[4])
else:
regexes.append(convert_filter(feed_filter[4]))
rePrios.append(feed_filter[5])
reEnabled.append(feed_filter[6] != "0")
regcount = len(regexes)
# Set first if this is the very first scan of this URI
first = (feed not in self.jobs) and ignoreFirst
# Add SABnzbd's custom User Agent
feedparser.USER_AGENT = "SABnzbd/%s" % sabnzbd.__version__
# Read the RSS feed
msg = ""
entries = []
if readout:
all_entries = []
for uri in uris:
msg = ""
feed_parsed = {}
uri = uri.replace(" ", "%20").replace("feed://", "http://")
logging.debug("Running feedparser on %s", uri)
try:
feed_parsed = feedparser.parse(uri)
except Exception as feedparser_exc:
# Feedparser 5 would catch all errors, while 6 just throws them back at us
feed_parsed["bozo_exception"] = feedparser_exc
logging.debug("Finished parsing %s", uri)
status = feed_parsed.get("status", 999)
if status in (401, 402, 403):
msg = T("Do not have valid authentication for feed %s") % uri
elif 500 <= status <= 599:
msg = T(
"Server side error (server code %s); could not get %s on %s"
) % (status, feed, uri)
entries = feed_parsed.get("entries", [])
if not entries and "feed" in feed_parsed and "error" in feed_parsed["feed"]:
msg = T("Failed to retrieve RSS from %s: %s") % (
uri,
feed_parsed["feed"]["error"],
)
# Exception was thrown
if "bozo_exception" in feed_parsed and not entries:
msg = str(feed_parsed["bozo_exception"])
if "CERTIFICATE_VERIFY_FAILED" in msg:
msg = T(
"Server %s uses an untrusted HTTPS certificate"
) % get_base_url(uri)
msg += " - https://sabnzbd.org/certificate-errors"
elif (
"href" in feed_parsed
and feed_parsed["href"] != uri
and "login" in feed_parsed["href"]
):
# Redirect to login page!
msg = T("Do not have valid authentication for feed %s") % uri
else:
msg = T("Failed to retrieve RSS from %s: %s") % (uri, msg)
if msg:
# We need to escape any "%20" that could be in the warning due to the URL's
logging.warning_helpful(urllib.parse.unquote(msg))
elif not entries:
msg = T("RSS Feed %s was empty") % uri
logging.info(msg)
all_entries.extend(entries)
entries = all_entries
# In case of a new feed
if feed not in self.jobs:
self.jobs[feed] = {}
jobs = self.jobs[feed]
# Error in readout or now new readout
if readout:
if not entries:
return msg
else:
entries = jobs
# Filter out valid new links
for entry in entries:
if self.shutdown:
return
if readout:
try:
link, infourl, category, size, age, season, episode = _get_link(entry)
except (AttributeError, IndexError):
logging.info(T("Incompatible feed") + " " + uri)
logging.info("Traceback: ", exc_info=True)
return T("Incompatible feed")
title = entry.title
# If there's multiple feeds, remove the duplicates based on title and size
if len(uris) > 1:
skip_job = False
for job_link, job in jobs.items():
# Allow 5% size deviation because indexers might have small differences for same release
if (
job.get("title") == title
and link != job_link
and (job.get("size") * 0.95) < size < (job.get("size") * 1.05)
):
logging.info("Ignoring job %s from other feed", title)
skip_job = True
break
if skip_job:
continue
else:
link = entry
infourl = jobs[link].get("infourl", "")
category = jobs[link].get("orgcat", "")
if category in ("", "*"):
category = None
title = jobs[link].get("title", "")
size = jobs[link].get("size", 0)
age = jobs[link].get("age")
season = jobs[link].get("season", 0)
episode = jobs[link].get("episode", 0)
if link:
# Make sure spaces are quoted in the URL
link = link.strip().replace(" ", "%20")
newlinks.append(link)
if link in jobs:
jobstat = jobs[link].get("status", " ")[0]
else:
jobstat = "N"
if jobstat in "NGB" or (jobstat == "X" and readout):
# Match this title against all filters
logging.debug("Trying title %s", title)
result = False
myCat = defCat
myPP = defPP
myScript = defScript
myPrio = defPrio
n = 0
if ("F" in reTypes or "S" in reTypes) and (not season or not episode):
season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
# Match against all filters until an positive or negative match
logging.debug("Size %s", size)
for n in range(regcount):
if reEnabled[n]:
if category and reTypes[n] == "C":
found = re.search(regexes[n], category)
if not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == "<" and size and from_units(regexes[n]) < size
):
# "Size at most" : too large
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == ">" and size and from_units(regexes[n]) > size
):
# "Size at least" : too small
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif reTypes[n] == "F" and not ep_match(
season, episode, regexes[n]
):
# "Starting from SxxEyy", too early episode
logging.debug("Filter requirement match on rule %d", n)
result = False
break
elif (
reTypes[n] == "S"
and season
and episode
and ep_match(season, episode, regexes[n], title)
):
logging.debug("Filter matched on rule %d", n)
result = True
break
else:
if regexes[n]:
found = re.search(regexes[n], title)
else:
found = False
if reTypes[n] == "M" and not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
if found and reTypes[n] == "A":
logging.debug("Filter matched on rule %d", n)
result = True
break
if found and reTypes[n] == "R":
logging.debug("Filter rejected on rule %d", n)
result = False
break
if len(reCats):
if not result and defCat:
# Apply Feed-category on non-matched items
myCat = defCat
elif result and notdefault(reCats[n]):
# Use the matched info
myCat = reCats[n]
elif category and not defCat:
# No result and no Feed-category
myCat = cat_convert(category)
if myCat:
myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
else:
myCat = catPP = catScript = catPrio = None
if notdefault(rePPs[n]):
myPP = rePPs[n]
elif not (reCats[n] or category):
myPP = catPP
if notdefault(reScripts[n]):
myScript = reScripts[n]
elif not (notdefault(reCats[n]) or category):
myScript = catScript
if rePrios[n] not in (str(DEFAULT_PRIORITY), ""):
myPrio = rePrios[n]
elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
myPrio = catPrio
if cfg.no_dupes() and self.check_duplicate(title):
if cfg.no_dupes() == 1:
# Dupe-detection: Discard
logging.info("Ignoring duplicate job %s", title)
continue
elif cfg.no_dupes() == 3:
# Dupe-detection: Fail
# We accept it so the Queue can send it to the History
logging.info("Found duplicate job %s", title)
else:
# Dupe-detection: Pause
myPrio = DUP_PRIORITY
act = download and not first
if link in jobs:
act = act and not jobs[link].get("status", "").endswith("*")
act = act or force
star = first or jobs[link].get("status", "").endswith("*")
else:
star = first
if result:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"G",
category,
myCat,
myPP,
myScript,
act,
star,
priority=myPrio,
rule=n,
)
if act:
new_downloads.append(title)
else:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"B",
category,
myCat,
myPP,
myScript,
False,
star,
priority=myPrio,
rule=n,
)
# Send email if wanted and not "forced"
if new_downloads and cfg.email_rss() and not force:
emailer.rss_mail(feed, new_downloads)
remove_obsolete(jobs, newlinks)
return msg
|
https://github.com/sabnzbd/sabnzbd/issues/1634
|
500 Internal Server Error
The server encountered an unexpected condition which prevented it from fulfilling the request.
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/cherrypy/_cprequest.py", line 670, in respond
response.body = self.handler()
File "/usr/lib/python3/dist-packages/cherrypy/lib/encoding.py", line 220, in __call__
self.body = self.oldhandler(*args, **kwargs)
File "/usr/lib/python3/dist-packages/cherrypy/_cpdispatch.py", line 60, in __call__
return self.callable(*self.args, **self.kwargs)
File "/usr/share/sabnzbdplus/sabnzbd/interface.py", line 136, in internal_wrap
return wrap_func(*args, **kwargs)
File "/usr/share/sabnzbdplus/sabnzbd/interface.py", line 1780, in index
msg = sabnzbd.rss.run_feed(
File "/usr/share/sabnzbdplus/sabnzbd/rss.py", line 65, in run_feed
return __RSS.run_feed(feed, download, ignoreFirst, force=force, readout=readout)
File "/usr/share/sabnzbdplus/sabnzbd/decorators.py", line 36, in call_func
return f(*args, **kw)
File "/usr/share/sabnzbdplus/sabnzbd/rss.py", line 550, in run_feed
return msg
UnboundLocalError: local variable 'msg' referenced before assignment
Powered by CherryPy 8.9.1
|
UnboundLocalError
|
def run_feed(
self, feed=None, download=False, ignoreFirst=False, force=False, readout=True
):
"""Run the query for one URI and apply filters"""
self.shutdown = False
if not feed:
return "No such feed"
newlinks = []
new_downloads = []
# Preparations, get options
try:
feeds = config.get_rss()[feed]
except KeyError:
logging.error(T('Incorrect RSS feed description "%s"'), feed)
logging.info("Traceback: ", exc_info=True)
return T('Incorrect RSS feed description "%s"') % feed
uris = feeds.uri()
defCat = feeds.cat()
import sabnzbd.api
if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
defCat = None
defPP = feeds.pp()
if not notdefault(defPP):
defPP = None
defScript = feeds.script()
if not notdefault(defScript):
defScript = None
defPrio = feeds.priority()
if not notdefault(defPrio):
defPrio = None
# Preparations, convert filters to regex's
regexes = []
reTypes = []
reCats = []
rePPs = []
rePrios = []
reScripts = []
reEnabled = []
for feed_filter in feeds.filters():
reCat = feed_filter[0]
if defCat in ("", "*"):
reCat = None
reCats.append(reCat)
rePPs.append(feed_filter[1])
reScripts.append(feed_filter[2])
reTypes.append(feed_filter[3])
if feed_filter[3] in ("<", ">", "F", "S"):
regexes.append(feed_filter[4])
else:
regexes.append(convert_filter(feed_filter[4]))
rePrios.append(feed_filter[5])
reEnabled.append(feed_filter[6] != "0")
regcount = len(regexes)
# Set first if this is the very first scan of this URI
first = (feed not in self.jobs) and ignoreFirst
# Add SABnzbd's custom User Agent
feedparser.USER_AGENT = "SABnzbd/%s" % sabnzbd.__version__
# Read the RSS feed
msg = ""
entries = []
if readout:
all_entries = []
for uri in uris:
# Reset parsing message for each feed
msg = ""
feed_parsed = {}
uri = uri.replace(" ", "%20").replace("feed://", "http://")
logging.debug("Running feedparser on %s", uri)
try:
feed_parsed = feedparser.parse(uri)
except Exception as feedparser_exc:
# Feedparser 5 would catch all errors, while 6 just throws them back at us
feed_parsed["bozo_exception"] = feedparser_exc
logging.debug("Finished parsing %s", uri)
status = feed_parsed.get("status", 999)
if status in (401, 402, 403):
msg = T("Do not have valid authentication for feed %s") % uri
elif 500 <= status <= 599:
msg = T(
"Server side error (server code %s); could not get %s on %s"
) % (status, feed, uri)
entries = feed_parsed.get("entries", [])
if not entries and "feed" in feed_parsed and "error" in feed_parsed["feed"]:
msg = T("Failed to retrieve RSS from %s: %s") % (
uri,
feed_parsed["feed"]["error"],
)
# Exception was thrown
if "bozo_exception" in feed_parsed and not entries:
msg = str(feed_parsed["bozo_exception"])
if "CERTIFICATE_VERIFY_FAILED" in msg:
msg = T(
"Server %s uses an untrusted HTTPS certificate"
) % get_base_url(uri)
msg += " - https://sabnzbd.org/certificate-errors"
elif (
"href" in feed_parsed
and feed_parsed["href"] != uri
and "login" in feed_parsed["href"]
):
# Redirect to login page!
msg = T("Do not have valid authentication for feed %s") % uri
else:
msg = T("Failed to retrieve RSS from %s: %s") % (uri, msg)
if msg:
# We need to escape any "%20" that could be in the warning due to the URL's
logging.warning_helpful(urllib.parse.unquote(msg))
elif not entries:
msg = T("RSS Feed %s was empty") % uri
logging.info(msg)
all_entries.extend(entries)
entries = all_entries
# In case of a new feed
if feed not in self.jobs:
self.jobs[feed] = {}
jobs = self.jobs[feed]
# Error in readout or now new readout
if readout:
if not entries:
return msg
else:
entries = jobs
# Filter out valid new links
for entry in entries:
if self.shutdown:
return
if readout:
try:
link, infourl, category, size, age, season, episode = _get_link(entry)
except (AttributeError, IndexError):
logging.info(T("Incompatible feed") + " " + uri)
logging.info("Traceback: ", exc_info=True)
return T("Incompatible feed")
title = entry.title
# If there's multiple feeds, remove the duplicates based on title and size
if len(uris) > 1:
skip_job = False
for job_link, job in jobs.items():
# Allow 5% size deviation because indexers might have small differences for same release
if (
job.get("title") == title
and link != job_link
and (job.get("size") * 0.95) < size < (job.get("size") * 1.05)
):
logging.info("Ignoring job %s from other feed", title)
skip_job = True
break
if skip_job:
continue
else:
link = entry
infourl = jobs[link].get("infourl", "")
category = jobs[link].get("orgcat", "")
if category in ("", "*"):
category = None
title = jobs[link].get("title", "")
size = jobs[link].get("size", 0)
age = jobs[link].get("age")
season = jobs[link].get("season", 0)
episode = jobs[link].get("episode", 0)
if link:
# Make sure spaces are quoted in the URL
link = link.strip().replace(" ", "%20")
newlinks.append(link)
if link in jobs:
jobstat = jobs[link].get("status", " ")[0]
else:
jobstat = "N"
if jobstat in "NGB" or (jobstat == "X" and readout):
# Match this title against all filters
logging.debug("Trying title %s", title)
result = False
myCat = defCat
myPP = defPP
myScript = defScript
myPrio = defPrio
n = 0
if ("F" in reTypes or "S" in reTypes) and (not season or not episode):
season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
# Match against all filters until an positive or negative match
logging.debug("Size %s", size)
for n in range(regcount):
if reEnabled[n]:
if category and reTypes[n] == "C":
found = re.search(regexes[n], category)
if not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == "<" and size and from_units(regexes[n]) < size
):
# "Size at most" : too large
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == ">" and size and from_units(regexes[n]) > size
):
# "Size at least" : too small
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif reTypes[n] == "F" and not ep_match(
season, episode, regexes[n]
):
# "Starting from SxxEyy", too early episode
logging.debug("Filter requirement match on rule %d", n)
result = False
break
elif (
reTypes[n] == "S"
and season
and episode
and ep_match(season, episode, regexes[n], title)
):
logging.debug("Filter matched on rule %d", n)
result = True
break
else:
if regexes[n]:
found = re.search(regexes[n], title)
else:
found = False
if reTypes[n] == "M" and not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
if found and reTypes[n] == "A":
logging.debug("Filter matched on rule %d", n)
result = True
break
if found and reTypes[n] == "R":
logging.debug("Filter rejected on rule %d", n)
result = False
break
if len(reCats):
if not result and defCat:
# Apply Feed-category on non-matched items
myCat = defCat
elif result and notdefault(reCats[n]):
# Use the matched info
myCat = reCats[n]
elif category and not defCat:
# No result and no Feed-category
myCat = cat_convert(category)
if myCat:
myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
else:
myCat = catPP = catScript = catPrio = None
if notdefault(rePPs[n]):
myPP = rePPs[n]
elif not (reCats[n] or category):
myPP = catPP
if notdefault(reScripts[n]):
myScript = reScripts[n]
elif not (notdefault(reCats[n]) or category):
myScript = catScript
if rePrios[n] not in (str(DEFAULT_PRIORITY), ""):
myPrio = rePrios[n]
elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
myPrio = catPrio
if cfg.no_dupes() and self.check_duplicate(title):
if cfg.no_dupes() == 1:
# Dupe-detection: Discard
logging.info("Ignoring duplicate job %s", title)
continue
elif cfg.no_dupes() == 3:
# Dupe-detection: Fail
# We accept it so the Queue can send it to the History
logging.info("Found duplicate job %s", title)
else:
# Dupe-detection: Pause
myPrio = DUP_PRIORITY
act = download and not first
if link in jobs:
act = act and not jobs[link].get("status", "").endswith("*")
act = act or force
star = first or jobs[link].get("status", "").endswith("*")
else:
star = first
if result:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"G",
category,
myCat,
myPP,
myScript,
act,
star,
priority=myPrio,
rule=n,
)
if act:
new_downloads.append(title)
else:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"B",
category,
myCat,
myPP,
myScript,
False,
star,
priority=myPrio,
rule=n,
)
# Send email if wanted and not "forced"
if new_downloads and cfg.email_rss() and not force:
emailer.rss_mail(feed, new_downloads)
remove_obsolete(jobs, newlinks)
return msg
|
def run_feed(
self, feed=None, download=False, ignoreFirst=False, force=False, readout=True
):
"""Run the query for one URI and apply filters"""
self.shutdown = False
if not feed:
return "No such feed"
newlinks = []
new_downloads = []
# Preparations, get options
try:
feeds = config.get_rss()[feed]
except KeyError:
logging.error(T('Incorrect RSS feed description "%s"'), feed)
logging.info("Traceback: ", exc_info=True)
return T('Incorrect RSS feed description "%s"') % feed
uris = feeds.uri()
defCat = feeds.cat()
import sabnzbd.api
if not notdefault(defCat) or defCat not in sabnzbd.api.list_cats(default=False):
defCat = None
defPP = feeds.pp()
if not notdefault(defPP):
defPP = None
defScript = feeds.script()
if not notdefault(defScript):
defScript = None
defPrio = feeds.priority()
if not notdefault(defPrio):
defPrio = None
# Preparations, convert filters to regex's
regexes = []
reTypes = []
reCats = []
rePPs = []
rePrios = []
reScripts = []
reEnabled = []
for feed_filter in feeds.filters():
reCat = feed_filter[0]
if defCat in ("", "*"):
reCat = None
reCats.append(reCat)
rePPs.append(feed_filter[1])
reScripts.append(feed_filter[2])
reTypes.append(feed_filter[3])
if feed_filter[3] in ("<", ">", "F", "S"):
regexes.append(feed_filter[4])
else:
regexes.append(convert_filter(feed_filter[4]))
rePrios.append(feed_filter[5])
reEnabled.append(feed_filter[6] != "0")
regcount = len(regexes)
# Set first if this is the very first scan of this URI
first = (feed not in self.jobs) and ignoreFirst
# Add SABnzbd's custom User Agent
feedparser.USER_AGENT = "SABnzbd/%s" % sabnzbd.__version__
# Read the RSS feed
entries = []
if readout:
all_entries = []
for uri in uris:
msg = ""
feed_parsed = {}
uri = uri.replace(" ", "%20").replace("feed://", "http://")
logging.debug("Running feedparser on %s", uri)
try:
feed_parsed = feedparser.parse(uri)
except Exception as feedparser_exc:
# Feedparser 5 would catch all errors, while 6 just throws them back at us
feed_parsed["bozo_exception"] = feedparser_exc
logging.debug("Finished parsing %s", uri)
status = feed_parsed.get("status", 999)
if status in (401, 402, 403):
msg = T("Do not have valid authentication for feed %s") % uri
elif 500 <= status <= 599:
msg = T(
"Server side error (server code %s); could not get %s on %s"
) % (status, feed, uri)
entries = feed_parsed.get("entries", [])
if not entries and "feed" in feed_parsed and "error" in feed_parsed["feed"]:
msg = T("Failed to retrieve RSS from %s: %s") % (
uri,
feed_parsed["feed"]["error"],
)
# Exception was thrown
if "bozo_exception" in feed_parsed and not entries:
msg = str(feed_parsed["bozo_exception"])
if "CERTIFICATE_VERIFY_FAILED" in msg:
msg = T(
"Server %s uses an untrusted HTTPS certificate"
) % get_base_url(uri)
msg += " - https://sabnzbd.org/certificate-errors"
elif (
"href" in feed_parsed
and feed_parsed["href"] != uri
and "login" in feed_parsed["href"]
):
# Redirect to login page!
msg = T("Do not have valid authentication for feed %s") % uri
else:
msg = T("Failed to retrieve RSS from %s: %s") % (uri, msg)
if msg:
# We need to escape any "%20" that could be in the warning due to the URL's
logging.warning_helpful(urllib.parse.unquote(msg))
elif not entries:
msg = T("RSS Feed %s was empty") % uri
logging.info(msg)
all_entries.extend(entries)
entries = all_entries
# In case of a new feed
if feed not in self.jobs:
self.jobs[feed] = {}
jobs = self.jobs[feed]
# Error in readout or now new readout
if readout:
if not entries:
return msg
else:
entries = jobs
# Filter out valid new links
for entry in entries:
if self.shutdown:
return
if readout:
try:
link, infourl, category, size, age, season, episode = _get_link(entry)
except (AttributeError, IndexError):
logging.info(T("Incompatible feed") + " " + uri)
logging.info("Traceback: ", exc_info=True)
return T("Incompatible feed")
title = entry.title
# If there's multiple feeds, remove the duplicates based on title and size
if len(uris) > 1:
skip_job = False
for job_link, job in jobs.items():
# Allow 5% size deviation because indexers might have small differences for same release
if (
job.get("title") == title
and link != job_link
and (job.get("size") * 0.95) < size < (job.get("size") * 1.05)
):
logging.info("Ignoring job %s from other feed", title)
skip_job = True
break
if skip_job:
continue
else:
link = entry
infourl = jobs[link].get("infourl", "")
category = jobs[link].get("orgcat", "")
if category in ("", "*"):
category = None
title = jobs[link].get("title", "")
size = jobs[link].get("size", 0)
age = jobs[link].get("age")
season = jobs[link].get("season", 0)
episode = jobs[link].get("episode", 0)
if link:
# Make sure spaces are quoted in the URL
link = link.strip().replace(" ", "%20")
newlinks.append(link)
if link in jobs:
jobstat = jobs[link].get("status", " ")[0]
else:
jobstat = "N"
if jobstat in "NGB" or (jobstat == "X" and readout):
# Match this title against all filters
logging.debug("Trying title %s", title)
result = False
myCat = defCat
myPP = defPP
myScript = defScript
myPrio = defPrio
n = 0
if ("F" in reTypes or "S" in reTypes) and (not season or not episode):
season, episode = sabnzbd.newsunpack.analyse_show(title)[1:3]
# Match against all filters until an positive or negative match
logging.debug("Size %s", size)
for n in range(regcount):
if reEnabled[n]:
if category and reTypes[n] == "C":
found = re.search(regexes[n], category)
if not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == "<" and size and from_units(regexes[n]) < size
):
# "Size at most" : too large
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif (
reTypes[n] == ">" and size and from_units(regexes[n]) > size
):
# "Size at least" : too small
logging.debug("Filter rejected on rule %d", n)
result = False
break
elif reTypes[n] == "F" and not ep_match(
season, episode, regexes[n]
):
# "Starting from SxxEyy", too early episode
logging.debug("Filter requirement match on rule %d", n)
result = False
break
elif (
reTypes[n] == "S"
and season
and episode
and ep_match(season, episode, regexes[n], title)
):
logging.debug("Filter matched on rule %d", n)
result = True
break
else:
if regexes[n]:
found = re.search(regexes[n], title)
else:
found = False
if reTypes[n] == "M" and not found:
logging.debug("Filter rejected on rule %d", n)
result = False
break
if found and reTypes[n] == "A":
logging.debug("Filter matched on rule %d", n)
result = True
break
if found and reTypes[n] == "R":
logging.debug("Filter rejected on rule %d", n)
result = False
break
if len(reCats):
if not result and defCat:
# Apply Feed-category on non-matched items
myCat = defCat
elif result and notdefault(reCats[n]):
# Use the matched info
myCat = reCats[n]
elif category and not defCat:
# No result and no Feed-category
myCat = cat_convert(category)
if myCat:
myCat, catPP, catScript, catPrio = cat_to_opts(myCat)
else:
myCat = catPP = catScript = catPrio = None
if notdefault(rePPs[n]):
myPP = rePPs[n]
elif not (reCats[n] or category):
myPP = catPP
if notdefault(reScripts[n]):
myScript = reScripts[n]
elif not (notdefault(reCats[n]) or category):
myScript = catScript
if rePrios[n] not in (str(DEFAULT_PRIORITY), ""):
myPrio = rePrios[n]
elif not ((rePrios[n] != str(DEFAULT_PRIORITY)) or category):
myPrio = catPrio
if cfg.no_dupes() and self.check_duplicate(title):
if cfg.no_dupes() == 1:
# Dupe-detection: Discard
logging.info("Ignoring duplicate job %s", title)
continue
elif cfg.no_dupes() == 3:
# Dupe-detection: Fail
# We accept it so the Queue can send it to the History
logging.info("Found duplicate job %s", title)
else:
# Dupe-detection: Pause
myPrio = DUP_PRIORITY
act = download and not first
if link in jobs:
act = act and not jobs[link].get("status", "").endswith("*")
act = act or force
star = first or jobs[link].get("status", "").endswith("*")
else:
star = first
if result:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"G",
category,
myCat,
myPP,
myScript,
act,
star,
priority=myPrio,
rule=n,
)
if act:
new_downloads.append(title)
else:
_HandleLink(
jobs,
link,
infourl,
title,
size,
age,
season,
episode,
"B",
category,
myCat,
myPP,
myScript,
False,
star,
priority=myPrio,
rule=n,
)
# Send email if wanted and not "forced"
if new_downloads and cfg.email_rss() and not force:
emailer.rss_mail(feed, new_downloads)
remove_obsolete(jobs, newlinks)
return msg
|
https://github.com/sabnzbd/sabnzbd/issues/1634
|
500 Internal Server Error
The server encountered an unexpected condition which prevented it from fulfilling the request.
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/cherrypy/_cprequest.py", line 670, in respond
response.body = self.handler()
File "/usr/lib/python3/dist-packages/cherrypy/lib/encoding.py", line 220, in __call__
self.body = self.oldhandler(*args, **kwargs)
File "/usr/lib/python3/dist-packages/cherrypy/_cpdispatch.py", line 60, in __call__
return self.callable(*self.args, **self.kwargs)
File "/usr/share/sabnzbdplus/sabnzbd/interface.py", line 136, in internal_wrap
return wrap_func(*args, **kwargs)
File "/usr/share/sabnzbdplus/sabnzbd/interface.py", line 1780, in index
msg = sabnzbd.rss.run_feed(
File "/usr/share/sabnzbdplus/sabnzbd/rss.py", line 65, in run_feed
return __RSS.run_feed(feed, download, ignoreFirst, force=force, readout=readout)
File "/usr/share/sabnzbdplus/sabnzbd/decorators.py", line 36, in call_func
return f(*args, **kw)
File "/usr/share/sabnzbdplus/sabnzbd/rss.py", line 550, in run_feed
return msg
UnboundLocalError: local variable 'msg' referenced before assignment
Powered by CherryPy 8.9.1
|
UnboundLocalError
|
def sanitize_foldername(name):
"""Return foldername with dodgy chars converted to safe ones
Remove any leading and trailing dot and space characters
"""
if not name:
return name
illegal = CH_ILLEGAL + ':"'
legal = CH_LEGAL + "-'"
if sabnzbd.WIN32 or sabnzbd.cfg.sanitize_safe():
# Remove all bad Windows chars too
illegal += CH_ILLEGAL_WIN
legal += CH_LEGAL_WIN
repl = sabnzbd.cfg.replace_illegal()
lst = []
for ch in name.strip():
if ch in illegal:
if repl:
ch = legal[illegal.find(ch)]
lst.append(ch)
else:
lst.append(ch)
name = "".join(lst)
name = name.strip()
if sabnzbd.WIN32 or sabnzbd.cfg.sanitize_safe():
name = replace_win_devices(name)
if len(name) >= sabnzbd.cfg.max_foldername_length():
name = name[: sabnzbd.cfg.max_foldername_length()]
# And finally, make sure it doesn't end in a dot
if name != "." and name != "..":
name = name.rstrip(".")
if not name:
name = "unknown"
return name
|
def sanitize_foldername(name):
"""Return foldername with dodgy chars converted to safe ones
Remove any leading and trailing dot and space characters
"""
if not name:
return name
illegal = CH_ILLEGAL + ':"'
legal = CH_LEGAL + "-'"
if sabnzbd.WIN32 or sabnzbd.cfg.sanitize_safe():
# Remove all bad Windows chars too
illegal += CH_ILLEGAL_WIN
legal += CH_LEGAL_WIN
repl = sabnzbd.cfg.replace_illegal()
lst = []
for ch in name.strip():
if ch in illegal:
if repl:
ch = legal[illegal.find(ch)]
lst.append(ch)
else:
lst.append(ch)
name = "".join(lst)
name = name.strip()
if sabnzbd.WIN32 or sabnzbd.cfg.sanitize_safe():
name = replace_win_devices(name)
# And finally, make sure it doesn't end in a dot
if name != "." and name != "..":
name = name.rstrip(".")
if not name:
name = "unknown"
return name
|
https://github.com/sabnzbd/sabnzbd/issues/1597
|
/_UNPACK_[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [English Dub] [FuniDub 1080p x264 AAC] [MKV] [1EB4BBFB] [1+8] - [Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv y)
Traceback (most recent call last):
File "/usr/lib/python3.8/shutil.py", line 788, in move
os.rename(src, real_dst)
OSError: [Errno 36] Filename too long: "/downloads/nzbs/sabnzbd/incomplete/[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [English Dub] [FuniDub 1080p x264 AAC] [MKV] [1EB4BBFB] [1+8] - [Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv y/[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv" -> "/downloads/nzbs/sabnzbd/complete/sonarrenglish/_UNPACK_[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [English Dub] [FuniDub 1080p x264 AAC] [MKV] [1EB4BBFB] [1+8] - [Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv y/[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/app/sabnzbd/filesystem.py", line 655, in move_to_path
renamer(path, new_path)
File "/app/sabnzbd/decorators.py", line 36, in call_func
return f(*args, **kw)
File "/app/sabnzbd/filesystem.py", line 776, in renamer
shutil.move(old, new)
File "/usr/lib/python3.8/shutil.py", line 802, in move
copy_function(src, real_dst)
File "/usr/lib/python3.8/shutil.py", line 432, in copy2
copyfile(src, dst, follow_symlinks=follow_symlinks)
File "/usr/lib/python3.8/shutil.py", line 261, in copyfile
with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
OSError: [Errno 36] Filename too long: "/downloads/nzbs/sabnzbd/complete/sonarrenglish/_UNPACK_[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [English Dub] [FuniDub 1080p x264 AAC] [MKV] [1EB4BBFB] [1+8] - [Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv y/[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/app/sabnzbd/filesystem.py", line 576, in create_all_dirs
os.mkdir(path_part_combined)
OSError: [Errno 36] Filename too long: "/downloads/nzbs/sabnzbd/complete/sonarrenglish/_UNPACK_[Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [English Dub] [FuniDub 1080p x264 AAC] [MKV] [1EB4BBFB] [1+8] - [Golumpa] Kino's Journey - The Beautiful World (2017) - 07 (Kino no Tabi) [FuniDub 1080p x264 AAC] [1EB4BBFB].mkv y"
|
OSError
|
def __init__(
self,
filename,
pp=None,
script=None,
nzb=None,
futuretype=False,
cat=None,
url=None,
priority=DEFAULT_PRIORITY,
nzbname=None,
status=Status.QUEUED,
nzo_info=None,
reuse=None,
dup_check=True,
):
TryList.__init__(self)
self.filename = filename # Original filename
if nzbname and nzb:
self.work_name = nzbname # Use nzbname if set and only for non-future slot
else:
self.work_name = filename
# For future-slots we keep the name given by URLGrabber
if nzb is None:
self.final_name = self.work_name = filename
else:
# Remove trailing .nzb and .par(2)
self.work_name = create_work_name(self.work_name)
# Extract password
self.work_name, self.password = scan_password(self.work_name)
if not self.work_name:
# In case only /password was entered for nzbname
self.work_name = filename
self.final_name = self.work_name
# Check for password also in filename
if not self.password:
_, self.password = scan_password(os.path.splitext(filename)[0])
# Determine category and find pp/script values based on input
# Later will be re-evaluated based on import steps
if pp is None:
r = u = d = None
else:
r, u, d = pp_to_opts(pp)
self.set_priority(priority) # Parse priority of input
self.repair = r # True if we want to repair this set
self.unpack = u # True if we want to unpack this set
self.delete = d # True if we want to delete this set
self.script = script # External script for this set
self.cat = cat # User-set category
# Information fields
self.url = url or filename
self.groups = []
self.avg_date = datetime.datetime(1970, 1, 1, 1, 0)
self.avg_stamp = 0.0 # Avg age in seconds (calculated from avg_age)
# Bookkeeping values
self.meta = {}
self.servercount = {} # Dict to keep bytes per server
self.created = False # dirprefixes + work_name created
self.direct_unpacker = None # Holds the DirectUnpacker instance
self.bytes = 0 # Original bytesize
self.bytes_par2 = 0 # Bytes available for repair
self.bytes_downloaded = 0 # Downloaded byte
self.bytes_tried = 0 # Which bytes did we try
self.bytes_missing = 0 # Bytes missing
self.bad_articles = 0 # How many bad (non-recoverable) articles
self.partable = {} # Holds one parfile-name for each set
self.extrapars = {} # Holds the extra parfile names for all sets
self.md5packs = {} # Holds the md5pack for each set (name: hash)
self.md5of16k = {} # Holds the md5s of the first-16k of all files in the NZB (hash: name)
self.files = [] # List of all NZFs
self.files_table = {} # Dictionary of NZFs indexed using NZF_ID
self.renames = {} # Dictionary of all renamed files
self.finished_files = [] # List of all finished NZFs
# The current status of the nzo eg:
# Queued, Downloading, Repairing, Unpacking, Failed, Complete
self.status = status
self.avg_bps_freq = 0
self.avg_bps_total = 0
self.first_articles = []
self.first_articles_count = 0
self.saved_articles = []
self.nzo_id = None
self.futuretype = futuretype
self.deleted = False
self.to_be_removed = False
self.parsed = False
self.duplicate = False
self.oversized = False
self.precheck = False
self.incomplete = False
self.unwanted_ext = 0
self.rating_filtered = 0
self.reuse = reuse
if self.status == Status.QUEUED and not reuse:
self.precheck = cfg.pre_check()
if self.precheck:
self.status = Status.CHECKING
# Store one line responses for filejoin/par2/unrar/unzip here for history display
self.action_line = ""
# Store the results from various filejoin/par2/unrar/unzip stages
self.unpack_info = {}
# Stores one line containing the last failure
self.fail_msg = ""
# Stores various info about the nzo to be
self.nzo_info = nzo_info or {}
# Temporary store for custom foldername - needs to be stored because of url fetching
self.custom_name = nzbname
self.next_save = None
self.save_timeout = None
self.encrypted = 0
self.url_wait = None
self.url_tries = 0
self.pp_active = False # Signals active post-processing (not saved)
self.md5sum = None
if nzb is None and not reuse:
# This is a slot for a future NZB, ready now
# It can also be a retry of a failed job with no extra NZB-file
return
# Apply conversion option to final folder
if cfg.replace_spaces():
logging.info("Replacing spaces with underscores in %s", self.final_name)
self.final_name = self.final_name.replace(" ", "_")
if cfg.replace_dots():
logging.info("Replacing dots with spaces in %s", self.final_name)
self.final_name = self.final_name.replace(".", " ")
# Check against identical checksum or series/season/episode
if (not reuse) and nzb and dup_check and priority != REPAIR_PRIORITY:
duplicate, series = self.has_duplicates()
else:
duplicate = series = 0
# Reuse the existing directory
if reuse and os.path.exists(reuse):
work_dir = long_path(reuse)
else:
# Determine "incomplete" folder and trim path on Windows to prevent long-path unrar errors
work_dir = long_path(os.path.join(cfg.download_dir.get_path(), self.work_name))
work_dir = trim_win_path(work_dir)
work_dir = get_unique_path(work_dir, create_dir=True)
set_permissions(work_dir)
# Always create the admin-directory, just to be sure
admin_dir = os.path.join(work_dir, JOB_ADMIN)
if not os.path.exists(admin_dir):
os.mkdir(admin_dir)
_, self.work_name = os.path.split(work_dir)
self.created = True
# When doing a retry or repair, remove old cache-files
if reuse:
remove_all(admin_dir, "SABnzbd_nz?_*", keep_folder=True)
remove_all(admin_dir, "SABnzbd_article_*", keep_folder=True)
if nzb and "<nzb" in nzb:
try:
sabnzbd.nzbparser.nzbfile_parser(nzb, self)
except Exception as err:
self.incomplete = True
logging.warning(
T("Invalid NZB file %s, skipping (reason=%s, line=%s)"),
filename,
err,
"1",
)
logging.info("Traceback: ", exc_info=True)
# Some people want to keep the broken files
if cfg.allow_incomplete_nzb():
self.pause()
else:
self.purge_data()
raise ValueError
sabnzbd.backup_nzb(filename, nzb)
sabnzbd.save_compressed(admin_dir, filename, nzb)
if not self.files and not reuse:
self.purge_data()
if self.url:
logging.warning(T("Empty NZB file %s") + " [%s]", filename, self.url)
else:
logging.warning(T("Empty NZB file %s"), filename)
raise ValueError
if cat is None:
for metacat in self.meta.get("category", ()):
metacat = cat_convert(metacat)
if metacat:
cat = metacat
break
if cat is None:
for grp in self.groups:
cat = cat_convert(grp)
if cat:
break
# Pickup backed-up attributes when re-using
if reuse:
cat, pp, script, priority = self.load_attribs()
# Determine category and find pp/script values
self.cat, pp_tmp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp_tmp)
# Run user pre-queue script if set and valid
if not reuse and make_script_path(cfg.pre_script()):
# Call the script
accept, name, pp, cat_pp, script_pp, priority, group = (
sabnzbd.newsunpack.pre_queue(self, pp, cat)
)
# Accept or reject
accept = int_conv(accept)
if accept < 1:
self.purge_data()
raise TypeError
if accept == 2:
self.fail_msg = T("Pre-queue script marked job as failed")
# Process all options, only over-write if set by script
# Beware that cannot do "if priority/pp", because those can
# also have a valid value of 0, which shouldn't be ignored
if name:
self.set_final_name_and_scan_password(name)
try:
pp = int(pp)
except:
pp = None
if cat_pp:
cat = cat_pp
try:
priority = int(priority)
except:
priority = DEFAULT_PRIORITY
if script_pp:
script = script_pp
if group:
self.groups = [str(group)]
# Re-evaluate results from pre-queue script
self.cat, pp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp)
else:
accept = 1
# Pause job when above size limit
limit = cfg.size_limit.get_int()
if not reuse and abs(limit) > 0.5 and self.bytes > limit:
logging.info("Job too large, forcing low prio and paused (%s)", self.final_name)
self.pause()
self.oversized = True
self.priority = LOW_PRIORITY
if duplicate and (
(not series and cfg.no_dupes() == 1) or (series and cfg.no_series_dupes() == 1)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Ignoring duplicate NZB "%s"'), filename)
self.purge_data()
raise TypeError
if duplicate and (
(not series and cfg.no_dupes() == 3) or (series and cfg.no_series_dupes() == 3)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Failing duplicate NZB "%s"'), filename)
# Move to history, utilizing the same code as accept&fail from pre-queue script
self.fail_msg = T("Duplicate NZB")
accept = 2
duplicate = False
if duplicate or self.priority == DUP_PRIORITY:
if cfg.no_dupes() == 4 or cfg.no_series_dupes() == 4:
if cfg.warn_dupl_jobs():
logging.warning('%s: "%s"', T("Duplicate NZB"), filename)
self.duplicate = True
self.priority = NORMAL_PRIORITY
else:
if cfg.warn_dupl_jobs():
logging.warning(T('Pausing duplicate NZB "%s"'), filename)
self.duplicate = True
self.pause()
self.priority = NORMAL_PRIORITY
# Check if there is any unwanted extension in plain sight in the NZB itself
for nzf in self.files:
if (
cfg.action_on_unwanted_extensions() >= 1
and get_ext(nzf.filename).replace(".", "") in cfg.unwanted_extensions()
):
# ... we found an unwanted extension
logging.warning(
T("Unwanted Extension in file %s (%s)"), nzf.filename, self.final_name
)
# Pause, or Abort:
if cfg.action_on_unwanted_extensions() == 1:
logging.debug("Unwanted extension ... pausing")
self.unwanted_ext = 1
self.pause()
if cfg.action_on_unwanted_extensions() == 2:
logging.debug("Unwanted extension ... aborting")
self.fail_msg = T("Aborted, unwanted extension detected")
accept = 2
if self.priority == PAUSED_PRIORITY:
self.pause()
self.priority = NORMAL_PRIORITY
if reuse:
self.check_existing_files(work_dir)
if cfg.auto_sort():
self.files.sort(key=functools.cmp_to_key(nzf_cmp_date))
else:
self.files.sort(key=functools.cmp_to_key(nzf_cmp_name))
# Copy meta fields to nzo_info, if not already set
for kw in self.meta:
if not self.nzo_info.get(kw):
self.nzo_info[kw] = self.meta[kw][0]
# Show first meta-password (if any), when there's no explicit password
if not self.password and self.meta.get("password"):
self.password = self.meta.get("password", [None])[0]
# Set nzo save-delay to minimum 120 seconds
self.save_timeout = max(120, min(6.0 * self.bytes / GIGI, 300.0))
# In case pre-queue script or duplicate check want to move
# to history we first need an nzo_id by entering the NzbQueue
if accept == 2:
self.deleted = True
self.status = Status.FAILED
sabnzbd.NzbQueue.do.add(self, quiet=True)
sabnzbd.NzbQueue.do.end_job(self)
# Raise error, so it's not added
raise TypeError
|
def __init__(
self,
filename,
pp=None,
script=None,
nzb=None,
futuretype=False,
cat=None,
url=None,
priority=DEFAULT_PRIORITY,
nzbname=None,
status=Status.QUEUED,
nzo_info=None,
reuse=None,
dup_check=True,
):
TryList.__init__(self)
self.filename = filename # Original filename
if nzbname and nzb:
self.work_name = nzbname # Use nzbname if set and only for non-future slot
else:
self.work_name = filename
# For future-slots we keep the name given by URLGrabber
if nzb is None:
self.final_name = self.work_name = filename
else:
# Remove trailing .nzb and .par(2)
self.work_name = create_work_name(self.work_name)
# Extract password
self.work_name, self.password = scan_password(self.work_name)
if not self.work_name:
# In case only /password was entered for nzbname
self.work_name = filename
self.final_name = self.work_name
# Check for password also in filename
if not self.password:
_, self.password = scan_password(os.path.splitext(filename)[0])
# Determine category and find pp/script values based on input
# Later will be re-evaluated based on import steps
if pp is None:
r = u = d = None
else:
r, u, d = pp_to_opts(pp)
self.set_priority(priority) # Parse priority of input
self.repair = r # True if we want to repair this set
self.unpack = u # True if we want to unpack this set
self.delete = d # True if we want to delete this set
self.script = script # External script for this set
self.cat = cat # User-set category
# Information fields
self.url = url or filename
self.groups = []
self.avg_date = datetime.datetime(1970, 1, 1, 1, 0)
self.avg_stamp = 0.0 # Avg age in seconds (calculated from avg_age)
# Bookkeeping values
self.meta = {}
self.servercount = {} # Dict to keep bytes per server
self.created = False # dirprefixes + work_name created
self.direct_unpacker = None # Holds the DirectUnpacker instance
self.bytes = 0 # Original bytesize
self.bytes_par2 = 0 # Bytes available for repair
self.bytes_downloaded = 0 # Downloaded byte
self.bytes_tried = 0 # Which bytes did we try
self.bytes_missing = 0 # Bytes missing
self.bad_articles = 0 # How many bad (non-recoverable) articles
self.partable = {} # Holds one parfile-name for each set
self.extrapars = {} # Holds the extra parfile names for all sets
self.md5packs = {} # Holds the md5pack for each set (name: hash)
self.md5of16k = {} # Holds the md5s of the first-16k of all files in the NZB (hash: name)
self.files = [] # List of all NZFs
self.files_table = {} # Dictionary of NZFs indexed using NZF_ID
self.renames = {} # Dictionary of all renamed files
self.finished_files = [] # List of all finished NZFs
# The current status of the nzo eg:
# Queued, Downloading, Repairing, Unpacking, Failed, Complete
self.status = status
self.avg_bps_freq = 0
self.avg_bps_total = 0
self.first_articles = []
self.first_articles_count = 0
self.saved_articles = []
self.nzo_id = None
self.futuretype = futuretype
self.deleted = False
self.to_be_removed = False
self.parsed = False
self.duplicate = False
self.oversized = False
self.precheck = False
self.incomplete = False
self.unwanted_ext = 0
self.rating_filtered = 0
self.reuse = reuse
if self.status == Status.QUEUED and not reuse:
self.precheck = cfg.pre_check()
if self.precheck:
self.status = Status.CHECKING
# Store one line responses for filejoin/par2/unrar/unzip here for history display
self.action_line = ""
# Store the results from various filejoin/par2/unrar/unzip stages
self.unpack_info = {}
# Stores one line containing the last failure
self.fail_msg = ""
# Stores various info about the nzo to be
self.nzo_info = nzo_info or {}
# Temporary store for custom foldername - needs to be stored because of url fetching
self.custom_name = nzbname
self.next_save = None
self.save_timeout = None
self.encrypted = 0
self.url_wait = None
self.url_tries = 0
self.pp_active = False # Signals active post-processing (not saved)
self.md5sum = None
if nzb is None and not reuse:
# This is a slot for a future NZB, ready now
# It can also be a retry of a failed job with no extra NZB-file
return
# Apply conversion option to final folder
if cfg.replace_spaces():
logging.info("Replacing spaces with underscores in %s", self.final_name)
self.final_name = self.final_name.replace(" ", "_")
if cfg.replace_dots():
logging.info("Replacing dots with spaces in %s", self.final_name)
self.final_name = self.final_name.replace(".", " ")
# Check against identical checksum or series/season/episode
if (not reuse) and nzb and dup_check and priority != REPAIR_PRIORITY:
duplicate, series = self.has_duplicates()
else:
duplicate = series = 0
# Reuse the existing directory
if reuse and os.path.exists(reuse):
work_dir = long_path(reuse)
else:
# Determine "incomplete" folder and trim path on Windows to prevent long-path unrar errors
work_dir = long_path(os.path.join(cfg.download_dir.get_path(), self.work_name))
work_dir = trim_win_path(work_dir)
work_dir = get_unique_path(work_dir, create_dir=True)
set_permissions(work_dir)
# Always create the admin-directory, just to be sure
admin_dir = os.path.join(work_dir, JOB_ADMIN)
if not os.path.exists(admin_dir):
os.mkdir(admin_dir)
_, self.work_name = os.path.split(work_dir)
self.created = True
# When doing a retry or repair, remove old cache-files
if reuse:
remove_all(admin_dir, "SABnzbd_nz?_*", keep_folder=True)
remove_all(admin_dir, "SABnzbd_article_*", keep_folder=True)
if nzb and "<nzb" in nzb:
try:
sabnzbd.nzbparser.nzbfile_parser(nzb, self)
except Exception as err:
self.incomplete = True
logging.warning(
T("Invalid NZB file %s, skipping (reason=%s, line=%s)"),
filename,
err,
"1",
)
logging.info("Traceback: ", exc_info=True)
# Some people want to keep the broken files
if cfg.allow_incomplete_nzb():
self.pause()
else:
self.purge_data()
raise ValueError
sabnzbd.backup_nzb(filename, nzb)
sabnzbd.save_compressed(admin_dir, filename, nzb)
if not self.files and not reuse:
self.purge_data()
if self.url:
logging.warning(T("Empty NZB file %s") + " [%s]", filename, self.url)
else:
logging.warning(T("Empty NZB file %s"), filename)
raise ValueError
if cat is None:
for metacat in self.meta.get("category", ()):
metacat = cat_convert(metacat)
if metacat:
cat = metacat
break
if cat is None:
for grp in self.groups:
cat = cat_convert(grp)
if cat:
break
# Pickup backed-up attributes when re-using
if reuse:
cat, pp, script, priority, name, password, self.url = get_attrib_file(
self.workpath, 7
)
if name:
self.final_name = name
if password:
self.password = password
# Determine category and find pp/script values
self.cat, pp_tmp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp_tmp)
# Run user pre-queue script if set and valid
if not reuse and make_script_path(cfg.pre_script()):
# Call the script
accept, name, pp, cat_pp, script_pp, priority, group = (
sabnzbd.newsunpack.pre_queue(self, pp, cat)
)
# Accept or reject
accept = int_conv(accept)
if accept < 1:
self.purge_data()
raise TypeError
if accept == 2:
self.fail_msg = T("Pre-queue script marked job as failed")
# Process all options, only over-write if set by script
# Beware that cannot do "if priority/pp", because those can
# also have a valid value of 0, which shouldn't be ignored
if name:
self.set_final_name_and_scan_password(name)
try:
pp = int(pp)
except:
pp = None
if cat_pp:
cat = cat_pp
try:
priority = int(priority)
except:
priority = DEFAULT_PRIORITY
if script_pp:
script = script_pp
if group:
self.groups = [str(group)]
# Re-evaluate results from pre-queue script
self.cat, pp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp)
else:
accept = 1
# Pause job when above size limit
limit = cfg.size_limit.get_int()
if not reuse and abs(limit) > 0.5 and self.bytes > limit:
logging.info("Job too large, forcing low prio and paused (%s)", self.final_name)
self.pause()
self.oversized = True
self.priority = LOW_PRIORITY
if duplicate and (
(not series and cfg.no_dupes() == 1) or (series and cfg.no_series_dupes() == 1)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Ignoring duplicate NZB "%s"'), filename)
self.purge_data()
raise TypeError
if duplicate and (
(not series and cfg.no_dupes() == 3) or (series and cfg.no_series_dupes() == 3)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Failing duplicate NZB "%s"'), filename)
# Move to history, utilizing the same code as accept&fail from pre-queue script
self.fail_msg = T("Duplicate NZB")
accept = 2
duplicate = False
if duplicate or self.priority == DUP_PRIORITY:
if cfg.no_dupes() == 4 or cfg.no_series_dupes() == 4:
if cfg.warn_dupl_jobs():
logging.warning('%s: "%s"', T("Duplicate NZB"), filename)
self.duplicate = True
self.priority = NORMAL_PRIORITY
else:
if cfg.warn_dupl_jobs():
logging.warning(T('Pausing duplicate NZB "%s"'), filename)
self.duplicate = True
self.pause()
self.priority = NORMAL_PRIORITY
# Check if there is any unwanted extension in plain sight in the NZB itself
for nzf in self.files:
if (
cfg.action_on_unwanted_extensions() >= 1
and get_ext(nzf.filename).replace(".", "") in cfg.unwanted_extensions()
):
# ... we found an unwanted extension
logging.warning(
T("Unwanted Extension in file %s (%s)"), nzf.filename, self.final_name
)
# Pause, or Abort:
if cfg.action_on_unwanted_extensions() == 1:
logging.debug("Unwanted extension ... pausing")
self.unwanted_ext = 1
self.pause()
if cfg.action_on_unwanted_extensions() == 2:
logging.debug("Unwanted extension ... aborting")
self.fail_msg = T("Aborted, unwanted extension detected")
accept = 2
if self.priority == PAUSED_PRIORITY:
self.pause()
self.priority = NORMAL_PRIORITY
if reuse:
self.check_existing_files(work_dir)
if cfg.auto_sort():
self.files.sort(key=functools.cmp_to_key(nzf_cmp_date))
else:
self.files.sort(key=functools.cmp_to_key(nzf_cmp_name))
# Copy meta fields to nzo_info, if not already set
for kw in self.meta:
if not self.nzo_info.get(kw):
self.nzo_info[kw] = self.meta[kw][0]
# Show first meta-password (if any), when there's no explicit password
if not self.password and self.meta.get("password"):
self.password = self.meta.get("password", [None])[0]
# Set nzo save-delay to minimum 120 seconds
self.save_timeout = max(120, min(6.0 * self.bytes / GIGI, 300.0))
# In case pre-queue script or duplicate check want to move
# to history we first need an nzo_id by entering the NzbQueue
if accept == 2:
self.deleted = True
self.status = Status.FAILED
sabnzbd.NzbQueue.do.add(self, quiet=True)
sabnzbd.NzbQueue.do.end_job(self)
# Raise error, so it's not added
raise TypeError
|
https://github.com/sabnzbd/sabnzbd/issues/1575
|
2020-08-15 09:52:02,675::INFO::[postproc:367] Starting Post-Processing on 293894103 => Repair:True, Unpack:True, Delete:True, Script:None, Cat:*
2020-08-15 09:52:02,680::INFO::[postproc:722] Starting verification and repair of 293894103
2020-08-15 09:52:02,708::INFO::[postproc:794] Verification and repair finished for 293894103
2020-08-15 09:52:02,709::ERROR::[postproc:600] Post Processing Failed for 293894103 (see logfile)
2020-08-15 09:52:02,714::INFO::[postproc:601] Traceback:
Traceback (most recent call last):
File "/opt/sabnzbd/sabnzbd/postproc.py", line 415, in process_job
tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(
File "/opt/sabnzbd/sabnzbd/postproc.py", line 693, in prepare_extraction_path
workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
File "/usr/lib64/python3.8/posixpath.py", line 90, in join
genericpath._check_arg_types('join', a, *p)
File "/usr/lib64/python3.8/genericpath.py", line 152, in _check_arg_types
raise TypeError(f'{funcname}() argument must be str, bytes, or '
TypeError: join() argument must be str, bytes, or os.PathLike object, not 'int'
|
TypeError
|
def save_attribs(self):
"""Save specific attributes for Retry"""
attribs = {}
for attrib in NzoAttributeSaver:
attribs[attrib] = getattr(self, attrib)
logging.debug("Saving attributes %s for %s", attribs, self.final_name)
sabnzbd.save_data(attribs, ATTRIB_FILE, self.workpath)
|
def save_attribs(self):
set_attrib_file(
self.workpath,
(
self.cat,
self.pp,
self.script,
self.priority,
self.final_name,
self.password,
self.url,
),
)
|
https://github.com/sabnzbd/sabnzbd/issues/1575
|
2020-08-15 09:52:02,675::INFO::[postproc:367] Starting Post-Processing on 293894103 => Repair:True, Unpack:True, Delete:True, Script:None, Cat:*
2020-08-15 09:52:02,680::INFO::[postproc:722] Starting verification and repair of 293894103
2020-08-15 09:52:02,708::INFO::[postproc:794] Verification and repair finished for 293894103
2020-08-15 09:52:02,709::ERROR::[postproc:600] Post Processing Failed for 293894103 (see logfile)
2020-08-15 09:52:02,714::INFO::[postproc:601] Traceback:
Traceback (most recent call last):
File "/opt/sabnzbd/sabnzbd/postproc.py", line 415, in process_job
tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(
File "/opt/sabnzbd/sabnzbd/postproc.py", line 693, in prepare_extraction_path
workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
File "/usr/lib64/python3.8/posixpath.py", line 90, in join
genericpath._check_arg_types('join', a, *p)
File "/usr/lib64/python3.8/genericpath.py", line 152, in _check_arg_types
raise TypeError(f'{funcname}() argument must be str, bytes, or '
TypeError: join() argument must be str, bytes, or os.PathLike object, not 'int'
|
TypeError
|
def __init__(
self,
filename,
pp=None,
script=None,
nzb=None,
futuretype=False,
cat=None,
url=None,
priority=DEFAULT_PRIORITY,
nzbname=None,
status=Status.QUEUED,
nzo_info=None,
reuse=None,
dup_check=True,
):
TryList.__init__(self)
self.filename = filename # Original filename
if nzbname and nzb:
self.work_name = nzbname # Use nzbname if set and only for non-future slot
else:
self.work_name = filename
# For future-slots we keep the name given by URLGrabber
if nzb is None:
self.final_name = self.work_name = filename
else:
# Remove trailing .nzb and .par(2)
self.work_name = create_work_name(self.work_name)
# Extract password
self.work_name, self.password = scan_password(self.work_name)
if not self.work_name:
# In case only /password was entered for nzbname
self.work_name = filename
self.final_name = self.work_name
# Check for password also in filename
if not self.password:
_, self.password = scan_password(os.path.splitext(filename)[0])
# Determine category and find pp/script values based on input
# Later will be re-evaluated based on import steps
if pp is None:
r = u = d = None
else:
r, u, d = pp_to_opts(pp)
self.set_priority(priority) # Parse priority of input
self.repair = r # True if we want to repair this set
self.unpack = u # True if we want to unpack this set
self.delete = d # True if we want to delete this set
self.script = script # External script for this set
self.cat = cat # User-set category
# Information fields
self.url = url or filename
self.groups = []
self.avg_date = datetime.datetime(1970, 1, 1, 1, 0)
self.avg_stamp = 0.0 # Avg age in seconds (calculated from avg_age)
# Bookkeeping values
self.meta = {}
self.servercount = {} # Dict to keep bytes per server
self.created = False # dirprefixes + work_name created
self.direct_unpacker = None # Holds the DirectUnpacker instance
self.bytes = 0 # Original bytesize
self.bytes_downloaded = 0 # Downloaded byte
self.bytes_tried = 0 # Which bytes did we try
self.bytes_missing = 0 # Bytes missing
self.bad_articles = 0 # How many bad (non-recoverable) articles
self.partable = {} # Holds one parfile-name for each set
self.extrapars = {} # Holds the extra parfile names for all sets
self.md5packs = {} # Holds the md5pack for each set (name: hash)
self.md5of16k = {} # Holds the md5s of the first-16k of all files in the NZB (hash: name)
self.files = [] # List of all NZFs
self.files_table = {} # Dictionary of NZFs indexed using NZF_ID
self.renames = {} # Dictionary of all renamed files
self.finished_files = [] # List of all finished NZFs
# The current status of the nzo eg:
# Queued, Downloading, Repairing, Unpacking, Failed, Complete
self.status = status
self.avg_bps_freq = 0
self.avg_bps_total = 0
self.first_articles = []
self.first_articles_count = 0
self.saved_articles = []
self.nzo_id = None
self.futuretype = futuretype
self.deleted = False
self.to_be_removed = False
self.parsed = False
self.duplicate = False
self.oversized = False
self.precheck = False
self.incomplete = False
self.unwanted_ext = 0
self.rating_filtered = 0
self.reuse = reuse
if self.status == Status.QUEUED and not reuse:
self.precheck = cfg.pre_check()
if self.precheck:
self.status = Status.CHECKING
# Store one line responses for filejoin/par2/unrar/unzip here for history display
self.action_line = ""
# Store the results from various filejoin/par2/unrar/unzip stages
self.unpack_info = {}
# Stores one line containing the last failure
self.fail_msg = ""
# Stores various info about the nzo to be
self.nzo_info = nzo_info or {}
# Temporary store for custom foldername - needs to be stored because of url fetching
self.custom_name = nzbname
self.next_save = None
self.save_timeout = None
self.encrypted = 0
self.url_wait = None
self.url_tries = 0
self.pp_active = False # Signals active post-processing (not saved)
self.md5sum = None
if nzb is None and not reuse:
# This is a slot for a future NZB, ready now
# It can also be a retry of a failed job with no extra NZB-file
return
# Apply conversion option to final folder
if cfg.replace_spaces():
logging.info("Replacing spaces with underscores in %s", self.final_name)
self.final_name = self.final_name.replace(" ", "_")
if cfg.replace_dots():
logging.info("Replacing dots with spaces in %s", self.final_name)
self.final_name = self.final_name.replace(".", " ")
# Check against identical checksum or series/season/episode
if (not reuse) and nzb and dup_check and priority != REPAIR_PRIORITY:
duplicate, series = self.has_duplicates()
else:
duplicate = series = 0
# Reuse the existing directory
if reuse and os.path.exists(reuse):
work_dir = long_path(reuse)
else:
# Determine "incomplete" folder and trim path on Windows to prevent long-path unrar errors
work_dir = long_path(os.path.join(cfg.download_dir.get_path(), self.work_name))
work_dir = trim_win_path(work_dir)
work_dir = get_unique_path(work_dir, create_dir=True)
set_permissions(work_dir)
# Always create the admin-directory, just to be sure
admin_dir = os.path.join(work_dir, JOB_ADMIN)
if not os.path.exists(admin_dir):
os.mkdir(admin_dir)
_, self.work_name = os.path.split(work_dir)
self.created = True
# When doing a retry or repair, remove old cache-files
if reuse:
remove_all(admin_dir, "SABnzbd_nz?_*", keep_folder=True)
remove_all(admin_dir, "SABnzbd_article_*", keep_folder=True)
if nzb and "<nzb" in nzb:
try:
sabnzbd.nzbparser.nzbfile_parser(nzb, self)
except Exception as err:
self.incomplete = True
logging.warning(
T("Invalid NZB file %s, skipping (reason=%s, line=%s)"),
filename,
err,
"1",
)
logging.info("Traceback: ", exc_info=True)
# Some people want to keep the broken files
if cfg.allow_incomplete_nzb():
self.pause()
else:
self.purge_data()
raise ValueError
sabnzbd.backup_nzb(filename, nzb)
sabnzbd.save_compressed(admin_dir, filename, nzb)
if not self.files and not reuse:
self.purge_data()
if cfg.warn_empty_nzb():
mylog = logging.warning
else:
mylog = logging.info
if self.url:
mylog(T("Empty NZB file %s") + " [%s]", filename, self.url)
else:
mylog(T("Empty NZB file %s"), filename)
raise ValueError
if cat is None:
for metacat in self.meta.get("category", ()):
metacat = cat_convert(metacat)
if metacat:
cat = metacat
break
if cat is None:
for grp in self.groups:
cat = cat_convert(grp)
if cat:
break
# Pickup backed-up attributes when re-using
if reuse:
cat, pp, script, priority = self.load_attribs()
# Determine category and find pp/script values
self.cat, pp_tmp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp_tmp)
# Run user pre-queue script if set and valid
if not reuse and make_script_path(cfg.pre_script()):
# Call the script
accept, name, pp, cat_pp, script_pp, priority, group = (
sabnzbd.newsunpack.pre_queue(self, pp, cat)
)
# Accept or reject
accept = int_conv(accept)
if accept < 1:
self.purge_data()
raise TypeError
if accept == 2:
self.fail_msg = T("Pre-queue script marked job as failed")
# Process all options, only over-write if set by script
# Beware that cannot do "if priority/pp", because those can
# also have a valid value of 0, which shouldn't be ignored
if name:
self.set_final_name_and_scan_password(name)
try:
pp = int(pp)
except:
pp = None
if cat_pp:
cat = cat_pp
try:
priority = int(priority)
except:
priority = DEFAULT_PRIORITY
if script_pp:
script = script_pp
if group:
self.groups = [str(group)]
# Re-evaluate results from pre-queue script
self.cat, pp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp)
else:
accept = 1
# Pause job when above size limit
limit = cfg.size_limit.get_int()
if not reuse and abs(limit) > 0.5 and self.bytes > limit:
logging.info("Job too large, forcing low prio and paused (%s)", self.final_name)
self.pause()
self.oversized = True
self.priority = LOW_PRIORITY
if duplicate and (
(not series and cfg.no_dupes() == 1) or (series and cfg.no_series_dupes() == 1)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Ignoring duplicate NZB "%s"'), filename)
self.purge_data()
raise TypeError
if duplicate and (
(not series and cfg.no_dupes() == 3) or (series and cfg.no_series_dupes() == 3)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Failing duplicate NZB "%s"'), filename)
# Move to history, utilizing the same code as accept&fail from pre-queue script
self.fail_msg = T("Duplicate NZB")
accept = 2
duplicate = False
if duplicate or self.priority == DUP_PRIORITY:
if cfg.no_dupes() == 4 or cfg.no_series_dupes() == 4:
if cfg.warn_dupl_jobs():
logging.warning('%s: "%s"', T("Duplicate NZB"), filename)
self.duplicate = True
self.priority = NORMAL_PRIORITY
else:
if cfg.warn_dupl_jobs():
logging.warning(T('Pausing duplicate NZB "%s"'), filename)
self.duplicate = True
self.pause()
self.priority = NORMAL_PRIORITY
# Check if there is any unwanted extension in plain sight in the NZB itself
for nzf in self.files:
if (
cfg.action_on_unwanted_extensions() >= 1
and get_ext(nzf.filename).replace(".", "") in cfg.unwanted_extensions()
):
# ... we found an unwanted extension
logging.warning(
T("Unwanted Extension in file %s (%s)"), nzf.filename, self.final_name
)
# Pause, or Abort:
if cfg.action_on_unwanted_extensions() == 1:
logging.debug("Unwanted extension ... pausing")
self.unwanted_ext = 1
self.pause()
if cfg.action_on_unwanted_extensions() == 2:
logging.debug("Unwanted extension ... aborting")
self.fail_msg = T("Aborted, unwanted extension detected")
accept = 2
if self.priority == PAUSED_PRIORITY:
self.pause()
self.priority = NORMAL_PRIORITY
if reuse:
self.check_existing_files(work_dir)
if cfg.auto_sort():
self.files.sort(key=functools.cmp_to_key(nzf_cmp_date))
else:
self.files.sort(key=functools.cmp_to_key(nzf_cmp_name))
# Copy meta fields to nzo_info, if not already set
for kw in self.meta:
if not self.nzo_info.get(kw):
self.nzo_info[kw] = self.meta[kw][0]
# Show first meta-password (if any), when there's no explicit password
if not self.password and self.meta.get("password"):
self.password = self.meta.get("password", [None])[0]
# Set nzo save-delay to minimum 120 seconds
self.save_timeout = max(120, min(6.0 * float(self.bytes) / GIGI, 300.0))
# In case pre-queue script or duplicate check want to move
# to history we first need an nzo_id by entering the NzbQueue
if accept == 2:
self.deleted = True
self.status = Status.FAILED
sabnzbd.NzbQueue.do.add(self, quiet=True)
sabnzbd.NzbQueue.do.end_job(self)
# Raise error, so it's not added
raise TypeError
|
def __init__(
self,
filename,
pp=None,
script=None,
nzb=None,
futuretype=False,
cat=None,
url=None,
priority=DEFAULT_PRIORITY,
nzbname=None,
status=Status.QUEUED,
nzo_info=None,
reuse=None,
dup_check=True,
):
TryList.__init__(self)
self.filename = filename # Original filename
if nzbname and nzb:
self.work_name = nzbname # Use nzbname if set and only for non-future slot
else:
self.work_name = filename
# For future-slots we keep the name given by URLGrabber
if nzb is None:
self.final_name = self.work_name = filename
else:
# Remove trailing .nzb and .par(2)
self.work_name = create_work_name(self.work_name)
# Extract password
self.work_name, self.password = scan_password(self.work_name)
if not self.work_name:
# In case only /password was entered for nzbname
self.work_name = filename
self.final_name = self.work_name
# Check for password also in filename
if not self.password:
_, self.password = scan_password(os.path.splitext(filename)[0])
# Determine category and find pp/script values based on input
# Later will be re-evaluated based on import steps
if pp is None:
r = u = d = None
else:
r, u, d = pp_to_opts(pp)
self.set_priority(priority) # Parse priority of input
self.repair = r # True if we want to repair this set
self.unpack = u # True if we want to unpack this set
self.delete = d # True if we want to delete this set
self.script = script # External script for this set
self.cat = cat # User-set category
# Information fields
self.url = url or filename
self.groups = []
self.avg_date = datetime.datetime(1970, 1, 1, 1, 0)
self.avg_stamp = 0.0 # Avg age in seconds (calculated from avg_age)
# Bookkeeping values
self.meta = {}
self.servercount = {} # Dict to keep bytes per server
self.created = False # dirprefixes + work_name created
self.direct_unpacker = None # Holds the DirectUnpacker instance
self.bytes = 0 # Original bytesize
self.bytes_downloaded = 0 # Downloaded byte
self.bytes_tried = 0 # Which bytes did we try
self.bytes_missing = 0 # Bytes missing
self.bad_articles = 0 # How many bad (non-recoverable) articles
self.partable = {} # Holds one parfile-name for each set
self.extrapars = {} # Holds the extra parfile names for all sets
self.md5packs = {} # Holds the md5pack for each set (name: hash)
self.md5of16k = {} # Holds the md5s of the first-16k of all files in the NZB (hash: name)
self.files = [] # List of all NZFs
self.files_table = {} # Dictionary of NZFs indexed using NZF_ID
self.renames = {} # Dictionary of all renamed files
self.finished_files = [] # List of all finished NZFs
# The current status of the nzo eg:
# Queued, Downloading, Repairing, Unpacking, Failed, Complete
self.status = status
self.avg_bps_freq = 0
self.avg_bps_total = 0
self.first_articles = []
self.first_articles_count = 0
self.saved_articles = []
self.nzo_id = None
self.futuretype = futuretype
self.deleted = False
self.to_be_removed = False
self.parsed = False
self.duplicate = False
self.oversized = False
self.precheck = False
self.incomplete = False
self.unwanted_ext = 0
self.rating_filtered = 0
self.reuse = reuse
if self.status == Status.QUEUED and not reuse:
self.precheck = cfg.pre_check()
if self.precheck:
self.status = Status.CHECKING
# Store one line responses for filejoin/par2/unrar/unzip here for history display
self.action_line = ""
# Store the results from various filejoin/par2/unrar/unzip stages
self.unpack_info = {}
# Stores one line containing the last failure
self.fail_msg = ""
# Stores various info about the nzo to be
self.nzo_info = nzo_info or {}
# Temporary store for custom foldername - needs to be stored because of url fetching
self.custom_name = nzbname
self.next_save = None
self.save_timeout = None
self.encrypted = 0
self.url_wait = None
self.url_tries = 0
self.pp_active = False # Signals active post-processing (not saved)
self.md5sum = None
if nzb is None and not reuse:
# This is a slot for a future NZB, ready now
# It can also be a retry of a failed job with no extra NZB-file
return
# Apply conversion option to final folder
if cfg.replace_spaces():
logging.info("Replacing spaces with underscores in %s", self.final_name)
self.final_name = self.final_name.replace(" ", "_")
if cfg.replace_dots():
logging.info("Replacing dots with spaces in %s", self.final_name)
self.final_name = self.final_name.replace(".", " ")
# Check against identical checksum or series/season/episode
if (not reuse) and nzb and dup_check and priority != REPAIR_PRIORITY:
duplicate, series = self.has_duplicates()
else:
duplicate = series = 0
# Reuse the existing directory
if reuse and os.path.exists(reuse):
work_dir = long_path(reuse)
else:
# Determine "incomplete" folder and trim path on Windows to prevent long-path unrar errors
work_dir = long_path(os.path.join(cfg.download_dir.get_path(), self.work_name))
work_dir = trim_win_path(work_dir)
work_dir = get_unique_path(work_dir, create_dir=True)
set_permissions(work_dir)
# Always create the admin-directory, just to be sure
admin_dir = os.path.join(work_dir, JOB_ADMIN)
if not os.path.exists(admin_dir):
os.mkdir(admin_dir)
_, self.work_name = os.path.split(work_dir)
self.created = True
# When doing a retry or repair, remove old cache-files
if reuse:
remove_all(admin_dir, "SABnzbd_nz?_*", keep_folder=True)
remove_all(admin_dir, "SABnzbd_article_*", keep_folder=True)
if nzb and "<nzb" in nzb:
try:
sabnzbd.nzbparser.nzbfile_parser(nzb, self)
except Exception as err:
self.incomplete = True
logging.warning(
T("Invalid NZB file %s, skipping (reason=%s, line=%s)"),
filename,
err,
"1",
)
logging.info("Traceback: ", exc_info=True)
# Some people want to keep the broken files
if cfg.allow_incomplete_nzb():
self.pause()
else:
self.purge_data()
raise ValueError
sabnzbd.backup_nzb(filename, nzb)
sabnzbd.save_compressed(admin_dir, filename, nzb)
if not self.files and not reuse:
self.purge_data()
if cfg.warn_empty_nzb():
mylog = logging.warning
else:
mylog = logging.info
if self.url:
mylog(T("Empty NZB file %s") + " [%s]", filename, self.url)
else:
mylog(T("Empty NZB file %s"), filename)
raise ValueError
if cat is None:
for metacat in self.meta.get("category", ()):
metacat = cat_convert(metacat)
if metacat:
cat = metacat
break
if cat is None:
for grp in self.groups:
cat = cat_convert(grp)
if cat:
break
# Pickup backed-up attributes when re-using
if reuse:
cat, pp, script, priority, name, password, self.url = get_attrib_file(
self.workpath, 7
)
if name:
self.final_name = name
if password:
self.password = password
# Determine category and find pp/script values
self.cat, pp_tmp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp_tmp)
# Run user pre-queue script if set and valid
if not reuse and make_script_path(cfg.pre_script()):
# Call the script
accept, name, pp, cat_pp, script_pp, priority, group = (
sabnzbd.newsunpack.pre_queue(self, pp, cat)
)
# Accept or reject
accept = int_conv(accept)
if accept < 1:
self.purge_data()
raise TypeError
if accept == 2:
self.fail_msg = T("Pre-queue script marked job as failed")
# Process all options, only over-write if set by script
# Beware that cannot do "if priority/pp", because those can
# also have a valid value of 0, which shouldn't be ignored
if name:
self.set_final_name_and_scan_password(name)
try:
pp = int(pp)
except:
pp = None
if cat_pp:
cat = cat_pp
try:
priority = int(priority)
except:
priority = DEFAULT_PRIORITY
if script_pp:
script = script_pp
if group:
self.groups = [str(group)]
# Re-evaluate results from pre-queue script
self.cat, pp, self.script, priority = cat_to_opts(cat, pp, script, priority)
self.set_priority(priority)
self.repair, self.unpack, self.delete = pp_to_opts(pp)
else:
accept = 1
# Pause job when above size limit
limit = cfg.size_limit.get_int()
if not reuse and abs(limit) > 0.5 and self.bytes > limit:
logging.info("Job too large, forcing low prio and paused (%s)", self.final_name)
self.pause()
self.oversized = True
self.priority = LOW_PRIORITY
if duplicate and (
(not series and cfg.no_dupes() == 1) or (series and cfg.no_series_dupes() == 1)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Ignoring duplicate NZB "%s"'), filename)
self.purge_data()
raise TypeError
if duplicate and (
(not series and cfg.no_dupes() == 3) or (series and cfg.no_series_dupes() == 3)
):
if cfg.warn_dupl_jobs():
logging.warning(T('Failing duplicate NZB "%s"'), filename)
# Move to history, utilizing the same code as accept&fail from pre-queue script
self.fail_msg = T("Duplicate NZB")
accept = 2
duplicate = False
if duplicate or self.priority == DUP_PRIORITY:
if cfg.no_dupes() == 4 or cfg.no_series_dupes() == 4:
if cfg.warn_dupl_jobs():
logging.warning('%s: "%s"', T("Duplicate NZB"), filename)
self.duplicate = True
self.priority = NORMAL_PRIORITY
else:
if cfg.warn_dupl_jobs():
logging.warning(T('Pausing duplicate NZB "%s"'), filename)
self.duplicate = True
self.pause()
self.priority = NORMAL_PRIORITY
# Check if there is any unwanted extension in plain sight in the NZB itself
for nzf in self.files:
if (
cfg.action_on_unwanted_extensions() >= 1
and get_ext(nzf.filename).replace(".", "") in cfg.unwanted_extensions()
):
# ... we found an unwanted extension
logging.warning(
T("Unwanted Extension in file %s (%s)"), nzf.filename, self.final_name
)
# Pause, or Abort:
if cfg.action_on_unwanted_extensions() == 1:
logging.debug("Unwanted extension ... pausing")
self.unwanted_ext = 1
self.pause()
if cfg.action_on_unwanted_extensions() == 2:
logging.debug("Unwanted extension ... aborting")
self.fail_msg = T("Aborted, unwanted extension detected")
accept = 2
if self.priority == PAUSED_PRIORITY:
self.pause()
self.priority = NORMAL_PRIORITY
if reuse:
self.check_existing_files(work_dir)
if cfg.auto_sort():
self.files.sort(key=functools.cmp_to_key(nzf_cmp_date))
else:
self.files.sort(key=functools.cmp_to_key(nzf_cmp_name))
# Copy meta fields to nzo_info, if not already set
for kw in self.meta:
if not self.nzo_info.get(kw):
self.nzo_info[kw] = self.meta[kw][0]
# Show first meta-password (if any), when there's no explicit password
if not self.password and self.meta.get("password"):
self.password = self.meta.get("password", [None])[0]
# Set nzo save-delay to minimum 120 seconds
self.save_timeout = max(120, min(6.0 * float(self.bytes) / GIGI, 300.0))
# In case pre-queue script or duplicate check want to move
# to history we first need an nzo_id by entering the NzbQueue
if accept == 2:
self.deleted = True
self.status = Status.FAILED
sabnzbd.NzbQueue.do.add(self, quiet=True)
sabnzbd.NzbQueue.do.end_job(self)
# Raise error, so it's not added
raise TypeError
|
https://github.com/sabnzbd/sabnzbd/issues/1575
|
2020-08-15 09:52:02,675::INFO::[postproc:367] Starting Post-Processing on 293894103 => Repair:True, Unpack:True, Delete:True, Script:None, Cat:*
2020-08-15 09:52:02,680::INFO::[postproc:722] Starting verification and repair of 293894103
2020-08-15 09:52:02,708::INFO::[postproc:794] Verification and repair finished for 293894103
2020-08-15 09:52:02,709::ERROR::[postproc:600] Post Processing Failed for 293894103 (see logfile)
2020-08-15 09:52:02,714::INFO::[postproc:601] Traceback:
Traceback (most recent call last):
File "/opt/sabnzbd/sabnzbd/postproc.py", line 415, in process_job
tmp_workdir_complete, workdir_complete, file_sorter, one_folder, marker_file = prepare_extraction_path(
File "/opt/sabnzbd/sabnzbd/postproc.py", line 693, in prepare_extraction_path
workdir_complete = get_unique_path(os.path.join(complete_dir, nzo.final_name), create_dir=True)
File "/usr/lib64/python3.8/posixpath.py", line 90, in join
genericpath._check_arg_types('join', a, *p)
File "/usr/lib64/python3.8/genericpath.py", line 152, in _check_arg_types
raise TypeError(f'{funcname}() argument must be str, bytes, or '
TypeError: join() argument must be str, bytes, or os.PathLike object, not 'int'
|
TypeError
|
def _handle_node_cache(self, node, keep_build, processed_package_references, remotes):
pref = node.pref
assert pref.id, "Package-ID without value"
assert pref.id != PACKAGE_ID_UNKNOWN, "Package-ID error: %s" % str(pref)
conanfile = node.conanfile
output = conanfile.output
layout = self._cache.package_layout(pref.ref, conanfile.short_paths)
with layout.package_lock(pref):
if pref not in processed_package_references:
processed_package_references.add(pref)
if node.binary == BINARY_BUILD:
assert node.prev is None, (
"PREV for %s to be built should be None" % str(pref)
)
layout.package_remove(pref)
with layout.set_dirty_context_manager(pref):
pref = self._build_package(node, output, keep_build, remotes)
assert node.prev, "Node PREV shouldn't be empty"
assert node.pref.revision, "Node PREF revision shouldn't be empty"
assert pref.revision is not None, (
"PREV for %s to be built is None" % str(pref)
)
elif node.binary in (BINARY_UPDATE, BINARY_DOWNLOAD):
# this can happen after a re-evaluation of packageID with Package_ID_unknown
self._download_pkg(layout, node)
elif node.binary == BINARY_CACHE:
assert node.prev, "PREV for %s is None" % str(pref)
output.success("Already installed!")
log_package_got_from_local_cache(pref)
self._recorder.package_fetched_from_cache(pref)
package_folder = layout.package(pref)
assert os.path.isdir(package_folder), "Package '%s' folder must exist: %s\n" % (
str(pref),
package_folder,
)
# Call the info method
self._call_package_info(conanfile, package_folder, ref=pref.ref)
self._recorder.package_cpp_info(pref, conanfile.cpp_info)
|
def _handle_node_cache(self, node, keep_build, processed_package_references, remotes):
pref = node.pref
assert pref.id, "Package-ID without value"
assert pref.id != PACKAGE_ID_UNKNOWN, "Package-ID error: %s" % str(pref)
conanfile = node.conanfile
output = conanfile.output
layout = self._cache.package_layout(pref.ref, conanfile.short_paths)
with layout.package_lock(pref):
if pref not in processed_package_references:
processed_package_references.add(pref)
if node.binary == BINARY_BUILD:
assert node.prev is None, (
"PREV for %s to be built should be None" % str(pref)
)
layout.package_remove(pref)
with layout.set_dirty_context_manager(pref):
pref = self._build_package(node, output, keep_build, remotes)
assert node.prev, "Node PREV shouldn't be empty"
assert node.pref.revision, "Node PREF revision shouldn't be empty"
assert pref.revision is not None, (
"PREV for %s to be built is None" % str(pref)
)
elif node.binary in (BINARY_UPDATE, BINARY_DOWNLOAD):
# this can happen after a re-evaluation of packageID with Package_ID_unknown
self._download_pkg(layout, node)
elif node.binary == BINARY_CACHE:
assert node.prev, "PREV for %s is None" % str(pref)
output.success("Already installed!")
log_package_got_from_local_cache(pref)
self._recorder.package_fetched_from_cache(pref)
package_folder = layout.package(pref)
if not os.path.isdir(package_folder):
raise ConanException(
"Package '%s' corrupted. Package folder must exist: %s\n"
"Try removing the package with 'conan remove'"
% (str(pref), package_folder)
)
# Call the info method
self._call_package_info(conanfile, package_folder, ref=pref.ref)
self._recorder.package_cpp_info(pref, conanfile.cpp_info)
|
https://github.com/conan-io/conan/issues/8172
|
conan info poco/1.9.4@ --paths
Traceback (most recent call last):
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\command.py", line 2115, in run
method(args[0][1:])
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\command.py", line 747, in info
lockfile=args.lockfile)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\conan_api.py", line 94, in wrapper
return f(api, *args, **kwargs)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\conan_api.py", line 743, in info
update, False, remotes, recorder)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 116, in load_graph
apply_build_requires=apply_build_requires)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 244, in _resolve_graph
graph_lock=graph_lock)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 363, in _load_graph
apply_build_requires=apply_build_requires)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 284, in _recurse_build_requires
self._binary_analyzer.evaluate_graph(graph, build_mode, update, remotes, nodes_subset, root)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 384, in evaluate_graph
self._evaluate_node(node, build_mode, update, remotes)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 176, in _evaluate_node
self._process_node(node, pref, build_mode, update, remotes)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 242, in _process_node
self._evaluate_cache_pkg(node, package_layout, pref, metadata, remote, remotes, update)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 95, in _evaluate_cache_pkg
assert node.prev, "PREV for %s is None: %s" % (str(pref), metadata.dumps())
AssertionError: PREV for poco/1.9.4:e800054a13ea39bf75eb479d61dc6ce27dd9c321 is None: {"recipe": {"revision": "0", "remote": "conan-center", "properties": {}, "checksums": {"conanmanifest.txt": {"md5": "1c74dc82b5b19f764f9e39ef3530fac9", "sha1": "c0dce2f8e0394be5932455f35d3103877d184bdc"}, "conanfile.py": {"md5": "8315dcc96b131b21d4d64d7f56e73d99", "sha1": "b768bea077de938ea0b95baf916bf6b96101e694"}, "conan_export.tgz": {"md5": "5819fbc0b48456426c49daba6b4d85be", "sha1": "33bca842766631e007b7f5fa21b9770217328b7f"}}}, "packages": {"e800054a13ea39bf75eb479d61dc6ce27dd9c321": {"revision": null, "recipe_revision": null, "remote": null, "properties": {}, "checksums": {}}}}
ERROR: PREV for poco/1.9.4:e800054a13ea39bf75eb479d61dc6ce27dd9c321 is None: {"recipe": {"revision": "0", "remote": "conan-center", "properties": {}, "checksums": {"conanmanifest.txt": {"md5": "1c74dc82b5b19f764f9e39ef3530fac9", "sha1": "c0dce2f8e0394be5932455f35d3103877d184bdc"}, "conanfile.py": {"md5": "8315dcc96b131b21d4d64d7f56e73d99", "sha1": "b768bea077de938ea0b95baf916bf6b96101e694"}, "conan_export.tgz": {"md5": "5819fbc0b48456426c49daba6b4d85be", "sha1": "33bca842766631e007b7f5fa21b9770217328b7f"}}}, "packages": {"e800054a13ea39bf75eb479d61dc6ce27dd9c321": {"revision": null, "recipe_revision": null, "remote": null, "properties": {}, "checksums": {}}}}
|
AssertionError
|
def package_id_exists(self, package_id):
# The package exists if the folder exists, also for short_paths case
pkg_folder = self.package(PackageReference(self._ref, package_id))
return os.path.isdir(pkg_folder)
|
def package_id_exists(self, package_id):
# This is NOT the short paths, but the standard cache one
pkg_folder = os.path.join(self._base_folder, PACKAGES_FOLDER, package_id)
return os.path.isdir(pkg_folder)
|
https://github.com/conan-io/conan/issues/8172
|
conan info poco/1.9.4@ --paths
Traceback (most recent call last):
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\command.py", line 2115, in run
method(args[0][1:])
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\command.py", line 747, in info
lockfile=args.lockfile)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\conan_api.py", line 94, in wrapper
return f(api, *args, **kwargs)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\conan_api.py", line 743, in info
update, False, remotes, recorder)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 116, in load_graph
apply_build_requires=apply_build_requires)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 244, in _resolve_graph
graph_lock=graph_lock)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 363, in _load_graph
apply_build_requires=apply_build_requires)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_manager.py", line 284, in _recurse_build_requires
self._binary_analyzer.evaluate_graph(graph, build_mode, update, remotes, nodes_subset, root)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 384, in evaluate_graph
self._evaluate_node(node, build_mode, update, remotes)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 176, in _evaluate_node
self._process_node(node, pref, build_mode, update, remotes)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 242, in _process_node
self._evaluate_cache_pkg(node, package_layout, pref, metadata, remote, remotes, update)
File "C:\Users\Daniel\AppData\Roaming\Python\Python37\site-packages\conans\client\graph\graph_binaries.py", line 95, in _evaluate_cache_pkg
assert node.prev, "PREV for %s is None: %s" % (str(pref), metadata.dumps())
AssertionError: PREV for poco/1.9.4:e800054a13ea39bf75eb479d61dc6ce27dd9c321 is None: {"recipe": {"revision": "0", "remote": "conan-center", "properties": {}, "checksums": {"conanmanifest.txt": {"md5": "1c74dc82b5b19f764f9e39ef3530fac9", "sha1": "c0dce2f8e0394be5932455f35d3103877d184bdc"}, "conanfile.py": {"md5": "8315dcc96b131b21d4d64d7f56e73d99", "sha1": "b768bea077de938ea0b95baf916bf6b96101e694"}, "conan_export.tgz": {"md5": "5819fbc0b48456426c49daba6b4d85be", "sha1": "33bca842766631e007b7f5fa21b9770217328b7f"}}}, "packages": {"e800054a13ea39bf75eb479d61dc6ce27dd9c321": {"revision": null, "recipe_revision": null, "remote": null, "properties": {}, "checksums": {}}}}
ERROR: PREV for poco/1.9.4:e800054a13ea39bf75eb479d61dc6ce27dd9c321 is None: {"recipe": {"revision": "0", "remote": "conan-center", "properties": {}, "checksums": {"conanmanifest.txt": {"md5": "1c74dc82b5b19f764f9e39ef3530fac9", "sha1": "c0dce2f8e0394be5932455f35d3103877d184bdc"}, "conanfile.py": {"md5": "8315dcc96b131b21d4d64d7f56e73d99", "sha1": "b768bea077de938ea0b95baf916bf6b96101e694"}, "conan_export.tgz": {"md5": "5819fbc0b48456426c49daba6b4d85be", "sha1": "33bca842766631e007b7f5fa21b9770217328b7f"}}}, "packages": {"e800054a13ea39bf75eb479d61dc6ce27dd9c321": {"revision": null, "recipe_revision": null, "remote": null, "properties": {}, "checksums": {}}}}
|
AssertionError
|
def config(self, *args):
"""
Manages Conan configuration.
Used to edit conan.conf, or install config files.
"""
parser = argparse.ArgumentParser(
description=self.config.__doc__,
prog="conan config",
formatter_class=SmartFormatter,
)
subparsers = parser.add_subparsers(dest="subcommand", help="sub-command help")
subparsers.required = True
get_subparser = subparsers.add_parser(
"get", help="Get the value of configuration item"
)
home_subparser = subparsers.add_parser(
"home", help="Retrieve the Conan home directory"
)
install_subparser = subparsers.add_parser(
"install", help="Install a full configuration from a local or remote zip file"
)
rm_subparser = subparsers.add_parser("rm", help="Remove an existing config element")
set_subparser = subparsers.add_parser(
"set", help="Set a value for a configuration item"
)
init_subparser = subparsers.add_parser(
"init", help="Initializes Conan configuration files"
)
get_subparser.add_argument("item", nargs="?", help="Item to print")
home_subparser.add_argument(
"-j",
"--json",
default=None,
action=OnceArgument,
help="json file path where the config home will be written to",
)
install_subparser.add_argument(
"item",
nargs="?",
help="git repository, local folder or zip file (local or "
"http) where the configuration is stored",
)
install_subparser.add_argument(
"--verify-ssl",
nargs="?",
default="True",
help="Verify SSL connection when downloading file",
)
install_subparser.add_argument(
"--type", "-t", choices=["git"], help="Type of remote config"
)
install_subparser.add_argument(
"--args", "-a", help='String with extra arguments for "git clone"'
)
install_subparser.add_argument(
"-sf",
"--source-folder",
help="Install files only from a source subfolder from the specified origin",
)
install_subparser.add_argument(
"-tf", "--target-folder", help="Install to that path in the conan cache"
)
install_subparser.add_argument(
"-l",
"--list",
default=False,
action="store_true",
help="List stored configuration origins",
)
install_subparser.add_argument(
"-r",
"--remove",
type=int,
help="Remove configuration origin by index in list (index "
"provided by --list argument)",
)
rm_subparser.add_argument("item", help="Item to remove")
set_subparser.add_argument("item", help="'item=value' to set")
init_subparser.add_argument(
"-f",
"--force",
default=False,
action="store_true",
help="Overwrite existing Conan configuration files",
)
args = parser.parse_args(*args)
if args.subcommand == "set":
try:
key, value = args.item.split("=", 1)
except ValueError:
if "hooks." in args.item:
key, value = args.item.split("=", 1)[0], None
else:
raise ConanException("Please specify 'key=value'")
return self._conan.config_set(key, value)
elif args.subcommand == "get":
return self._conan.config_get(args.item)
elif args.subcommand == "rm":
return self._conan.config_rm(args.item)
elif args.subcommand == "home":
conan_home = self._conan.config_home()
self._out.info(conan_home)
if args.json:
self._outputer.json_output({"home": conan_home}, args.json, os.getcwd())
return conan_home
elif args.subcommand == "install":
if args.list:
configs = self._conan.config_install_list()
for index, config in enumerate(configs):
self._out.writeln("%s: %s" % (index, config))
return
elif args.remove is not None:
self._conan.config_install_remove(index=args.remove)
return
verify_ssl = get_bool_from_text(args.verify_ssl)
return self._conan.config_install(
args.item,
verify_ssl,
args.type,
args.args,
source_folder=args.source_folder,
target_folder=args.target_folder,
)
elif args.subcommand == "init":
return self._conan.config_init(force=args.force)
|
def config(self, *args):
"""
Manages Conan configuration.
Used to edit conan.conf, or install config files.
"""
parser = argparse.ArgumentParser(
description=self.config.__doc__,
prog="conan config",
formatter_class=SmartFormatter,
)
subparsers = parser.add_subparsers(dest="subcommand", help="sub-command help")
subparsers.required = True
get_subparser = subparsers.add_parser(
"get", help="Get the value of configuration item"
)
home_subparser = subparsers.add_parser(
"home", help="Retrieve the Conan home directory"
)
install_subparser = subparsers.add_parser(
"install", help="Install a full configuration from a local or remote zip file"
)
rm_subparser = subparsers.add_parser("rm", help="Remove an existing config element")
set_subparser = subparsers.add_parser(
"set", help="Set a value for a configuration item"
)
init_subparser = subparsers.add_parser(
"init", help="Initializes Conan configuration files"
)
get_subparser.add_argument("item", nargs="?", help="Item to print")
home_subparser.add_argument(
"-j",
"--json",
default=None,
action=OnceArgument,
help="json file path where the config home will be written to",
)
install_subparser.add_argument(
"item",
nargs="?",
help="git repository, local folder or zip file (local or "
"http) where the configuration is stored",
)
install_subparser.add_argument(
"--verify-ssl",
nargs="?",
default="True",
help="Verify SSL connection when downloading file",
)
install_subparser.add_argument(
"--type", "-t", choices=["git"], help="Type of remote config"
)
install_subparser.add_argument(
"--args", "-a", help='String with extra arguments for "git clone"'
)
install_subparser.add_argument(
"-sf",
"--source-folder",
help="Install files only from a source subfolder from the specified origin",
)
install_subparser.add_argument(
"-tf", "--target-folder", help="Install to that path in the conan cache"
)
rm_subparser.add_argument("item", help="Item to remove")
set_subparser.add_argument("item", help="'item=value' to set")
init_subparser.add_argument(
"-f",
"--force",
default=False,
action="store_true",
help="Overwrite existing Conan configuration files",
)
args = parser.parse_args(*args)
if args.subcommand == "set":
try:
key, value = args.item.split("=", 1)
except ValueError:
if "hooks." in args.item:
key, value = args.item.split("=", 1)[0], None
else:
raise ConanException("Please specify 'key=value'")
return self._conan.config_set(key, value)
elif args.subcommand == "get":
return self._conan.config_get(args.item)
elif args.subcommand == "rm":
return self._conan.config_rm(args.item)
elif args.subcommand == "home":
conan_home = self._conan.config_home()
self._out.info(conan_home)
if args.json:
self._outputer.json_output({"home": conan_home}, args.json, os.getcwd())
return conan_home
elif args.subcommand == "install":
verify_ssl = get_bool_from_text(args.verify_ssl)
return self._conan.config_install(
args.item,
verify_ssl,
args.type,
args.args,
source_folder=args.source_folder,
target_folder=args.target_folder,
)
elif args.subcommand == "init":
return self._conan.config_init(force=args.force)
|
https://github.com/conan-io/conan/issues/7046
|
PS D:\temp\conan> conan search
Traceback (most recent call last):
File "conan\conans\client\command.py", line 2016, in run
File "conan\conans\client\conan_api.py", line 92, in wrapper
File "conan\conans\client\conan_api.py", line 608, in config_install
File "conan\conans\client\conf\config_installer.py", line 234, in configuration_install
File "conan\conans\client\conf\config_installer.py", line 201, in _process_config
File "conan\conans\client\conf\config_installer.py", line 66, in _process_zip_file
File "conan\conans\client\tools\files.py", line 104, in unzip
File "zipfile.py", line 1204, in __init__
FileNotFoundError: [Errno 2] No such file or directory: 'D:\\temp\\conan\\config-buildserver.zip'
ERROR: [Errno 2] No such file or directory: 'D:\\temp\\conan\\config-buildserver.zip'
|
FileNotFoundError
|
def _process_config(config, cache, output, requester):
try:
if config.type == "git":
_process_git_repo(config, cache, output)
elif config.type == "dir":
_process_folder(config, config.uri, cache, output)
elif config.type == "file":
with tmp_config_install_folder(cache) as tmp_folder:
_process_zip_file(config, config.uri, cache, output, tmp_folder)
elif config.type == "url":
_process_download(config, cache, output, requester=requester)
else:
raise ConanException("Unable to process config install: %s" % config.uri)
except Exception as e:
raise ConanException("Failed conan config install: %s" % str(e))
|
def _process_config(config, cache, output, requester):
if config.type == "git":
_process_git_repo(config, cache, output)
elif config.type == "dir":
_process_folder(config, config.uri, cache, output)
elif config.type == "file":
with tmp_config_install_folder(cache) as tmp_folder:
_process_zip_file(config, config.uri, cache, output, tmp_folder)
elif config.type == "url":
_process_download(config, cache, output, requester=requester)
else:
raise ConanException("Unable to process config install: %s" % config.uri)
|
https://github.com/conan-io/conan/issues/7046
|
PS D:\temp\conan> conan search
Traceback (most recent call last):
File "conan\conans\client\command.py", line 2016, in run
File "conan\conans\client\conan_api.py", line 92, in wrapper
File "conan\conans\client\conan_api.py", line 608, in config_install
File "conan\conans\client\conf\config_installer.py", line 234, in configuration_install
File "conan\conans\client\conf\config_installer.py", line 201, in _process_config
File "conan\conans\client\conf\config_installer.py", line 66, in _process_zip_file
File "conan\conans\client\tools\files.py", line 104, in unzip
File "zipfile.py", line 1204, in __init__
FileNotFoundError: [Errno 2] No such file or directory: 'D:\\temp\\conan\\config-buildserver.zip'
ERROR: [Errno 2] No such file or directory: 'D:\\temp\\conan\\config-buildserver.zip'
|
FileNotFoundError
|
def __init__(self, ref, conanfile, context, recipe=None, path=None):
self.ref = ref
self.path = path # path to the consumer conanfile.xx for consumer, None otherwise
self._package_id = None
self.prev = None
self.conanfile = conanfile
self.dependencies = [] # Ordered Edges
self.dependants = set() # Edges
self.binary = None
self.recipe = recipe
self.remote = None
self.binary_remote = None
self.revision_pinned = False # The revision has been specified by the user
self.context = context
# A subset of the graph that will conflict by package name
self._public_deps = _NodeOrderedDict() # {ref.name: Node}
# all the public deps only in the closure of this node
# The dependencies that will be part of deps_cpp_info, can't conflict
self._public_closure = _NodeOrderedDict() # {ref.name: Node}
# The dependencies of this node that will be propagated to consumers when they depend
# on this node. It includes regular (not private and not build requires) dependencies
self._transitive_closure = OrderedDict()
self.inverse_closure = set() # set of nodes that have this one in their public
self._ancestors = _NodeOrderedDict() # set{ref.name}
self._id = None # Unique ID (uuid at the moment) of a node in the graph
self.graph_lock_node = None # the locking information can be None
self.id_direct_prefs = None
self.id_indirect_prefs = None
|
def __init__(self, ref, conanfile, context, recipe=None, path=None):
self.ref = ref
self.path = path # path to the consumer conanfile.xx for consumer, None otherwise
self._package_id = None
self.prev = None
self.conanfile = conanfile
self.dependencies = [] # Ordered Edges
self.dependants = set() # Edges
self.binary = None
self.recipe = recipe
self.remote = None
self.binary_remote = None
self.revision_pinned = False # The revision has been specified by the user
self.context = context
# A subset of the graph that will conflict by package name
self._public_deps = _NodeOrderedDict() # {ref.name: Node}
# all the public deps only in the closure of this node
# The dependencies that will be part of deps_cpp_info, can't conflict
self._public_closure = _NodeOrderedDict() # {ref.name: Node}
# The dependencies of this node that will be propagated to consumers when they depend
# on this node. It includes regular (not private and not build requires) dependencies
self._transitive_closure = OrderedDict()
self.inverse_closure = set() # set of nodes that have this one in their public
self._ancestors = _NodeOrderedDict() # set{ref.name}
self._id = None # Unique ID (uuid at the moment) of a node in the graph
self.graph_lock_node = None # the locking information can be None
|
https://github.com/conan-io/conan/issues/6942
|
30-Apr-2020 16:37:19 xyz/abc: Unknown binary for xyz/abc, computing updated ID
30-Apr-2020 16:37:19 Traceback (most recent call last):
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 2002, in run
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 369, in create
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 89, in wrapper
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 368, in create
30-Apr-2020 16:37:19 File "conan/conans/client/cmd/create.py", line 57, in create
30-Apr-2020 16:37:19 File "conan/conans/client/manager.py", line 75, in deps_install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 309, in install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 404, in _build
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 347, in reevaluate_node
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 319, in _compute_package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 540, in package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 216, in sha
30-Apr-2020 16:37:19 TypeError: '<' not supported between instances of 'NoneType' and 'str'
|
TypeError
|
def _compute_package_id(
self, node, default_package_id_mode, default_python_requires_id_mode
):
"""
Compute the binary package ID of this node
:param node: the node to compute the package-ID
:param default_package_id_mode: configuration of the package-ID mode
"""
# TODO Conan 2.0. To separate the propagation of the graph (options) of the package-ID
# A bit risky to be done now
conanfile = node.conanfile
neighbors = node.neighbors()
if not self._fixed_package_id:
direct_reqs = [] # of PackageReference
indirect_reqs = set() # of PackageReference, avoid duplicates
for neighbor in neighbors:
ref, nconan = neighbor.ref, neighbor.conanfile
direct_reqs.append(neighbor.pref)
indirect_reqs.update(nconan.info.requires.refs())
# Make sure not duplicated
indirect_reqs.difference_update(direct_reqs)
else:
direct_reqs, indirect_reqs = self.package_id_transitive_reqs(node)
python_requires = getattr(conanfile, "python_requires", None)
if python_requires:
if isinstance(python_requires, dict):
python_requires = None # Legacy python-requires do not change package-ID
else:
python_requires = python_requires.all_refs()
conanfile.info = ConanInfo.create(
conanfile.settings.values,
conanfile.options.values,
direct_reqs,
indirect_reqs,
default_package_id_mode=default_package_id_mode,
python_requires=python_requires,
default_python_requires_id_mode=default_python_requires_id_mode,
)
# Once we are done, call package_id() to narrow and change possible values
with conanfile_exception_formatter(str(conanfile), "package_id"):
with conan_v2_property(
conanfile,
"cpp_info",
"'self.cpp_info' access in package_id() method is deprecated",
):
conanfile.package_id()
info = conanfile.info
node.package_id = info.package_id()
|
def _compute_package_id(
self, node, default_package_id_mode, default_python_requires_id_mode
):
"""
Compute the binary package ID of this node
:param node: the node to compute the package-ID
:param default_package_id_mode: configuration of the package-ID mode
"""
# TODO Conan 2.0. To separate the propagation of the graph (options) of the package-ID
# A bit risky to be done now
conanfile = node.conanfile
neighbors = node.neighbors()
if not self._fixed_package_id:
direct_reqs = [] # of PackageReference
indirect_reqs = set() # of PackageReference, avoid duplicates
for neighbor in neighbors:
ref, nconan = neighbor.ref, neighbor.conanfile
direct_reqs.append(neighbor.pref)
indirect_reqs.update(nconan.info.requires.refs())
# Make sure not duplicated
indirect_reqs.difference_update(direct_reqs)
else:
node.id_direct_prefs = set() # of PackageReference
node.id_indirect_prefs = set() # of PackageReference, avoid duplicates
for neighbor in neighbors:
node.id_direct_prefs.add(neighbor.pref)
node.id_indirect_prefs.update(neighbor.id_direct_prefs)
node.id_indirect_prefs.update(neighbor.id_indirect_prefs)
# Make sure not duplicated, totally necessary
node.id_indirect_prefs.difference_update(node.id_direct_prefs)
direct_reqs = node.id_direct_prefs
indirect_reqs = node.id_indirect_prefs
python_requires = getattr(conanfile, "python_requires", None)
if python_requires:
if isinstance(python_requires, dict):
python_requires = None # Legacy python-requires do not change package-ID
else:
python_requires = python_requires.all_refs()
conanfile.info = ConanInfo.create(
conanfile.settings.values,
conanfile.options.values,
direct_reqs,
indirect_reqs,
default_package_id_mode=default_package_id_mode,
python_requires=python_requires,
default_python_requires_id_mode=default_python_requires_id_mode,
)
# Once we are done, call package_id() to narrow and change possible values
with conanfile_exception_formatter(str(conanfile), "package_id"):
with conan_v2_property(
conanfile,
"cpp_info",
"'self.cpp_info' access in package_id() method is deprecated",
):
conanfile.package_id()
info = conanfile.info
node.package_id = info.package_id()
|
https://github.com/conan-io/conan/issues/6942
|
30-Apr-2020 16:37:19 xyz/abc: Unknown binary for xyz/abc, computing updated ID
30-Apr-2020 16:37:19 Traceback (most recent call last):
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 2002, in run
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 369, in create
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 89, in wrapper
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 368, in create
30-Apr-2020 16:37:19 File "conan/conans/client/cmd/create.py", line 57, in create
30-Apr-2020 16:37:19 File "conan/conans/client/manager.py", line 75, in deps_install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 309, in install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 404, in _build
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 347, in reevaluate_node
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 319, in _compute_package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 540, in package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 216, in sha
30-Apr-2020 16:37:19 TypeError: '<' not supported between instances of 'NoneType' and 'str'
|
TypeError
|
def _build(
self, nodes_by_level, keep_build, root_node, graph_info, remotes, build_mode, update
):
using_build_profile = bool(graph_info.profile_build)
missing, downloads = self._classify(nodes_by_level)
self._raise_missing(missing)
processed_package_refs = set()
self._download(downloads, processed_package_refs)
fix_package_id = self._cache.config.full_transitive_package_id
for level in nodes_by_level:
for node in level:
ref, conan_file = node.ref, node.conanfile
output = conan_file.output
self._propagate_info(node, using_build_profile, fix_package_id)
if node.binary == BINARY_EDITABLE:
self._handle_node_editable(node, graph_info)
# Need a temporary package revision for package_revision_mode
# Cannot be PREV_UNKNOWN otherwise the consumers can't compute their packageID
node.prev = "editable"
else:
if node.binary == BINARY_SKIP: # Privates not necessary
continue
assert ref.revision is not None, "Installer should receive RREV always"
if node.binary == BINARY_UNKNOWN:
self._binaries_analyzer.reevaluate_node(
node, remotes, build_mode, update
)
_handle_system_requirements(conan_file, node.pref, self._cache, output)
self._handle_node_cache(
node, keep_build, processed_package_refs, remotes
)
# Finally, propagate information to root node (ref=None)
self._propagate_info(root_node, using_build_profile, fix_package_id)
|
def _build(
self, nodes_by_level, keep_build, root_node, graph_info, remotes, build_mode, update
):
using_build_profile = bool(graph_info.profile_build)
missing, downloads = self._classify(nodes_by_level)
self._raise_missing(missing)
processed_package_refs = set()
self._download(downloads, processed_package_refs)
for level in nodes_by_level:
for node in level:
ref, conan_file = node.ref, node.conanfile
output = conan_file.output
self._propagate_info(node, using_build_profile)
if node.binary == BINARY_EDITABLE:
self._handle_node_editable(node, graph_info)
# Need a temporary package revision for package_revision_mode
# Cannot be PREV_UNKNOWN otherwise the consumers can't compute their packageID
node.prev = "editable"
else:
if node.binary == BINARY_SKIP: # Privates not necessary
continue
assert ref.revision is not None, "Installer should receive RREV always"
if node.binary == BINARY_UNKNOWN:
self._binaries_analyzer.reevaluate_node(
node, remotes, build_mode, update
)
_handle_system_requirements(conan_file, node.pref, self._cache, output)
self._handle_node_cache(
node, keep_build, processed_package_refs, remotes
)
# Finally, propagate information to root node (ref=None)
self._propagate_info(root_node, using_build_profile)
|
https://github.com/conan-io/conan/issues/6942
|
30-Apr-2020 16:37:19 xyz/abc: Unknown binary for xyz/abc, computing updated ID
30-Apr-2020 16:37:19 Traceback (most recent call last):
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 2002, in run
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 369, in create
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 89, in wrapper
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 368, in create
30-Apr-2020 16:37:19 File "conan/conans/client/cmd/create.py", line 57, in create
30-Apr-2020 16:37:19 File "conan/conans/client/manager.py", line 75, in deps_install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 309, in install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 404, in _build
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 347, in reevaluate_node
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 319, in _compute_package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 540, in package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 216, in sha
30-Apr-2020 16:37:19 TypeError: '<' not supported between instances of 'NoneType' and 'str'
|
TypeError
|
def _propagate_info(self, node, using_build_profile, fixed_package_id):
if fixed_package_id:
# if using config.full_transitive_package_id, it is necessary to recompute
# the node transitive information necessary to compute the package_id
# as it will be used by reevaluate_node() when package_revision_mode is used and
# PACKAGE_ID_UNKNOWN happens due to unknown revisions
self._binaries_analyzer.package_id_transitive_reqs(node)
# Get deps_cpp_info from upstream nodes
node_order = [n for n in node.public_closure if n.binary != BINARY_SKIP]
# List sort is stable, will keep the original order of the closure, but prioritize levels
conan_file = node.conanfile
# FIXME: Not the best place to assign the _conan_using_build_profile
conan_file._conan_using_build_profile = using_build_profile
transitive = [it for it in node.transitive_closure.values()]
br_host = []
for it in node.dependencies:
if it.require.build_require_context == CONTEXT_HOST:
br_host.extend(it.dst.transitive_closure.values())
for n in node_order:
if n not in transitive:
conan_file.output.info("Applying build-requirement: %s" % str(n.ref))
if not using_build_profile: # Do not touch anything
conan_file.deps_user_info[n.ref.name] = n.conanfile.user_info
conan_file.deps_cpp_info.update(n.conanfile._conan_dep_cpp_info, n.ref.name)
conan_file.deps_env_info.update(n.conanfile.env_info, n.ref.name)
else:
if n in transitive or n in br_host:
conan_file.deps_cpp_info.update(
n.conanfile._conan_dep_cpp_info, n.ref.name
)
else:
env_info = EnvInfo()
env_info._values_ = n.conanfile.env_info._values_.copy()
# Add cpp_info.bin_paths/lib_paths to env_info (it is needed for runtime)
env_info.DYLD_LIBRARY_PATH.extend(
n.conanfile._conan_dep_cpp_info.lib_paths
)
env_info.DYLD_LIBRARY_PATH.extend(
n.conanfile._conan_dep_cpp_info.framework_paths
)
env_info.LD_LIBRARY_PATH.extend(
n.conanfile._conan_dep_cpp_info.lib_paths
)
env_info.PATH.extend(n.conanfile._conan_dep_cpp_info.bin_paths)
conan_file.deps_env_info.update(env_info, n.ref.name)
# Update the info but filtering the package values that not apply to the subtree
# of this current node and its dependencies.
subtree_libnames = [node.ref.name for node in node_order]
add_env_conaninfo(conan_file, subtree_libnames)
|
def _propagate_info(node, using_build_profile):
# Get deps_cpp_info from upstream nodes
node_order = [n for n in node.public_closure if n.binary != BINARY_SKIP]
# List sort is stable, will keep the original order of the closure, but prioritize levels
conan_file = node.conanfile
conan_file._conan_using_build_profile = (
using_build_profile # FIXME: Not the best place to assign it
)
transitive = [it for it in node.transitive_closure.values()]
br_host = []
for it in node.dependencies:
if it.require.build_require_context == CONTEXT_HOST:
br_host.extend(it.dst.transitive_closure.values())
for n in node_order:
if n not in transitive:
conan_file.output.info("Applying build-requirement: %s" % str(n.ref))
if not using_build_profile: # Do not touch anything
conan_file.deps_user_info[n.ref.name] = n.conanfile.user_info
conan_file.deps_cpp_info.update(n.conanfile._conan_dep_cpp_info, n.ref.name)
conan_file.deps_env_info.update(n.conanfile.env_info, n.ref.name)
else:
if n in transitive or n in br_host:
conan_file.deps_cpp_info.update(
n.conanfile._conan_dep_cpp_info, n.ref.name
)
else:
env_info = EnvInfo()
env_info._values_ = n.conanfile.env_info._values_.copy()
# Add cpp_info.bin_paths/lib_paths to env_info (it is needed for runtime)
env_info.DYLD_LIBRARY_PATH.extend(
n.conanfile._conan_dep_cpp_info.lib_paths
)
env_info.DYLD_LIBRARY_PATH.extend(
n.conanfile._conan_dep_cpp_info.framework_paths
)
env_info.LD_LIBRARY_PATH.extend(
n.conanfile._conan_dep_cpp_info.lib_paths
)
env_info.PATH.extend(n.conanfile._conan_dep_cpp_info.bin_paths)
conan_file.deps_env_info.update(env_info, n.ref.name)
# Update the info but filtering the package values that not apply to the subtree
# of this current node and its dependencies.
subtree_libnames = [node.ref.name for node in node_order]
add_env_conaninfo(conan_file, subtree_libnames)
|
https://github.com/conan-io/conan/issues/6942
|
30-Apr-2020 16:37:19 xyz/abc: Unknown binary for xyz/abc, computing updated ID
30-Apr-2020 16:37:19 Traceback (most recent call last):
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 2002, in run
30-Apr-2020 16:37:19 File "conan/conans/client/command.py", line 369, in create
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 89, in wrapper
30-Apr-2020 16:37:19 File "conan/conans/client/conan_api.py", line 368, in create
30-Apr-2020 16:37:19 File "conan/conans/client/cmd/create.py", line 57, in create
30-Apr-2020 16:37:19 File "conan/conans/client/manager.py", line 75, in deps_install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 309, in install
30-Apr-2020 16:37:19 File "conan/conans/client/installer.py", line 404, in _build
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 347, in reevaluate_node
30-Apr-2020 16:37:19 File "conan/conans/client/graph/graph_binaries.py", line 319, in _compute_package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 540, in package_id
30-Apr-2020 16:37:19 File "conan/conans/model/info.py", line 216, in sha
30-Apr-2020 16:37:19 TypeError: '<' not supported between instances of 'NoneType' and 'str'
|
TypeError
|
def inspect(self, path, attributes, remote_name=None):
remotes = self.app.load_remotes(remote_name=remote_name)
try:
ref = ConanFileReference.loads(path)
except ConanException:
conanfile_path = _get_conanfile_path(path, get_cwd(), py=True)
conanfile = self.app.loader.load_named(conanfile_path, None, None, None, None)
else:
update = True if remote_name else False
result = self.app.proxy.get_recipe(
ref, update, update, remotes, ActionRecorder()
)
conanfile_path, _, _, ref = result
conanfile = self.app.loader.load_basic(conanfile_path)
conanfile.name = ref.name
conanfile.version = (
str(ref.version)
if os.environ.get(CONAN_V2_MODE_ENVVAR, False)
else ref.version
)
result = OrderedDict()
if not attributes:
attributes = [
"name",
"version",
"url",
"homepage",
"license",
"author",
"description",
"topics",
"generators",
"exports",
"exports_sources",
"short_paths",
"apply_env",
"build_policy",
"revision_mode",
"settings",
"options",
"default_options",
]
for attribute in attributes:
try:
attr = getattr(conanfile, attribute)
result[attribute] = attr
except AttributeError:
result[attribute] = ""
return result
|
def inspect(self, path, attributes, remote_name=None):
remotes = self.app.load_remotes(remote_name=remote_name)
try:
ref = ConanFileReference.loads(path)
except ConanException:
conanfile_path = _get_conanfile_path(path, get_cwd(), py=True)
conanfile = self.app.loader.load_named(conanfile_path, None, None, None, None)
else:
update = True if remote_name else False
result = self.app.proxy.get_recipe(
ref, update, update, remotes, ActionRecorder()
)
conanfile_path, _, _, ref = result
conanfile = self.app.loader.load_basic(conanfile_path)
conanfile.name = ref.name
conanfile.version = ref.version
result = OrderedDict()
if not attributes:
attributes = [
"name",
"version",
"url",
"homepage",
"license",
"author",
"description",
"topics",
"generators",
"exports",
"exports_sources",
"short_paths",
"apply_env",
"build_policy",
"revision_mode",
"settings",
"options",
"default_options",
]
for attribute in attributes:
try:
attr = getattr(conanfile, attribute)
result[attribute] = attr
except AttributeError:
result[attribute] = ""
return result
|
https://github.com/conan-io/conan/issues/6776
|
ERROR: Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/conans/errors.py", line 34, in conanfile_exception_formatter
yield
File "/usr/local/lib/python3.6/site-packages/conans/client/source.py", line 143, in _run_source
conanfile.source()
File "/home/rdesmond/Desktop/output/version_test/exception/conanfile.py", line 8, in source
minor = self.version.minor()
AttributeError: 'str' object has no attribute 'minor'
|
AttributeError
|
def _load_pyreq_conanfile(self, loader, lock_python_requires, ref):
recipe = self._proxy.get_recipe(
ref,
self._check_updates,
self._update,
remotes=self._remotes,
recorder=ActionRecorder(),
)
path, _, _, new_ref = recipe
conanfile, module = loader.load_basic_module(
path, lock_python_requires, user=new_ref.user, channel=new_ref.channel
)
conanfile.name = new_ref.name
conanfile.version = (
str(new_ref.version)
if os.environ.get(CONAN_V2_MODE_ENVVAR, False)
else new_ref.version
)
if getattr(conanfile, "alias", None):
ref = ConanFileReference.loads(conanfile.alias)
conanfile, module, new_ref, path = self._load_pyreq_conanfile(
loader, lock_python_requires, ref
)
return conanfile, module, new_ref, os.path.dirname(path)
|
def _load_pyreq_conanfile(self, loader, lock_python_requires, ref):
recipe = self._proxy.get_recipe(
ref,
self._check_updates,
self._update,
remotes=self._remotes,
recorder=ActionRecorder(),
)
path, _, _, new_ref = recipe
conanfile, module = loader.load_basic_module(
path, lock_python_requires, user=new_ref.user, channel=new_ref.channel
)
conanfile.name = new_ref.name
conanfile.version = new_ref.version
if getattr(conanfile, "alias", None):
ref = ConanFileReference.loads(conanfile.alias)
conanfile, module, new_ref, path = self._load_pyreq_conanfile(
loader, lock_python_requires, ref
)
return conanfile, module, new_ref, os.path.dirname(path)
|
https://github.com/conan-io/conan/issues/6776
|
ERROR: Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/conans/errors.py", line 34, in conanfile_exception_formatter
yield
File "/usr/local/lib/python3.6/site-packages/conans/client/source.py", line 143, in _run_source
conanfile.source()
File "/home/rdesmond/Desktop/output/version_test/exception/conanfile.py", line 8, in source
minor = self.version.minor()
AttributeError: 'str' object has no attribute 'minor'
|
AttributeError
|
def load_export(
self, conanfile_path, name, version, user, channel, lock_python_requires=None
):
"""loads the conanfile and evaluates its name, version, and enforce its existence"""
conanfile = self.load_named(
conanfile_path, name, version, user, channel, lock_python_requires
)
if not conanfile.name:
raise ConanException("conanfile didn't specify name")
if not conanfile.version:
raise ConanException("conanfile didn't specify version")
if os.environ.get(CONAN_V2_MODE_ENVVAR, False):
conanfile.version = str(conanfile.version)
ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)
conanfile.display_name = str(ref)
conanfile.output.scope = conanfile.display_name
return conanfile
|
def load_export(
self, conanfile_path, name, version, user, channel, lock_python_requires=None
):
"""loads the conanfile and evaluates its name, version, and enforce its existence"""
conanfile = self.load_named(
conanfile_path, name, version, user, channel, lock_python_requires
)
if not conanfile.name:
raise ConanException("conanfile didn't specify name")
if not conanfile.version:
raise ConanException("conanfile didn't specify version")
ref = ConanFileReference(conanfile.name, conanfile.version, user, channel)
conanfile.display_name = str(ref)
conanfile.output.scope = conanfile.display_name
return conanfile
|
https://github.com/conan-io/conan/issues/6776
|
ERROR: Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/conans/errors.py", line 34, in conanfile_exception_formatter
yield
File "/usr/local/lib/python3.6/site-packages/conans/client/source.py", line 143, in _run_source
conanfile.source()
File "/home/rdesmond/Desktop/output/version_test/exception/conanfile.py", line 8, in source
minor = self.version.minor()
AttributeError: 'str' object has no attribute 'minor'
|
AttributeError
|
def load_conanfile(self, conanfile_path, profile, ref, lock_python_requires=None):
"""load a conanfile with a full reference, name, version, user and channel are obtained
from the reference, not evaluated. Main way to load from the cache
"""
conanfile, _ = self.load_basic_module(
conanfile_path, lock_python_requires, ref.user, ref.channel, str(ref)
)
conanfile.name = ref.name
conanfile.version = (
str(ref.version) if os.environ.get(CONAN_V2_MODE_ENVVAR, False) else ref.version
)
if profile.dev_reference and profile.dev_reference == ref:
conanfile.develop = True
try:
self._initialize_conanfile(conanfile, profile)
return conanfile
except ConanInvalidConfiguration:
raise
except Exception as e: # re-raise with file name
raise ConanException("%s: %s" % (conanfile_path, str(e)))
|
def load_conanfile(self, conanfile_path, profile, ref, lock_python_requires=None):
"""load a conanfile with a full reference, name, version, user and channel are obtained
from the reference, not evaluated. Main way to load from the cache
"""
conanfile, _ = self.load_basic_module(
conanfile_path, lock_python_requires, ref.user, ref.channel, str(ref)
)
conanfile.name = ref.name
conanfile.version = ref.version
if profile.dev_reference and profile.dev_reference == ref:
conanfile.develop = True
try:
self._initialize_conanfile(conanfile, profile)
return conanfile
except ConanInvalidConfiguration:
raise
except Exception as e: # re-raise with file name
raise ConanException("%s: %s" % (conanfile_path, str(e)))
|
https://github.com/conan-io/conan/issues/6776
|
ERROR: Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/conans/errors.py", line 34, in conanfile_exception_formatter
yield
File "/usr/local/lib/python3.6/site-packages/conans/client/source.py", line 143, in _run_source
conanfile.source()
File "/home/rdesmond/Desktop/output/version_test/exception/conanfile.py", line 8, in source
minor = self.version.minor()
AttributeError: 'str' object has no attribute 'minor'
|
AttributeError
|
def check_output_runner(cmd, stderr=None):
# Used to run several utilities, like Pacman detect, AIX version, uname, SCM
tmp_file = tempfile.mktemp()
try:
# We don't want stderr to print warnings that will mess the pristine outputs
stderr = stderr or subprocess.PIPE
cmd = cmd if isinstance(cmd, six.string_types) else subprocess.list2cmdline(cmd)
command = '{} > "{}"'.format(cmd, tmp_file)
logger.info("Calling command: {}".format(command))
process = subprocess.Popen(command, shell=True, stderr=stderr)
stdout, stderr = process.communicate()
logger.info("Return code: {}".format(int(process.returncode)))
if process.returncode:
# Only in case of error, we print also the stderr to know what happened
raise CalledProcessErrorWithStderr(process.returncode, cmd, output=stderr)
output = load(tmp_file)
try:
logger.info(
"Output: in file:{}\nstdout: {}\nstderr:{}".format(
output, stdout, stderr
)
)
except Exception as exc:
logger.error("Error logging command output: {}".format(exc))
return output
finally:
try:
os.unlink(tmp_file)
except OSError:
pass
|
def check_output_runner(cmd, stderr=None):
# Used to run several utilities, like Pacman detect, AIX version, uname, SCM
tmp_file = tempfile.mktemp()
try:
# We don't want stderr to print warnings that will mess the pristine outputs
stderr = stderr or subprocess.PIPE
cmd = cmd if isinstance(cmd, six.string_types) else subprocess.list2cmdline(cmd)
command = "{} > {}".format(cmd, tmp_file)
logger.info("Calling command: {}".format(command))
process = subprocess.Popen(command, shell=True, stderr=stderr)
stdout, stderr = process.communicate()
logger.info("Return code: {}".format(int(process.returncode)))
if process.returncode:
# Only in case of error, we print also the stderr to know what happened
raise CalledProcessErrorWithStderr(process.returncode, cmd, output=stderr)
output = load(tmp_file)
try:
logger.info(
"Output: in file:{}\nstdout: {}\nstderr:{}".format(
output, stdout, stderr
)
)
except Exception as exc:
logger.error("Error logging command output: {}".format(exc))
return output
finally:
try:
os.unlink(tmp_file)
except OSError:
pass
|
https://github.com/conan-io/conan/issues/6694
|
Conan:vcvars already set
Traceback (most recent call last):
File "build.py", line 7, in <module>
builder.run()
File "C:\Program Files\Python37\lib\site-packages\cpt\packager.py", line 597, in run
self.run_builds(base_profile_name=base_profile_name)
File "C:\Program Files\Python37\lib\site-packages\cpt\packager.py", line 690, in run_builds
r.run()
File "C:\Program Files\Python37\lib\site-packages\cpt\runner.py", line 86, in run
with context:
File "C:\Program Files\Python37\lib\contextlib.py", line 112, in __enter__
return next(self.gen)
File "C:\Program Files\Python37\lib\site-packages\conans\tools.py", line 200, in vcvars
with tools_win.vcvars(output=_global_output, *args, **kwargs):
File "C:\Program Files\Python37\lib\contextlib.py", line 112, in __enter__
return next(self.gen)
File "C:\Program Files\Python37\lib\site-packages\conans\client\tools\win.py", line 493, in vcvars
new_env = vcvars_dict(*args, **kwargs)
File "C:\Program Files\Python37\lib\site-packages\conans\client\tools\win.py", line 444, in vcvars_dict
ret = check_output_runner(cmd)
File "C:\Program Files\Python37\lib\site-packages\conans\util\runners.py", line 92, in check_output_runner
raise CalledProcessErrorWithStderr(process.returncode, cmd, output=stderr)
conans.errors.CalledProcessErrorWithStderr: Command 'echo Conan:vcvars already set && set' returned non-zero exit status 1.
Environment variable Mikkonen\AppData\Local\Temp\tmplzxq9vhi not defined
|
conans.errors.CalledProcessErrorWithStderr
|
def vcvars_command(
settings,
arch=None,
compiler_version=None,
force=False,
vcvars_ver=None,
winsdk_version=None,
output=None,
):
output = default_output(output, "conans.client.tools.win.vcvars_command")
arch_setting = arch or settings.get_safe("arch")
compiler = settings.get_safe("compiler")
if compiler == "Visual Studio":
compiler_version = compiler_version or settings.get_safe("compiler.version")
else:
# vcvars might be still needed for other compilers, e.g. clang-cl or Intel C++,
# as they might be using Microsoft STL and other tools
# (e.g. resource compiler, manifest tool, etc)
# in this case, use the latest Visual Studio available on the machine
last_version = latest_vs_version_installed(output=output)
compiler_version = compiler_version or last_version
os_setting = settings.get_safe("os")
if not compiler_version:
raise ConanException("compiler.version setting required for vcvars not defined")
# https://msdn.microsoft.com/en-us/library/f2ccy3wt.aspx
vcvars_arch = None
arch_setting = arch_setting or "x86_64"
arch_build = settings.get_safe("arch_build") or detected_architecture()
if os_setting == "WindowsCE":
vcvars_arch = "x86"
elif arch_build == "x86_64":
# Only uses x64 tooling if arch_build explicitly defines it, otherwise
# Keep the VS default, which is x86 toolset
# This will probably be changed in conan 2.0
if (
settings.get_safe("arch_build")
or os.getenv("PreferredToolArchitecture") == "x64"
) and int(compiler_version) >= 12:
x86_cross = "amd64_x86"
else:
x86_cross = "x86"
vcvars_arch = {
"x86": x86_cross,
"x86_64": "amd64",
"armv7": "amd64_arm",
"armv8": "amd64_arm64",
}.get(arch_setting)
elif arch_build == "x86":
vcvars_arch = {
"x86": "x86",
"x86_64": "x86_amd64",
"armv7": "x86_arm",
"armv8": "x86_arm64",
}.get(arch_setting)
if not vcvars_arch:
raise ConanException("unsupported architecture %s" % arch_setting)
existing_version = os.environ.get("VisualStudioVersion")
if existing_version:
command = ["echo Conan:vcvars already set"]
existing_version = existing_version.split(".")[0]
if existing_version != compiler_version:
message = (
"Visual environment already set to %s\n "
"Current settings visual version: %s"
% (existing_version, compiler_version)
)
if not force:
raise ConanException("Error, %s" % message)
else:
output.warn(message)
else:
vs_path = vs_installation_path(str(compiler_version))
if not vs_path or not os.path.isdir(vs_path):
raise ConanException(
"VS non-existing installation: Visual Studio %s" % str(compiler_version)
)
else:
if int(compiler_version) > 14:
vcvars_path = os.path.join(vs_path, "VC/Auxiliary/Build/vcvarsall.bat")
command = [
'set "VSCMD_START_DIR=%%CD%%" && '
'call "%s" %s' % (vcvars_path, vcvars_arch)
]
else:
vcvars_path = os.path.join(vs_path, "VC/vcvarsall.bat")
command = ['call "%s" %s' % (vcvars_path, vcvars_arch)]
if int(compiler_version) >= 14:
if winsdk_version:
command.append(winsdk_version)
if vcvars_ver:
command.append("-vcvars_ver=%s" % vcvars_ver)
if os_setting == "WindowsStore":
os_version_setting = settings.get_safe("os.version")
if os_version_setting == "8.1":
command.append("store 8.1")
elif os_version_setting == "10.0":
windows_10_sdk = find_windows_10_sdk()
if not windows_10_sdk:
raise ConanException(
"cross-compiling for WindowsStore 10 (UWP), "
"but Windows 10 SDK wasn't found"
)
command.append("store %s" % windows_10_sdk)
else:
raise ConanException(
"unsupported Windows Store version %s" % os_version_setting
)
return " ".join(command)
|
def vcvars_command(
settings,
arch=None,
compiler_version=None,
force=False,
vcvars_ver=None,
winsdk_version=None,
output=None,
):
output = default_output(output, "conans.client.tools.win.vcvars_command")
arch_setting = arch or settings.get_safe("arch")
compiler = settings.get_safe("compiler")
if compiler == "Visual Studio":
compiler_version = compiler_version or settings.get_safe("compiler.version")
else:
# vcvars might be still needed for other compilers, e.g. clang-cl or Intel C++,
# as they might be using Microsoft STL and other tools
# (e.g. resource compiler, manifest tool, etc)
# in this case, use the latest Visual Studio available on the machine
last_version = latest_vs_version_installed(output=output)
compiler_version = compiler_version or last_version
os_setting = settings.get_safe("os")
if not compiler_version:
raise ConanException("compiler.version setting required for vcvars not defined")
# https://msdn.microsoft.com/en-us/library/f2ccy3wt.aspx
arch_setting = arch_setting or "x86_64"
arch_build = settings.get_safe("arch_build") or detected_architecture()
if os_setting == "WindowsCE":
vcvars_arch = "x86"
elif arch_build == "x86_64":
# Only uses x64 tooling if arch_build explicitly defines it, otherwise
# Keep the VS default, which is x86 toolset
# This will probably be changed in conan 2.0
if (
settings.get_safe("arch_build")
or os.getenv("PreferredToolArchitecture") == "x64"
) and int(compiler_version) >= 12:
x86_cross = "amd64_x86"
else:
x86_cross = "x86"
vcvars_arch = {
"x86": x86_cross,
"x86_64": "amd64",
"armv7": "amd64_arm",
"armv8": "amd64_arm64",
}.get(arch_setting)
elif arch_build == "x86":
vcvars_arch = {
"x86": "x86",
"x86_64": "x86_amd64",
"armv7": "x86_arm",
"armv8": "x86_arm64",
}.get(arch_setting)
if not vcvars_arch:
raise ConanException("unsupported architecture %s" % arch_setting)
existing_version = os.environ.get("VisualStudioVersion")
if existing_version:
command = ["echo Conan:vcvars already set"]
existing_version = existing_version.split(".")[0]
if existing_version != compiler_version:
message = (
"Visual environment already set to %s\n "
"Current settings visual version: %s"
% (existing_version, compiler_version)
)
if not force:
raise ConanException("Error, %s" % message)
else:
output.warn(message)
else:
vs_path = vs_installation_path(str(compiler_version))
if not vs_path or not os.path.isdir(vs_path):
raise ConanException(
"VS non-existing installation: Visual Studio %s" % str(compiler_version)
)
else:
if int(compiler_version) > 14:
vcvars_path = os.path.join(vs_path, "VC/Auxiliary/Build/vcvarsall.bat")
command = [
'set "VSCMD_START_DIR=%%CD%%" && '
'call "%s" %s' % (vcvars_path, vcvars_arch)
]
else:
vcvars_path = os.path.join(vs_path, "VC/vcvarsall.bat")
command = ['call "%s" %s' % (vcvars_path, vcvars_arch)]
if int(compiler_version) >= 14:
if winsdk_version:
command.append(winsdk_version)
if vcvars_ver:
command.append("-vcvars_ver=%s" % vcvars_ver)
if os_setting == "WindowsStore":
os_version_setting = settings.get_safe("os.version")
if os_version_setting == "8.1":
command.append("store 8.1")
elif os_version_setting == "10.0":
windows_10_sdk = find_windows_10_sdk()
if not windows_10_sdk:
raise ConanException(
"cross-compiling for WindowsStore 10 (UWP), "
"but Windows 10 SDK wasn't found"
)
command.append("store %s" % windows_10_sdk)
else:
raise ConanException(
"unsupported Windows Store version %s" % os_version_setting
)
return " ".join(command)
|
https://github.com/conan-io/conan/issues/6674
|
Found Visual Studio 15
Profile created with detected settings: J:\j\conan_build_cache\.conan\profiles\default
Traceback (most recent call last):
File "build.py", line 7, in <module>
builder.run()
File "J:\j\workspace\vendor-someSDK-pipeline\VendorSomeSDK\VendorSomeSDK_venv\lib\site-packages\cpt\packager.py", line 541, in run
self.run_builds(base_profile_name=base_profile_name)
File "J:\j\workspace\vendor-someSDK-pipeline\VendorSomeSDK\VendorSomeSDK_venv\lib\site-packages\cpt\packager.py", line 633, in run_builds
r.run()
File "J:\j\workspace\vendor-someSDK-pipeline\VendorSomeSDK\VendorSomeSDK_venv\lib\site-packages\cpt\runner.py", line 85, in run
with context:
File "C:\python3\lib\contextlib.py", line 112, in __enter__
return next(self.gen)
File "J:\j\workspace\vendor-someSDK-pipeline\VendorSomeSDK\VendorSomeSDK_venv\lib\site-packages\conans\tools.py", line 200, in vcvars
with tools_win.vcvars(output=_global_output, *args, **kwargs):
File "C:\python3\lib\contextlib.py", line 112, in __enter__
return next(self.gen)
File "J:\j\workspace\vendor-someSDK-pipeline\VendorSomeSDK\VendorSomeSDK_venv\lib\site-packages\conans\client\tools\win.py", line 495, in vcvars
new_env = vcvars_dict(*args, **kwargs)
File "J:\j\workspace\vendor-someSDK-pipeline\VendorSomeSDK\VendorSomeSDK_venv\lib\site-packages\conans\client\tools\win.py", line 444, in vcvars_dict
vcvars_ver=vcvars_ver, winsdk_version=winsdk_version, output=output)
File "J:\j\workspace\vendor-someSDK-pipeline\VendorSomeSDK\VendorSomeSDK_venv\lib\site-packages\conans\client\tools\win.py", line 389, in vcvars_command
if not vcvars_arch:
UnboundLocalError: local variable 'vcvars_arch' referenced before assignment
|
UnboundLocalError
|
def _upload_package(
self,
pref,
retry=None,
retry_wait=None,
integrity_check=False,
policy=None,
p_remote=None,
):
assert pref.revision is not None, "Cannot upload a package without PREV"
assert pref.ref.revision is not None, "Cannot upload a package without RREV"
pkg_layout = self._cache.package_layout(pref.ref)
conanfile_path = pkg_layout.conanfile()
self._hook_manager.execute(
"pre_upload_package",
conanfile_path=conanfile_path,
reference=pref.ref,
package_id=pref.id,
remote=p_remote,
)
t1 = time.time()
the_files = self._compress_package_files(pref, integrity_check)
if policy == UPLOAD_POLICY_SKIP:
return None
files_to_upload, deleted = self._package_files_to_upload(
pref, policy, the_files, p_remote
)
if files_to_upload or deleted:
self._remote_manager.upload_package(
pref, files_to_upload, deleted, p_remote, retry, retry_wait
)
logger.debug("UPLOAD: Time upload package: %f" % (time.time() - t1))
else:
self._output.info("Package is up to date, upload skipped")
duration = time.time() - t1
log_package_upload(pref, duration, the_files, p_remote)
self._hook_manager.execute(
"post_upload_package",
conanfile_path=conanfile_path,
reference=pref.ref,
package_id=pref.id,
remote=p_remote,
)
logger.debug("UPLOAD: Time uploader upload_package: %f" % (time.time() - t1))
# Update the package metadata
checksums = calc_files_checksum(the_files)
with pkg_layout.update_metadata() as metadata:
cur_package_remote = metadata.packages[pref.id].remote
if not cur_package_remote:
metadata.packages[pref.id].remote = p_remote.name
metadata.packages[pref.id].checksums = checksums
return pref
|
def _upload_package(
self,
pref,
retry=None,
retry_wait=None,
integrity_check=False,
policy=None,
p_remote=None,
):
assert pref.revision is not None, "Cannot upload a package without PREV"
assert pref.ref.revision is not None, "Cannot upload a package without RREV"
conanfile_path = self._cache.package_layout(pref.ref).conanfile()
self._hook_manager.execute(
"pre_upload_package",
conanfile_path=conanfile_path,
reference=pref.ref,
package_id=pref.id,
remote=p_remote,
)
t1 = time.time()
the_files = self._compress_package_files(pref, integrity_check)
with self._cache.package_layout(pref.ref).update_metadata() as metadata:
metadata.packages[pref.id].checksums = calc_files_checksum(the_files)
if policy == UPLOAD_POLICY_SKIP:
return None
files_to_upload, deleted = self._package_files_to_upload(
pref, policy, the_files, p_remote
)
if files_to_upload or deleted:
self._remote_manager.upload_package(
pref, files_to_upload, deleted, p_remote, retry, retry_wait
)
logger.debug("UPLOAD: Time upload package: %f" % (time.time() - t1))
else:
self._output.info("Package is up to date, upload skipped")
duration = time.time() - t1
log_package_upload(pref, duration, the_files, p_remote)
self._hook_manager.execute(
"post_upload_package",
conanfile_path=conanfile_path,
reference=pref.ref,
package_id=pref.id,
remote=p_remote,
)
logger.debug("UPLOAD: Time uploader upload_package: %f" % (time.time() - t1))
metadata = self._cache.package_layout(pref.ref).load_metadata()
cur_package_remote = metadata.packages[pref.id].remote
if not cur_package_remote and policy != UPLOAD_POLICY_SKIP:
with self._cache.package_layout(pref.ref).update_metadata() as metadata:
metadata.packages[pref.id].remote = p_remote.name
return pref
|
https://github.com/conan-io/conan/issues/6521
|
[b] $ cmd.exe /C "conan upload "*" --confirm --all --no-overwrite --parallel && exit %%ERRORLEVEL%%"
ERROR: Extra data: line 1 column 4508 (char 4507)
Traceback (most recent call last):
File "e:\j\e1\b\venv\lib\site-packages\conans\client\command.py", line 1969, in run
method(args[0][1:])
File "e:\j\e1\b\venv\lib\site-packages\conans\client\command.py", line 1410, in upload
parallel_upload=args.parallel)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\conan_api.py", line 81, in wrapper
return f(api, *args, **kwargs)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\conan_api.py", line 887, in upload
parallel_upload=parallel_upload)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\cmd\uploader.py", line 87, in upload
query, package_id)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\cmd\uploader.py", line 145, in _collect_packages_to_upload
metadata = self._cache.package_layout(ref).load_metadata()
File "e:\j\e1\b\venv\lib\site-packages\conans\paths\package_layouts\package_cache_layout.py", line 171, in load_metadata
return PackageMetadata.loads(text)
File "e:\j\e1\b\venv\lib\site-packages\conans\model\package_metadata.py", line 95, in loads
data = json.loads(content)
File "C:\Program Files\Python37\Lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "C:\Program Files\Python37\Lib\json\decoder.py", line 340, in decode
raise JSONDecodeError("Extra data", s, end)
json.decoder.JSONDecodeError: Extra data: line 1 column 4508 (char 4507)
Conan build failed
|
json.decoder.JSONDecodeError
|
def update_metadata(self):
metadata_path = self.package_metadata()
lockfile = metadata_path + ".lock"
with fasteners.InterProcessLock(lockfile, logger=logger):
lock_name = self.package_metadata() # The path is the thing that defines mutex
thread_lock = PackageCacheLayout._metadata_locks.setdefault(
lock_name, threading.Lock()
)
thread_lock.acquire()
try:
try:
metadata = self.load_metadata()
except RecipeNotFoundException:
metadata = PackageMetadata()
yield metadata
save(metadata_path, metadata.dumps())
finally:
thread_lock.release()
|
def update_metadata(self):
lockfile = self.package_metadata() + ".lock"
with fasteners.InterProcessLock(lockfile, logger=logger):
try:
metadata = self.load_metadata()
except RecipeNotFoundException:
metadata = PackageMetadata()
yield metadata
save(self.package_metadata(), metadata.dumps())
|
https://github.com/conan-io/conan/issues/6521
|
[b] $ cmd.exe /C "conan upload "*" --confirm --all --no-overwrite --parallel && exit %%ERRORLEVEL%%"
ERROR: Extra data: line 1 column 4508 (char 4507)
Traceback (most recent call last):
File "e:\j\e1\b\venv\lib\site-packages\conans\client\command.py", line 1969, in run
method(args[0][1:])
File "e:\j\e1\b\venv\lib\site-packages\conans\client\command.py", line 1410, in upload
parallel_upload=args.parallel)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\conan_api.py", line 81, in wrapper
return f(api, *args, **kwargs)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\conan_api.py", line 887, in upload
parallel_upload=parallel_upload)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\cmd\uploader.py", line 87, in upload
query, package_id)
File "e:\j\e1\b\venv\lib\site-packages\conans\client\cmd\uploader.py", line 145, in _collect_packages_to_upload
metadata = self._cache.package_layout(ref).load_metadata()
File "e:\j\e1\b\venv\lib\site-packages\conans\paths\package_layouts\package_cache_layout.py", line 171, in load_metadata
return PackageMetadata.loads(text)
File "e:\j\e1\b\venv\lib\site-packages\conans\model\package_metadata.py", line 95, in loads
data = json.loads(content)
File "C:\Program Files\Python37\Lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "C:\Program Files\Python37\Lib\json\decoder.py", line 340, in decode
raise JSONDecodeError("Extra data", s, end)
json.decoder.JSONDecodeError: Extra data: line 1 column 4508 (char 4507)
Conan build failed
|
json.decoder.JSONDecodeError
|
def api_method(f):
def wrapper(api, *args, **kwargs):
quiet = kwargs.pop("quiet", False)
old_curdir = get_cwd()
old_output = api.user_io.out
quiet_output = ConanOutput(StringIO(), color=api.color) if quiet else None
try:
api.create_app(quiet_output=quiet_output)
log_command(f.__name__, kwargs)
with environment_append(api.app.cache.config.env_vars):
return f(api, *args, **kwargs)
except Exception as exc:
if quiet_output:
old_output.write(quiet_output._stream.getvalue())
old_output.flush()
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except BaseException:
pass
raise
finally:
os.chdir(old_curdir)
return wrapper
|
def api_method(f):
def wrapper(api, *args, **kwargs):
quiet = kwargs.pop("quiet", False)
old_curdir = get_cwd()
old_output = api.user_io.out
quiet_output = ConanOutput(StringIO(), api.color) if quiet else None
try:
api.create_app(quiet_output=quiet_output)
log_command(f.__name__, kwargs)
with environment_append(api.app.cache.config.env_vars):
return f(api, *args, **kwargs)
except Exception as exc:
if quiet_output:
old_output.write(quiet_output._stream.getvalue())
old_output.flush()
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except BaseException:
pass
raise
finally:
os.chdir(old_curdir)
return wrapper
|
https://github.com/conan-io/conan/issues/6558
|
$ rm -rf .conan\remotes.json
$ conan inspect zlib/1.2.11@ --raw=version
Traceback (most recent call last):
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\command.py", line 1969, in run
method(args[0][1:])
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\command.py", line 255, in inspect
result = self._conan.inspect(args.path_or_reference, attributes, args.remote, quiet=quiet)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\conan_api.py", line 81, in wrapper
return f(api, *args, **kwargs)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\conan_api.py", line 260, in inspect
remotes = self.app.load_remotes(remote_name=remote_name)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\conan_api.py", line 198, in load_remotes
remotes = self.cache.registry.load_remotes()
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\cache\remote_registry.py", line 313, in load_remotes
"creating default one in %s" % self._filename)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 121, in warn
self.writeln("WARN: {}".format(data), Color.BRIGHT_YELLOW, error=True)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 74, in writeln
self.write(data, front, back, newline=True, error=error)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 99, in write
self._write_err(data, newline)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 84, in _write_err
self._stream_err.write(data)
AttributeError: 'bool' object has no attribute 'write'
ERROR: 'bool' object has no attribute 'write'
|
AttributeError
|
def wrapper(api, *args, **kwargs):
quiet = kwargs.pop("quiet", False)
old_curdir = get_cwd()
old_output = api.user_io.out
quiet_output = ConanOutput(StringIO(), color=api.color) if quiet else None
try:
api.create_app(quiet_output=quiet_output)
log_command(f.__name__, kwargs)
with environment_append(api.app.cache.config.env_vars):
return f(api, *args, **kwargs)
except Exception as exc:
if quiet_output:
old_output.write(quiet_output._stream.getvalue())
old_output.flush()
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except BaseException:
pass
raise
finally:
os.chdir(old_curdir)
|
def wrapper(api, *args, **kwargs):
quiet = kwargs.pop("quiet", False)
old_curdir = get_cwd()
old_output = api.user_io.out
quiet_output = ConanOutput(StringIO(), api.color) if quiet else None
try:
api.create_app(quiet_output=quiet_output)
log_command(f.__name__, kwargs)
with environment_append(api.app.cache.config.env_vars):
return f(api, *args, **kwargs)
except Exception as exc:
if quiet_output:
old_output.write(quiet_output._stream.getvalue())
old_output.flush()
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except BaseException:
pass
raise
finally:
os.chdir(old_curdir)
|
https://github.com/conan-io/conan/issues/6558
|
$ rm -rf .conan\remotes.json
$ conan inspect zlib/1.2.11@ --raw=version
Traceback (most recent call last):
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\command.py", line 1969, in run
method(args[0][1:])
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\command.py", line 255, in inspect
result = self._conan.inspect(args.path_or_reference, attributes, args.remote, quiet=quiet)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\conan_api.py", line 81, in wrapper
return f(api, *args, **kwargs)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\conan_api.py", line 260, in inspect
remotes = self.app.load_remotes(remote_name=remote_name)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\conan_api.py", line 198, in load_remotes
remotes = self.cache.registry.load_remotes()
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\cache\remote_registry.py", line 313, in load_remotes
"creating default one in %s" % self._filename)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 121, in warn
self.writeln("WARN: {}".format(data), Color.BRIGHT_YELLOW, error=True)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 74, in writeln
self.write(data, front, back, newline=True, error=error)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 99, in write
self._write_err(data, newline)
File "c:\users\memsharded\envs\conanpip37\lib\site-packages\conans\client\output.py", line 84, in _write_err
self._stream_err.write(data)
AttributeError: 'bool' object has no attribute 'write'
ERROR: 'bool' object has no attribute 'write'
|
AttributeError
|
def update_check_graph(self, deps_graph, output):
"""update the lockfile, checking for security that only nodes that are being built
from sources can change their PREF, or nodes that depend on some other "modified"
package, because their binary-id can change too
"""
affected = self._closure_affected()
for node in deps_graph.nodes:
if node.recipe == RECIPE_VIRTUAL:
continue
try:
lock_node = self._nodes[node.id]
except KeyError:
if node.recipe == RECIPE_CONSUMER:
continue # If the consumer node is not found, could be a test_package
raise
if lock_node.pref:
pref = (
lock_node.pref
if self.revisions_enabled
else lock_node.pref.copy_clear_revs()
)
node_pref = (
node.pref if self.revisions_enabled else node.pref.copy_clear_revs()
)
# If the update is compatible (resolved complete PREV) or if the node has
# been build, then update the graph
if (
pref.id == PACKAGE_ID_UNKNOWN
or pref.is_compatible_with(node_pref)
or node.binary == BINARY_BUILD
or node.id in affected
or node.recipe == RECIPE_CONSUMER
):
lock_node.pref = node.pref
else:
raise ConanException(
"Mismatch between lock and graph:\nLock: %s\nGraph: %s"
% (repr(pref), repr(node.pref))
)
|
def update_check_graph(self, deps_graph, output):
"""update the lockfile, checking for security that only nodes that are being built
from sources can change their PREF, or nodes that depend on some other "modified"
package, because their binary-id can change too
"""
affected = self._closure_affected()
for node in deps_graph.nodes:
if node.recipe == RECIPE_VIRTUAL:
continue
try:
lock_node = self._nodes[node.id]
except KeyError:
if node.recipe == RECIPE_CONSUMER:
continue # If the consumer node is not found, could be a test_package
raise
if lock_node.pref:
pref = (
lock_node.pref
if self.revisions_enabled
else lock_node.pref.copy_clear_revs()
)
node_pref = (
node.pref if self.revisions_enabled else node.pref.copy_clear_revs()
)
# If the update is compatible (resolved complete PREV) or if the node has
# been build, then update the graph
if (
pref.id == PACKAGE_ID_UNKNOWN
or pref.is_compatible_with(node_pref)
or node.binary == BINARY_BUILD
or node.id in affected
or node.recipe == RECIPE_CONSUMER
):
self._upsert_node(node)
else:
raise ConanException(
"Mismatch between lock and graph:\nLock: %s\nGraph: %s"
% (repr(pref), repr(node.pref))
)
|
https://github.com/conan-io/conan/issues/6484
|
D:\JenkinsCI\workspace\Concepts\CI-PkgA\build>conan graph build-order ./integration_deps --json=build_order.json --build=missing
Using lockfile: 'D:\JenkinsCI\workspace\Concepts\CI-PkgA\build\integration_deps\conan.lock'
Traceback (most recent call last):
File "c:\python37\lib\site-packages\conans\client\command.py", line 1969, in run
method(args[0][1:])
File "c:\python37\lib\site-packages\conans\client\command.py", line 1801, in graph
build_order = self._conan.build_order(args.lockfile, args.build)
File "c:\python37\lib\site-packages\conans\client\conan_api.py", line 81, in wrapper
return f(api, *args, **kwargs)
File "c:\python37\lib\site-packages\conans\client\conan_api.py", line 1216, in build_order
reference = graph_info.graph_lock.root_node_ref()
File "c:\python37\lib\site-packages\conans\model\graph_lock.py", line 185, in root_node_ref
assert len(roots) == 1
AssertionError
ERROR:
|
AssertionError
|
def _execute(command):
proc = Popen(
command,
shell=True,
bufsize=1,
universal_newlines=True,
stdout=PIPE,
stderr=STDOUT,
)
output_buffer = []
while True:
line = proc.stdout.readline()
if not line:
break
# output.write(line)
output_buffer.append(str(line))
proc.communicate()
return proc.returncode, "".join(output_buffer)
|
def _execute(command):
proc = Popen(command, shell=True, bufsize=1, stdout=PIPE, stderr=STDOUT)
output_buffer = []
while True:
line = proc.stdout.readline()
if not line:
break
# output.write(line)
output_buffer.append(str(line))
proc.communicate()
return proc.returncode, "".join(output_buffer)
|
https://github.com/conan-io/conan/issues/6332
|
$ nosetests conans.test.functional.settings
............F............F.F.............
======================================================================
FAIL: test_only_cppstd (conan.conans.test.functional.settings.cppstd.compiler_cppstd_test.UseCompilerCppStdSettingTests)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/siu/src/extern/conan/conans/test/functional/settings/cppstd/compiler_cppstd_test.py", line 140, in test_only_cppstd
self.t.run("info . -s cppstd=14")
File "/usr/lib/python3.8/contextlib.py", line 120, in __exit__
next(self.gen)
File "/home/siu/src/extern/conan/conans/test/utils/deprecation.py", line 13, in catch_deprecation_warning
test_suite.assertEqual(len(w), n)
AssertionError: 2 != 1
======================================================================
FAIL: gcc_8_std_20_test (conan.conans.test.functional.settings.cppstd_test.StdCppTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/siu/src/extern/conan/conans/test/functional/settings/cppstd_test.py", line 47, in gcc_8_std_20_test
client.run('create . user/testing -s compiler="gcc" '
File "/usr/lib/python3.8/contextlib.py", line 120, in __exit__
next(self.gen)
File "/home/siu/src/extern/conan/conans/test/utils/deprecation.py", line 13, in catch_deprecation_warning
test_suite.assertEqual(len(w), n)
AssertionError: 2 != 1
======================================================================
FAIL: use_wrong_setting_for_compiler_test (conan.conans.test.functional.settings.cppstd_test.StdCppTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/siu/src/extern/conan/conans/test/functional/settings/cppstd_test.py", line 23, in use_wrong_setting_for_compiler_test
client.run('create . user/testing -s compiler="gcc" '
File "/usr/lib/python3.8/contextlib.py", line 120, in __exit__
next(self.gen)
File "/home/siu/src/extern/conan/conans/test/utils/deprecation.py", line 13, in catch_deprecation_warning
test_suite.assertEqual(len(w), n)
AssertionError: 2 != 1
----------------------------------------------------------------------
Ran 41 tests in 4.690s
FAILED (failures=3)
|
AssertionError
|
def checkout(self, element, submodule=None):
# Element can be a tag, branch or commit
self.check_repo()
output = self.run('checkout "%s"' % element)
output += self.checkout_submodules(submodule)
return output
|
def checkout(self, element, submodule=None):
self.check_repo()
output = self.run('checkout "%s"' % element)
if submodule:
if submodule == "shallow":
output += self.run("submodule sync")
output += self.run("submodule update --init")
elif submodule == "recursive":
output += self.run("submodule sync --recursive")
output += self.run("submodule update --init --recursive")
else:
raise ConanException(
"Invalid 'submodule' attribute value in the 'scm'. "
"Unknown value '%s'. Allowed values: ['shallow', 'recursive']"
% submodule
)
# Element can be a tag, branch or commit
return output
|
https://github.com/conan-io/conan/issues/5587
|
Traceback (most recent call last):
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/source.py", line 182, in _run_source
_run_scm(conanfile, src_folder, local_sources_path, output, cache=cache)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/source.py", line 243, in _run_scm
scm.checkout()
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/model/scm.py", line 99, in checkout
submodule=self._data.submodule)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/scm.py", line 149, in checkout
output = self.run('checkout "%s"' % element)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/scm.py", line 100, in run
return super(Git, self).run(command)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/scm.py", line 66, in run
return check_output(command).strip()
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/oss.py", line 507, in check_output
raise CalledProcessErrorWithStderr(process.returncode, cmd, output=stderr)
conans.errors.CalledProcessErrorWithStderr: Command 'git -c http.sslVerify=false checkout "0.5.1"' returned non-zero exit status 1.
error: pathspec '0.5.1' did not match any file(s) known to git.
|
conans.errors.CalledProcessErrorWithStderr
|
def checkout(self):
output = ""
if self._data.type == "git":
try:
output += self.repo.clone(
url=self._data.url, branch=self._data.revision, shallow=True
)
except subprocess.CalledProcessError:
# remove the .git directory, otherwise, fallback clone cannot be successful
# it's completely safe to do here, as clone without branch expects empty directory
rmdir(os.path.join(self.repo_folder, ".git"))
output += self.repo.clone(url=self._data.url, shallow=False)
output += self.repo.checkout(
element=self._data.revision, submodule=self._data.submodule
)
else:
output += self.repo.checkout_submodules(submodule=self._data.submodule)
else:
output += self.repo.checkout(url=self._data.url, revision=self._data.revision)
return output
|
def checkout(self):
output = ""
if self._data.type == "git":
try:
output += self.repo.clone(
url=self._data.url, branch=self._data.revision, shallow=True
)
except subprocess.CalledProcessError:
# remove the .git directory, otherwise, fallback clone cannot be successful
# it's completely safe to do here, as clone without branch expects empty directory
rmdir(os.path.join(self.repo_folder, ".git"))
output += self.repo.clone(url=self._data.url, shallow=False)
output += self.repo.checkout(
element=self._data.revision, submodule=self._data.submodule
)
else:
output += self.repo.checkout(url=self._data.url, revision=self._data.revision)
return output
|
https://github.com/conan-io/conan/issues/5587
|
Traceback (most recent call last):
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/source.py", line 182, in _run_source
_run_scm(conanfile, src_folder, local_sources_path, output, cache=cache)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/source.py", line 243, in _run_scm
scm.checkout()
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/model/scm.py", line 99, in checkout
submodule=self._data.submodule)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/scm.py", line 149, in checkout
output = self.run('checkout "%s"' % element)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/scm.py", line 100, in run
return super(Git, self).run(command)
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/scm.py", line 66, in run
return check_output(command).strip()
File "/opt/pyenv/versions/3.7.1/lib/python3.7/site-packages/conans/client/tools/oss.py", line 507, in check_output
raise CalledProcessErrorWithStderr(process.returncode, cmd, output=stderr)
conans.errors.CalledProcessErrorWithStderr: Command 'git -c http.sslVerify=false checkout "0.5.1"' returned non-zero exit status 1.
error: pathspec '0.5.1' did not match any file(s) known to git.
|
conans.errors.CalledProcessErrorWithStderr
|
def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):
self._validate_url(url)
remotes = self.load_remotes()
renamed = remotes.add(remote_name, url, verify_ssl, insert, force)
remotes.save(self._filename)
if renamed:
with self._cache.editable_packages.disable_editables():
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote == renamed:
metadata.recipe.remote = remote_name
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote == renamed:
pkg_metadata.remote = remote_name
|
def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):
self._validate_url(url)
remotes = self.load_remotes()
renamed = remotes.add(remote_name, url, verify_ssl, insert, force)
remotes.save(self._filename)
if renamed:
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote == renamed:
metadata.recipe.remote = remote_name
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote == renamed:
pkg_metadata.remote = remote_name
|
https://github.com/conan-io/conan/issues/5424
|
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1832, in run
method(args[0][1:])
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1423, in remote
return self._conan.remote_remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 922, in remote_remove
return self._cache.registry.remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/cache/remote_registry.py", line 301, in remove
with self._cache.package_layout(ref).update_metadata() as metadata:
AttributeError: 'PackageEditableLayout' object has no attribute 'update_metadata'
|
AttributeError
|
def clear(self):
remotes = self.load_remotes()
remotes.clear()
with self._cache.editable_packages.disable_editables():
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
metadata.recipe.remote = None
for pkg_metadata in metadata.packages.values():
pkg_metadata.remote = None
remotes.save(self._filename)
|
def clear(self):
remotes = self.load_remotes()
remotes.clear()
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
metadata.recipe.remote = None
for pkg_metadata in metadata.packages.values():
pkg_metadata.remote = None
remotes.save(self._filename)
|
https://github.com/conan-io/conan/issues/5424
|
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1832, in run
method(args[0][1:])
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1423, in remote
return self._conan.remote_remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 922, in remote_remove
return self._cache.registry.remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/cache/remote_registry.py", line 301, in remove
with self._cache.package_layout(ref).update_metadata() as metadata:
AttributeError: 'PackageEditableLayout' object has no attribute 'update_metadata'
|
AttributeError
|
def remove(self, remote_name):
remotes = self.load_remotes()
del remotes[remote_name]
with self._cache.editable_packages.disable_editables():
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote == remote_name:
metadata.recipe.remote = None
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote == remote_name:
pkg_metadata.remote = None
remotes.save(self._filename)
|
def remove(self, remote_name):
remotes = self.load_remotes()
del remotes[remote_name]
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote == remote_name:
metadata.recipe.remote = None
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote == remote_name:
pkg_metadata.remote = None
remotes.save(self._filename)
|
https://github.com/conan-io/conan/issues/5424
|
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1832, in run
method(args[0][1:])
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1423, in remote
return self._conan.remote_remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 922, in remote_remove
return self._cache.registry.remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/cache/remote_registry.py", line 301, in remove
with self._cache.package_layout(ref).update_metadata() as metadata:
AttributeError: 'PackageEditableLayout' object has no attribute 'update_metadata'
|
AttributeError
|
def define(self, remotes):
# For definition from conan config install
with self._cache.editable_packages.disable_editables():
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote not in remotes:
metadata.recipe.remote = None
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote not in remotes:
pkg_metadata.remote = None
remotes.save(self._filename)
|
def define(self, remotes):
# For definition from conan config install
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote not in remotes:
metadata.recipe.remote = None
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote not in remotes:
pkg_metadata.remote = None
remotes.save(self._filename)
|
https://github.com/conan-io/conan/issues/5424
|
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1832, in run
method(args[0][1:])
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1423, in remote
return self._conan.remote_remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 922, in remote_remove
return self._cache.registry.remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/cache/remote_registry.py", line 301, in remove
with self._cache.package_layout(ref).update_metadata() as metadata:
AttributeError: 'PackageEditableLayout' object has no attribute 'update_metadata'
|
AttributeError
|
def rename(self, remote_name, new_remote_name):
remotes = self.load_remotes()
remotes.rename(remote_name, new_remote_name)
with self._cache.editable_packages.disable_editables():
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote == remote_name:
metadata.recipe.remote = new_remote_name
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote == remote_name:
pkg_metadata.remote = new_remote_name
remotes.save(self._filename)
|
def rename(self, remote_name, new_remote_name):
remotes = self.load_remotes()
remotes.rename(remote_name, new_remote_name)
for ref in self._cache.all_refs():
with self._cache.package_layout(ref).update_metadata() as metadata:
if metadata.recipe.remote == remote_name:
metadata.recipe.remote = new_remote_name
for pkg_metadata in metadata.packages.values():
if pkg_metadata.remote == remote_name:
pkg_metadata.remote = new_remote_name
remotes.save(self._filename)
|
https://github.com/conan-io/conan/issues/5424
|
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1832, in run
method(args[0][1:])
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1423, in remote
return self._conan.remote_remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 922, in remote_remove
return self._cache.registry.remove(remote_name)
File "/home/luism/workspace/conan_sources/conans/client/cache/remote_registry.py", line 301, in remove
with self._cache.package_layout(ref).update_metadata() as metadata:
AttributeError: 'PackageEditableLayout' object has no attribute 'update_metadata'
|
AttributeError
|
def _process_folder(config, folder, cache, output):
if config.source_folder:
folder = os.path.join(folder, config.source_folder)
for root, dirs, files in walk(folder):
dirs[:] = [d for d in dirs if d != ".git"]
if ".git" in root:
continue
for f in files:
if f == "settings.yml":
output.info("Installing settings.yml")
settings_path = cache.settings_path
shutil.copy(os.path.join(root, f), settings_path)
elif f == "conan.conf":
output.info("Processing conan.conf")
_handle_conan_conf(cache.config, os.path.join(root, f))
elif f == "remotes.txt":
output.info("Defining remotes from remotes.txt")
_handle_remotes(cache, os.path.join(root, f))
elif f in ("registry.txt", "registry.json"):
try:
os.remove(cache.registry_path)
except OSError:
pass
finally:
shutil.copy(os.path.join(root, f), cache.cache_folder)
migrate_registry_file(cache, output)
elif f == "remotes.json":
# Fix for Conan 2.0
raise ConanException(
"remotes.json install is not supported yet. Use 'remotes.txt'"
)
else:
# This is ugly, should be removed in Conan 2.0
if root == folder and f in ("README.md", "LICENSE.txt"):
output.info("Skip %s" % f)
continue
relpath = os.path.relpath(root, folder)
if config.target_folder:
target_folder = os.path.join(
cache.cache_folder, config.target_folder, relpath
)
else:
target_folder = os.path.join(cache.cache_folder, relpath)
mkdir(target_folder)
output.info("Copying file %s to %s" % (f, target_folder))
shutil.copy(os.path.join(root, f), target_folder)
|
def _process_folder(config, folder, cache, output):
if config.source_folder:
folder = os.path.join(folder, config.source_folder)
for root, dirs, files in walk(folder):
dirs[:] = [d for d in dirs if d != ".git"]
if ".git" in root:
continue
for f in files:
if f == "settings.yml":
output.info("Installing settings.yml")
settings_path = cache.settings_path
shutil.copy(os.path.join(root, f), settings_path)
elif f == "conan.conf":
output.info("Processing conan.conf")
_handle_conan_conf(cache.config, os.path.join(root, f))
elif f == "remotes.txt":
output.info("Defining remotes from remotes.txt")
_handle_remotes(cache, os.path.join(root, f))
elif f in ("registry.txt", "registry.json"):
os.remove(cache.registry_path)
shutil.copy(os.path.join(root, f), cache.cache_folder)
migrate_registry_file(cache, output)
elif f == "remotes.json":
# Fix for Conan 2.0
raise ConanException(
"remotes.json install is not supported yet. Use 'remotes.txt'"
)
else:
# This is ugly, should be removed in Conan 2.0
if root == folder and f in ("README.md", "LICENSE.txt"):
output.info("Skip %s" % f)
continue
relpath = os.path.relpath(root, folder)
if config.target_folder:
target_folder = os.path.join(
cache.cache_folder, config.target_folder, relpath
)
else:
target_folder = os.path.join(cache.cache_folder, relpath)
mkdir(target_folder)
output.info("Copying file %s to %s" % (f, target_folder))
shutil.copy(os.path.join(root, f), target_folder)
|
https://github.com/conan-io/conan/issues/5214
|
j@ubuntu:~/tempconan$ conan config install ~/work/scripts/conan/config/Copying file version.txt to /home/j/tempconan/.conan/.
Copying file artifacts.properties to /home/j/tempconan/.conan/.
Processing conan.conf
Traceback (most recent call last):
File "/home/j/.local/lib/python3.6/site-packages/conans/client/command.py", line 1607, in run
method(args[0][1:])
File "/home/j/.local/lib/python3.6/site-packages/conans/client/command.py", line 478, in config
target_folder=args.target_folder)
File "/home/j/.local/lib/python3.6/site-packages/conans/client/conan_api.py", line 92, in wrapper
return f(*args, **kwargs)
File "/home/j/.local/lib/python3.6/site-packages/conans/client/conan_api.py", line 621, in config_install
source_folder=source_folder, target_folder=target_folder)
File "/home/j/.local/lib/python3.6/site-packages/conans/client/conf/config_installer.py", line 230, in configuration_install
_process_config(config, cache, output, requester)
File "/home/j/.local/lib/python3.6/site-packages/conans/client/conf/config_installer.py", line 182, in _process_config
_process_folder(config, config.uri, cache, output)
File "/home/j/.local/lib/python3.6/site-packages/conans/client/conf/config_installer.py", line 93, in _process_folder
os.remove(cache.registry_path)
FileNotFoundError: [Errno 2] No such file or directory: '/home/j/tempconan/.conan/remotes.json'
ERROR: [Errno 2] No such file or directory: '/home/j/tempconan/.conan/remotes.json'
|
FileNotFoundError
|
def imports(self, path, dest=None, info_folder=None, cwd=None):
"""
:param path: Path to the conanfile
:param dest: Dir to put the imported files. (Abs path or relative to cwd)
:param info_folder: Dir where the conaninfo.txt and conanbuildinfo.txt files are
:param cwd: Current working directory
:return: None
"""
cwd = cwd or get_cwd()
info_folder = _make_abs_path(info_folder, cwd)
dest = _make_abs_path(dest, cwd)
remotes = self._cache.registry.load_remotes()
self.python_requires.enable_remotes(remotes=remotes)
mkdir(dest)
conanfile_abs_path = _get_conanfile_path(path, cwd, py=None)
conanfile = self._graph_manager.load_consumer_conanfile(
conanfile_abs_path, info_folder, deps_info_required=True
)
run_imports(conanfile, dest)
|
def imports(self, path, dest=None, info_folder=None, cwd=None):
"""
:param path: Path to the conanfile
:param dest: Dir to put the imported files. (Abs path or relative to cwd)
:param info_folder: Dir where the conaninfo.txt and conanbuildinfo.txt files are
:param cwd: Current working directory
:return: None
"""
cwd = cwd or get_cwd()
info_folder = _make_abs_path(info_folder, cwd)
dest = _make_abs_path(dest, cwd)
mkdir(dest)
conanfile_abs_path = _get_conanfile_path(path, cwd, py=None)
conanfile = self._graph_manager.load_consumer_conanfile(
conanfile_abs_path, info_folder, deps_info_required=True
)
run_imports(conanfile, dest)
|
https://github.com/conan-io/conan/issues/5140
|
ERROR: Error loading conanfile at '/Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py': Unable to load conanfile in /Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py
KeyError: 'nlo-cmake-pythonlib/1.0@xxx/stable'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/conans/client/loader.py", line 269, in _parse_conanfile
loaded = imp.load_source(module_id, conan_file_path)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/imp.py", line 171, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 696, in _load
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py", line 2, in <module>
nlo = python_requires("nlo-cmake-pythonlib/1.0@xxx/stable")
File "/usr/local/lib/python3.7/site-packages/conans/client/graph/python_requires.py", line 75, in __call__
python_req = self._look_for_require(require)
File "/usr/local/lib/python3.7/site-packages/conans/client/graph/python_requires.py", line 49, in _look_for_require
remotes=self._remotes)
AttributeError: 'ConanPythonRequire' object has no attribute '_remotes'
|
KeyError
|
def copy(self, reference, user_channel, force=False, packages=None):
"""
param packages: None=No binaries, True=All binaries, else list of IDs
"""
from conans.client.cmd.copy import cmd_copy
remotes = self._cache.registry.load_remotes()
self.python_requires.enable_remotes(remotes=remotes)
# FIXME: conan copy does not support short-paths in Windows
ref = ConanFileReference.loads(reference)
cmd_copy(
ref,
user_channel,
packages,
self._cache,
self._user_io,
self._remote_manager,
self._loader,
remotes,
force=force,
)
|
def copy(self, reference, user_channel, force=False, packages=None):
"""
param packages: None=No binaries, True=All binaries, else list of IDs
"""
from conans.client.cmd.copy import cmd_copy
remotes = self._cache.registry.load_remotes()
# FIXME: conan copy does not support short-paths in Windows
ref = ConanFileReference.loads(reference)
cmd_copy(
ref,
user_channel,
packages,
self._cache,
self._user_io,
self._remote_manager,
self._loader,
remotes,
force=force,
)
|
https://github.com/conan-io/conan/issues/5140
|
ERROR: Error loading conanfile at '/Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py': Unable to load conanfile in /Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py
KeyError: 'nlo-cmake-pythonlib/1.0@xxx/stable'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/conans/client/loader.py", line 269, in _parse_conanfile
loaded = imp.load_source(module_id, conan_file_path)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/imp.py", line 171, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 696, in _load
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py", line 2, in <module>
nlo = python_requires("nlo-cmake-pythonlib/1.0@xxx/stable")
File "/usr/local/lib/python3.7/site-packages/conans/client/graph/python_requires.py", line 75, in __call__
python_req = self._look_for_require(require)
File "/usr/local/lib/python3.7/site-packages/conans/client/graph/python_requires.py", line 49, in _look_for_require
remotes=self._remotes)
AttributeError: 'ConanPythonRequire' object has no attribute '_remotes'
|
KeyError
|
def editable_add(self, path, reference, layout, cwd):
remotes = self._cache.registry.load_remotes()
self.python_requires.enable_remotes(remotes=remotes)
# Retrieve conanfile.py from target_path
target_path = _get_conanfile_path(path=path, cwd=cwd, py=True)
# Check the conanfile is there, and name/version matches
ref = ConanFileReference.loads(reference, validate=True)
target_conanfile = self._graph_manager._loader.load_class(target_path)
if (target_conanfile.name and target_conanfile.name != ref.name) or (
target_conanfile.version and target_conanfile.version != ref.version
):
raise ConanException(
"Name and version from reference ({}) and target "
"conanfile.py ({}/{}) must match".format(
ref, target_conanfile.name, target_conanfile.version
)
)
layout_abs_path = get_editable_abs_path(layout, cwd, self._cache.conan_folder)
if layout_abs_path:
self._user_io.out.success("Using layout file: %s" % layout_abs_path)
self._cache.editable_packages.add(
ref, os.path.dirname(target_path), layout_abs_path
)
|
def editable_add(self, path, reference, layout, cwd):
# Retrieve conanfile.py from target_path
target_path = _get_conanfile_path(path=path, cwd=cwd, py=True)
# Check the conanfile is there, and name/version matches
ref = ConanFileReference.loads(reference, validate=True)
target_conanfile = self._graph_manager._loader.load_class(target_path)
if (target_conanfile.name and target_conanfile.name != ref.name) or (
target_conanfile.version and target_conanfile.version != ref.version
):
raise ConanException(
"Name and version from reference ({}) and target "
"conanfile.py ({}/{}) must match".format(
ref, target_conanfile.name, target_conanfile.version
)
)
layout_abs_path = get_editable_abs_path(layout, cwd, self._cache.conan_folder)
if layout_abs_path:
self._user_io.out.success("Using layout file: %s" % layout_abs_path)
self._cache.editable_packages.add(
ref, os.path.dirname(target_path), layout_abs_path
)
|
https://github.com/conan-io/conan/issues/5140
|
ERROR: Error loading conanfile at '/Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py': Unable to load conanfile in /Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py
KeyError: 'nlo-cmake-pythonlib/1.0@xxx/stable'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/conans/client/loader.py", line 269, in _parse_conanfile
loaded = imp.load_source(module_id, conan_file_path)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/imp.py", line 171, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 696, in _load
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/Users/xxx/Repos/NloSanbox/ProxyLib/conanfile.py", line 2, in <module>
nlo = python_requires("nlo-cmake-pythonlib/1.0@xxx/stable")
File "/usr/local/lib/python3.7/site-packages/conans/client/graph/python_requires.py", line 75, in __call__
python_req = self._look_for_require(require)
File "/usr/local/lib/python3.7/site-packages/conans/client/graph/python_requires.py", line 49, in _look_for_require
remotes=self._remotes)
AttributeError: 'ConanPythonRequire' object has no attribute '_remotes'
|
KeyError
|
def _upload_recipe(self, ref, conanfile, retry, retry_wait, policy, remote):
current_remote = self._registry.refs.get(ref)
if remote != current_remote:
complete_recipe_sources(self._remote_manager, self._cache, conanfile, ref)
conanfile_path = self._cache.conanfile(ref)
self._hook_manager.execute(
"pre_upload_recipe", conanfile_path=conanfile_path, reference=ref, remote=remote
)
t1 = time.time()
the_files = self._compress_recipe_files(ref)
local_manifest = FileTreeManifest.loads(load(the_files["conanmanifest.txt"]))
remote_manifest = None
if policy != UPLOAD_POLICY_FORCE:
remote_manifest = self._check_recipe_date(ref, remote, local_manifest)
if policy == UPLOAD_POLICY_SKIP:
return ref
files_to_upload, deleted = self._recipe_files_to_upload(
ref, policy, the_files, remote, remote_manifest, local_manifest
)
if files_to_upload or deleted:
self._remote_manager.upload_recipe(
ref, files_to_upload, deleted, remote, retry, retry_wait
)
self._upload_recipe_end_msg(ref, remote)
else:
self._user_io.out.info("Recipe is up to date, upload skipped")
duration = time.time() - t1
log_recipe_upload(ref, duration, the_files, remote.name)
self._hook_manager.execute(
"post_upload_recipe",
conanfile_path=conanfile_path,
reference=ref,
remote=remote,
)
# The recipe wasn't in the registry or it has changed the revision field only
if not current_remote:
self._registry.refs.set(ref, remote.name)
return ref
|
def _upload_recipe(self, ref, conanfile, retry, retry_wait, policy, remote):
if policy != UPLOAD_POLICY_FORCE:
remote_manifest = self._check_recipe_date(ref, remote)
else:
remote_manifest = None
current_remote = self._registry.refs.get(ref)
if remote != current_remote:
complete_recipe_sources(self._remote_manager, self._cache, conanfile, ref)
conanfile_path = self._cache.conanfile(ref)
self._hook_manager.execute(
"pre_upload_recipe", conanfile_path=conanfile_path, reference=ref, remote=remote
)
t1 = time.time()
the_files = self._compress_recipe_files(ref)
if policy == UPLOAD_POLICY_SKIP:
return ref
files_to_upload, deleted = self._recipe_files_to_upload(
ref, policy, the_files, remote, remote_manifest
)
if files_to_upload or deleted:
self._remote_manager.upload_recipe(
ref, files_to_upload, deleted, remote, retry, retry_wait
)
self._upload_recipe_end_msg(ref, remote)
else:
self._user_io.out.info("Recipe is up to date, upload skipped")
duration = time.time() - t1
log_recipe_upload(ref, duration, the_files, remote.name)
self._hook_manager.execute(
"post_upload_recipe",
conanfile_path=conanfile_path,
reference=ref,
remote=remote,
)
# The recipe wasn't in the registry or it has changed the revision field only
if not current_remote:
self._registry.refs.set(ref, remote.name)
return ref
|
https://github.com/conan-io/conan/issues/5009
|
[b] $ sh -c "conan upload \* -r 090a7942-cd0f-45af-9347-465cbbe94a6e --confirm --all --no-overwrite "
MicroblinkConanFile/3.0.2@microblink/stable: Not found in local cache, looking in remotes...
MicroblinkConanFile/3.0.2@microblink/stable: Trying with '090a7942-cd0f-45af-9347-465cbbe94a6e'...
Downloading conanmanifest.txt
Downloading conanfile.py
MicroblinkConanFile/3.0.2@microblink/stable: Downloaded recipe revision 0
Uploading to remote '090a7942-cd0f-45af-9347-465cbbe94a6e':
Uploading CoreUtils/2.2.0@microblink/master to remote '090a7942-cd0f-45af-9347-465cbbe94a6e'
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/conans/client/command.py", line 1579, in run
method(args[0][1:])
File "/usr/lib/python3.7/site-packages/conans/client/command.py", line 1182, in upload
retry_wait=args.retry_wait, integrity_check=args.check)
File "/usr/lib/python3.7/site-packages/conans/client/conan_api.py", line 93, in wrapper
return f(*args, **kwargs)
File "/usr/lib/python3.7/site-packages/conans/client/conan_api.py", line 868, in upload
retry_wait, integrity_check, policy, remote_name, query=query)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 88, in upload
integrity_check, policy, remote, upload_recorder)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 190, in _upload_ref
self._upload_recipe(ref, conanfile, retry, retry_wait, policy, recipe_remote)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 230, in _upload_recipe
remote, remote_manifest)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 344, in _recipe_files_to_upload
if remote_manifest == local_manifest:
File "/usr/lib/python3.7/site-packages/conans/model/manifest.py", line 132, in __eq__
return self.file_sums == other.file_sums
AttributeError: 'NoneType' object has no attribute 'file_sums'
ERROR: 'NoneType' object has no attribute 'file_sums'
|
AttributeError
|
def _recipe_files_to_upload(
self, ref, policy, the_files, remote, remote_manifest, local_manifest
):
self._remote_manager.check_credentials(remote)
remote_snapshot = self._remote_manager.get_recipe_snapshot(ref, remote)
files_to_upload = {
filename.replace("\\", "/"): path for filename, path in the_files.items()
}
if not remote_snapshot:
return files_to_upload, set()
deleted = set(remote_snapshot).difference(the_files)
if policy != UPLOAD_POLICY_FORCE:
if remote_manifest is None:
# This is the weird scenario, we have a snapshot but don't have a manifest.
# Can be due to concurrency issues, so we can try retrieve it now
try:
remote_manifest, _ = self._remote_manager.get_recipe_manifest(
ref, remote
)
except NotFoundException:
# This is weird, the manifest still not there, better upload everything
self._user_io.out.warn(
"The remote recipe doesn't have the 'conanmanifest.txt' "
"file and will be uploaded: '{}'".format(ref)
)
return files_to_upload, deleted
if remote_manifest == local_manifest:
return None, None
if policy in (UPLOAD_POLICY_NO_OVERWRITE, UPLOAD_POLICY_NO_OVERWRITE_RECIPE):
raise ConanException(
"Local recipe is different from the remote recipe. Forbidden overwrite."
)
return files_to_upload, deleted
|
def _recipe_files_to_upload(self, ref, policy, the_files, remote, remote_manifest):
# Get the remote snapshot
self._remote_manager.check_credentials(remote)
remote_snapshot = self._remote_manager.get_recipe_snapshot(ref, remote)
if remote_snapshot and policy != UPLOAD_POLICY_FORCE:
local_manifest = FileTreeManifest.loads(load(the_files["conanmanifest.txt"]))
if remote_manifest == local_manifest:
return None, None
if policy in (UPLOAD_POLICY_NO_OVERWRITE, UPLOAD_POLICY_NO_OVERWRITE_RECIPE):
raise ConanException(
"Local recipe is different from the remote recipe. Forbidden overwrite."
)
files_to_upload = {
filename.replace("\\", "/"): path for filename, path in the_files.items()
}
deleted = set(remote_snapshot).difference(the_files)
return files_to_upload, deleted
|
https://github.com/conan-io/conan/issues/5009
|
[b] $ sh -c "conan upload \* -r 090a7942-cd0f-45af-9347-465cbbe94a6e --confirm --all --no-overwrite "
MicroblinkConanFile/3.0.2@microblink/stable: Not found in local cache, looking in remotes...
MicroblinkConanFile/3.0.2@microblink/stable: Trying with '090a7942-cd0f-45af-9347-465cbbe94a6e'...
Downloading conanmanifest.txt
Downloading conanfile.py
MicroblinkConanFile/3.0.2@microblink/stable: Downloaded recipe revision 0
Uploading to remote '090a7942-cd0f-45af-9347-465cbbe94a6e':
Uploading CoreUtils/2.2.0@microblink/master to remote '090a7942-cd0f-45af-9347-465cbbe94a6e'
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/conans/client/command.py", line 1579, in run
method(args[0][1:])
File "/usr/lib/python3.7/site-packages/conans/client/command.py", line 1182, in upload
retry_wait=args.retry_wait, integrity_check=args.check)
File "/usr/lib/python3.7/site-packages/conans/client/conan_api.py", line 93, in wrapper
return f(*args, **kwargs)
File "/usr/lib/python3.7/site-packages/conans/client/conan_api.py", line 868, in upload
retry_wait, integrity_check, policy, remote_name, query=query)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 88, in upload
integrity_check, policy, remote, upload_recorder)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 190, in _upload_ref
self._upload_recipe(ref, conanfile, retry, retry_wait, policy, recipe_remote)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 230, in _upload_recipe
remote, remote_manifest)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 344, in _recipe_files_to_upload
if remote_manifest == local_manifest:
File "/usr/lib/python3.7/site-packages/conans/model/manifest.py", line 132, in __eq__
return self.file_sums == other.file_sums
AttributeError: 'NoneType' object has no attribute 'file_sums'
ERROR: 'NoneType' object has no attribute 'file_sums'
|
AttributeError
|
def _check_recipe_date(self, ref, remote, local_manifest):
try:
remote_recipe_manifest, ref = self._remote_manager.get_recipe_manifest(
ref, remote
)
except NotFoundException:
return # First time uploading this package
if (
remote_recipe_manifest != local_manifest
and remote_recipe_manifest.time > local_manifest.time
):
self._print_manifest_information(
remote_recipe_manifest, local_manifest, ref, remote
)
raise ConanException(
"Remote recipe is newer than local recipe: "
"\n Remote date: %s\n Local date: %s"
% (remote_recipe_manifest.time, local_manifest.time)
)
return remote_recipe_manifest
|
def _check_recipe_date(self, ref, remote):
try:
remote_recipe_manifest, ref = self._remote_manager.get_recipe_manifest(
ref, remote
)
except NotFoundException:
return # First time uploading this package
local_manifest = self._cache.package_layout(ref).recipe_manifest()
if (
remote_recipe_manifest != local_manifest
and remote_recipe_manifest.time > local_manifest.time
):
self._print_manifest_information(
remote_recipe_manifest, local_manifest, ref, remote
)
raise ConanException(
"Remote recipe is newer than local recipe: "
"\n Remote date: %s\n Local date: %s"
% (remote_recipe_manifest.time, local_manifest.time)
)
return remote_recipe_manifest
|
https://github.com/conan-io/conan/issues/5009
|
[b] $ sh -c "conan upload \* -r 090a7942-cd0f-45af-9347-465cbbe94a6e --confirm --all --no-overwrite "
MicroblinkConanFile/3.0.2@microblink/stable: Not found in local cache, looking in remotes...
MicroblinkConanFile/3.0.2@microblink/stable: Trying with '090a7942-cd0f-45af-9347-465cbbe94a6e'...
Downloading conanmanifest.txt
Downloading conanfile.py
MicroblinkConanFile/3.0.2@microblink/stable: Downloaded recipe revision 0
Uploading to remote '090a7942-cd0f-45af-9347-465cbbe94a6e':
Uploading CoreUtils/2.2.0@microblink/master to remote '090a7942-cd0f-45af-9347-465cbbe94a6e'
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/conans/client/command.py", line 1579, in run
method(args[0][1:])
File "/usr/lib/python3.7/site-packages/conans/client/command.py", line 1182, in upload
retry_wait=args.retry_wait, integrity_check=args.check)
File "/usr/lib/python3.7/site-packages/conans/client/conan_api.py", line 93, in wrapper
return f(*args, **kwargs)
File "/usr/lib/python3.7/site-packages/conans/client/conan_api.py", line 868, in upload
retry_wait, integrity_check, policy, remote_name, query=query)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 88, in upload
integrity_check, policy, remote, upload_recorder)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 190, in _upload_ref
self._upload_recipe(ref, conanfile, retry, retry_wait, policy, recipe_remote)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 230, in _upload_recipe
remote, remote_manifest)
File "/usr/lib/python3.7/site-packages/conans/client/cmd/uploader.py", line 344, in _recipe_files_to_upload
if remote_manifest == local_manifest:
File "/usr/lib/python3.7/site-packages/conans/model/manifest.py", line 132, in __eq__
return self.file_sums == other.file_sums
AttributeError: 'NoneType' object has no attribute 'file_sums'
ERROR: 'NoneType' object has no attribute 'file_sums'
|
AttributeError
|
def _make_migrations(self, old_version):
# ############### FILL THIS METHOD WITH THE REQUIRED ACTIONS ##############
# VERSION 0.1
if old_version is None:
return
if old_version < Version("0.25"):
from conans.paths import DEFAULT_PROFILE_NAME
default_profile_path = os.path.join(
self.cache.conan_folder, PROFILES_FOLDER, DEFAULT_PROFILE_NAME
)
if not os.path.exists(default_profile_path):
self.out.warn(
"Migration: Moving default settings from %s file to %s"
% (CONAN_CONF, DEFAULT_PROFILE_NAME)
)
conf_path = os.path.join(self.cache.conan_folder, CONAN_CONF)
migrate_to_default_profile(conf_path, default_profile_path)
self.out.warn("Migration: export_source cache new layout")
migrate_c_src_export_source(self.cache, self.out)
if old_version < Version("1.0"):
_migrate_lock_files(self.cache, self.out)
if old_version < Version("1.12.0"):
migrate_plugins_to_hooks(self.cache)
if old_version < Version("1.13.0"):
old_settings = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
self._update_settings_yml(old_settings)
# MIGRATE LOCAL CACHE TO GENERATE MISSING METADATA.json
_migrate_create_metadata(self.cache, self.out)
if old_version < Version("1.14.0"):
migrate_config_install(self.cache)
if old_version < Version("1.14.2"):
_migrate_full_metadata(self.cache, self.out)
|
def _make_migrations(self, old_version):
# ############### FILL THIS METHOD WITH THE REQUIRED ACTIONS ##############
# VERSION 0.1
if old_version is None:
return
if old_version < Version("1.14.0"):
migrate_config_install(self.cache)
if old_version < Version("1.13.0"):
old_settings = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
self._update_settings_yml(old_settings)
# MIGRATE LOCAL CACHE TO GENERATE MISSING METADATA.json
_migrate_create_metadata(self.cache, self.out)
if old_version < Version("1.12.0"):
migrate_plugins_to_hooks(self.cache)
if old_version < Version("1.0"):
_migrate_lock_files(self.cache, self.out)
if old_version < Version("0.25"):
from conans.paths import DEFAULT_PROFILE_NAME
default_profile_path = os.path.join(
self.cache.conan_folder, PROFILES_FOLDER, DEFAULT_PROFILE_NAME
)
if not os.path.exists(default_profile_path):
self.out.warn(
"Migration: Moving default settings from %s file to %s"
% (CONAN_CONF, DEFAULT_PROFILE_NAME)
)
conf_path = os.path.join(self.cache.conan_folder, CONAN_CONF)
migrate_to_default_profile(conf_path, default_profile_path)
self.out.warn("Migration: export_source cache new layout")
migrate_c_src_export_source(self.cache, self.out)
|
https://github.com/conan-io/conan/issues/4898
|
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\model\ref.py", line 111, in loads
_, name, version, user, channel, revision, _ = ConanFileReference.sep_pattern.split(text)
ValueError: not enough values to unpack (expected 7, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\client\command.py", line 377, in install
ref = ConanFileReference.loads(args.path_or_reference)
File "build\venv\lib\site-packages\conans\model\ref.py", line 114, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference C:/Users/daniel/src/flow
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\client\command.py", line 1579, in run
method(args[0][1:])
File "build\venv\lib\site-packages\conans\client\command.py", line 390, in install
install_folder=args.install_folder)
File "build\venv\lib\site-packages\conans\client\conan_api.py", line 93, in wrapper
return f(*args, **kwargs)
File "build\venv\lib\site-packages\conans\client\conan_api.py", line 580, in install
no_imports=no_imports)
File "build\venv\lib\site-packages\conans\client\manager.py", line 80, in install
installer.install(deps_graph, keep_build)
File "build\venv\lib\site-packages\conans\client\installer.py", line 302, in install
self._build(nodes_by_level, keep_build, root_node, graph_info)
File "build\venv\lib\site-packages\conans\client\installer.py", line 324, in _build
self._handle_node_cache(node, keep_build, processed_package_refs)
File "build\venv\lib\site-packages\conans\client\installer.py", line 404, in _handle_node_cache
assert node.prev, "PREV for %s is None" % str(pref)
AssertionError: PREV for Boost/1.68.0-16@velo3d/thirdparty:df18e90df1fedfd9db3e0852ce38a27c76c5a54b is None
ERROR: PREV for Boost/1.68.0-16@velo3d/thirdparty:df18e90df1fedfd9db3e0852ce38a27c76c5a54b is None
|
ValueError
|
def _migrate_create_metadata(cache, out):
out.warn("Migration: Generating missing metadata files")
refs = _get_refs(cache)
for ref in refs:
try:
base_folder = os.path.normpath(os.path.join(cache.store, ref.dir_repr()))
# Force using a package cache layout for everything, we want to alter the cache,
# not the editables
layout = PackageCacheLayout(
base_folder=base_folder, ref=ref, short_paths=False, no_lock=True
)
folder = layout.export()
try:
manifest = FileTreeManifest.load(folder)
rrev = manifest.summary_hash
except Exception:
rrev = DEFAULT_REVISION_V1
metadata_path = os.path.join(layout.conan(), PACKAGE_METADATA)
if not os.path.exists(metadata_path):
out.info("Creating {} for {}".format(PACKAGE_METADATA, ref))
prefs = _get_prefs(layout)
metadata = PackageMetadata()
metadata.recipe.revision = rrev
for pref in prefs:
try:
pmanifest = FileTreeManifest.load(layout.package(pref))
prev = pmanifest.summary_hash
except Exception:
prev = DEFAULT_REVISION_V1
metadata.packages[pref.id].revision = prev
metadata.packages[
pref.id
].recipe_revision = metadata.recipe.revision
save(metadata_path, metadata.dumps())
except Exception as e:
raise ConanException(
"Something went wrong while generating the metadata.json files "
"in the cache, please try to fix the issue or wipe the cache: {}"
":{}".format(ref, e)
)
out.success("Migration: Generating missing metadata files finished OK!\n")
|
def _migrate_create_metadata(cache, out):
out.warn("Migration: Generating missing metadata files")
refs = _get_refs(cache)
for ref in refs:
try:
base_folder = os.path.normpath(os.path.join(cache.store, ref.dir_repr()))
# Force using a package cache layout for everything, we want to alter the cache,
# not the editables
layout = PackageCacheLayout(
base_folder=base_folder, ref=ref, short_paths=False, no_lock=True
)
folder = layout.export()
try:
manifest = FileTreeManifest.load(folder)
rrev = manifest.summary_hash
except:
rrev = DEFAULT_REVISION_V1
metadata_path = os.path.join(layout.conan(), PACKAGE_METADATA)
if not os.path.exists(metadata_path):
out.info("Creating {} for {}".format(PACKAGE_METADATA, ref))
prefs = _get_prefs(layout)
metadata = PackageMetadata()
metadata.recipe.revision = rrev
for pref in prefs:
try:
pmanifest = FileTreeManifest.load(layout.package(pref))
prev = pmanifest.summary_hash
except:
prev = DEFAULT_REVISION_V1
metadata.packages[pref.id].revision = prev
metadata.packages[
pref.id
].recipe_revision = metadata.recipe.revision
save(metadata_path, metadata.dumps())
except Exception as e:
raise ConanException(
"Something went wrong while generating the metadata.json files "
"in the cache, please try to fix the issue or wipe the cache: {}"
":{}".format(ref, e)
)
out.success("Migration: Generating missing metadata files finished OK!\n")
|
https://github.com/conan-io/conan/issues/4898
|
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\model\ref.py", line 111, in loads
_, name, version, user, channel, revision, _ = ConanFileReference.sep_pattern.split(text)
ValueError: not enough values to unpack (expected 7, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\client\command.py", line 377, in install
ref = ConanFileReference.loads(args.path_or_reference)
File "build\venv\lib\site-packages\conans\model\ref.py", line 114, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference C:/Users/daniel/src/flow
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\client\command.py", line 1579, in run
method(args[0][1:])
File "build\venv\lib\site-packages\conans\client\command.py", line 390, in install
install_folder=args.install_folder)
File "build\venv\lib\site-packages\conans\client\conan_api.py", line 93, in wrapper
return f(*args, **kwargs)
File "build\venv\lib\site-packages\conans\client\conan_api.py", line 580, in install
no_imports=no_imports)
File "build\venv\lib\site-packages\conans\client\manager.py", line 80, in install
installer.install(deps_graph, keep_build)
File "build\venv\lib\site-packages\conans\client\installer.py", line 302, in install
self._build(nodes_by_level, keep_build, root_node, graph_info)
File "build\venv\lib\site-packages\conans\client\installer.py", line 324, in _build
self._handle_node_cache(node, keep_build, processed_package_refs)
File "build\venv\lib\site-packages\conans\client\installer.py", line 404, in _handle_node_cache
assert node.prev, "PREV for %s is None" % str(pref)
AssertionError: PREV for Boost/1.68.0-16@velo3d/thirdparty:df18e90df1fedfd9db3e0852ce38a27c76c5a54b is None
ERROR: PREV for Boost/1.68.0-16@velo3d/thirdparty:df18e90df1fedfd9db3e0852ce38a27c76c5a54b is None
|
ValueError
|
def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):
assert node.binary is None, "Node.binary should be None"
assert node.package_id is not None, "Node.package_id shouldn't be None"
ref, conanfile = node.ref, node.conanfile
pref = PackageReference(ref, node.package_id)
# Check that this same reference hasn't already been checked
previous_nodes = evaluated_nodes.get(pref)
if previous_nodes:
previous_nodes.append(node)
previous_node = previous_nodes[0]
node.binary = previous_node.binary
node.binary_remote = previous_node.binary_remote
node.prev = previous_node.prev
return
evaluated_nodes[pref] = [node]
output = conanfile.output
if node.recipe == RECIPE_EDITABLE:
node.binary = BINARY_EDITABLE
# TODO: PREV?
return
if build_mode.forced(conanfile, ref):
output.warn("Forced build from source")
node.binary = BINARY_BUILD
node.prev = None
return
package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)
# Check if dirty, to remove it
with self._cache.package_lock(pref):
assert node.recipe != RECIPE_EDITABLE, (
"Editable package shouldn't reach this code"
)
if is_dirty(package_folder):
output.warn("Package is corrupted, removing folder: %s" % package_folder)
rmdir(package_folder) # Do not remove if it is EDITABLE
if self._cache.config.revisions_enabled:
metadata = self._cache.package_layout(pref.ref).load_metadata()
rec_rev = metadata.packages[pref.id].recipe_revision
if rec_rev and rec_rev != node.ref.revision:
output.warn(
"The package {} doesn't belong "
"to the installed recipe revision, removing folder".format(pref)
)
rmdir(package_folder)
if remote_name:
remote = self._registry.remotes.get(remote_name)
else:
# If the remote_name is not given, follow the binary remote, or
# the recipe remote
# If it is defined it won't iterate (might change in conan2.0)
remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)
remotes = self._registry.remotes.list
if os.path.exists(package_folder):
if update:
if remote:
try:
tmp = self._remote_manager.get_package_manifest(pref, remote)
upstream_manifest, pref = tmp
except NotFoundException:
output.warn("Can't update, no package in remote")
except NoRemoteAvailable:
output.warn("Can't update, no remote defined")
else:
if self._check_update(
upstream_manifest, package_folder, output, node
):
node.binary = BINARY_UPDATE
node.prev = pref.revision # With revision
if build_mode.outdated:
info, pref = self._remote_manager.get_package_info(
pref, remote
)
package_hash = info.recipe_hash
elif remotes:
pass
else:
output.warn("Can't update, no remote defined")
if not node.binary:
node.binary = BINARY_CACHE
metadata = self._cache.package_layout(pref.ref).load_metadata()
node.prev = metadata.packages[pref.id].revision
assert node.prev, "PREV for %s is None: %s" % (str(pref), metadata.dumps())
package_hash = ConanInfo.load_from_package(package_folder).recipe_hash
else: # Binary does NOT exist locally
remote_info = None
if remote:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, remote)
except NotFoundException:
pass
except Exception:
conanfile.output.error(
"Error downloading binary package: '{}'".format(pref)
)
raise
# If the "remote" came from the registry but the user didn't specified the -r, with
# revisions iterate all remotes
if not remote or (
not remote_info and self._cache.config.revisions_enabled and not remote_name
):
for r in remotes:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, r)
except NotFoundException:
pass
else:
if remote_info:
remote = r
break
if remote_info:
node.binary = BINARY_DOWNLOAD
node.prev = pref.revision
package_hash = remote_info.recipe_hash
else:
if build_mode.allowed(conanfile):
node.binary = BINARY_BUILD
else:
node.binary = BINARY_MISSING
node.prev = None
if build_mode.outdated:
if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):
local_recipe_hash = (
self._cache.package_layout(ref).recipe_manifest().summary_hash
)
if local_recipe_hash != package_hash:
output.info("Outdated package!")
node.binary = BINARY_BUILD
node.prev = None
else:
output.info("Package is up to date")
node.binary_remote = remote
|
def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):
assert node.binary is None, "Node.binary should be None"
assert node.package_id is not None, "Node.package_id shouldn't be None"
ref, conanfile = node.ref, node.conanfile
pref = PackageReference(ref, node.package_id)
# Check that this same reference hasn't already been checked
previous_nodes = evaluated_nodes.get(pref)
if previous_nodes:
previous_nodes.append(node)
previous_node = previous_nodes[0]
node.binary = previous_node.binary
node.binary_remote = previous_node.binary_remote
node.prev = previous_node.prev
return
evaluated_nodes[pref] = [node]
output = conanfile.output
if node.recipe == RECIPE_EDITABLE:
node.binary = BINARY_EDITABLE
# TODO: PREV?
return
if build_mode.forced(conanfile, ref):
output.warn("Forced build from source")
node.binary = BINARY_BUILD
node.prev = None
return
package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)
# Check if dirty, to remove it
with self._cache.package_lock(pref):
assert node.recipe != RECIPE_EDITABLE, (
"Editable package shouldn't reach this code"
)
if is_dirty(package_folder):
output.warn("Package is corrupted, removing folder: %s" % package_folder)
rmdir(package_folder) # Do not remove if it is EDITABLE
if self._cache.config.revisions_enabled:
metadata = self._cache.package_layout(pref.ref).load_metadata()
rec_rev = metadata.packages[pref.id].recipe_revision
if rec_rev and rec_rev != node.ref.revision:
output.warn(
"The package {} doesn't belong "
"to the installed recipe revision, removing folder".format(pref)
)
rmdir(package_folder)
if remote_name:
remote = self._registry.remotes.get(remote_name)
else:
# If the remote_name is not given, follow the binary remote, or
# the recipe remote
# If it is defined it won't iterate (might change in conan2.0)
remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)
remotes = self._registry.remotes.list
if os.path.exists(package_folder):
if update:
if remote:
try:
tmp = self._remote_manager.get_package_manifest(pref, remote)
upstream_manifest, pref = tmp
except NotFoundException:
output.warn("Can't update, no package in remote")
except NoRemoteAvailable:
output.warn("Can't update, no remote defined")
else:
if self._check_update(
upstream_manifest, package_folder, output, node
):
node.binary = BINARY_UPDATE
node.prev = pref.revision # With revision
if build_mode.outdated:
info, pref = self._remote_manager.get_package_info(
pref, remote
)
package_hash = info.recipe_hash
elif remotes:
pass
else:
output.warn("Can't update, no remote defined")
if not node.binary:
node.binary = BINARY_CACHE
metadata = self._cache.package_layout(pref.ref).load_metadata()
node.prev = metadata.packages[pref.id].revision
package_hash = ConanInfo.load_from_package(package_folder).recipe_hash
else: # Binary does NOT exist locally
remote_info = None
if remote:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, remote)
except NotFoundException:
pass
except Exception:
conanfile.output.error(
"Error downloading binary package: '{}'".format(pref)
)
raise
# If the "remote" came from the registry but the user didn't specified the -r, with
# revisions iterate all remotes
if not remote or (
not remote_info and self._cache.config.revisions_enabled and not remote_name
):
for r in remotes:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, r)
except NotFoundException:
pass
else:
if remote_info:
remote = r
break
if remote_info:
node.binary = BINARY_DOWNLOAD
node.prev = pref.revision
package_hash = remote_info.recipe_hash
else:
if build_mode.allowed(conanfile):
node.binary = BINARY_BUILD
else:
node.binary = BINARY_MISSING
node.prev = None
if build_mode.outdated:
if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):
local_recipe_hash = (
self._cache.package_layout(ref).recipe_manifest().summary_hash
)
if local_recipe_hash != package_hash:
output.info("Outdated package!")
node.binary = BINARY_BUILD
node.prev = None
else:
output.info("Package is up to date")
node.binary_remote = remote
|
https://github.com/conan-io/conan/issues/4898
|
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\model\ref.py", line 111, in loads
_, name, version, user, channel, revision, _ = ConanFileReference.sep_pattern.split(text)
ValueError: not enough values to unpack (expected 7, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\client\command.py", line 377, in install
ref = ConanFileReference.loads(args.path_or_reference)
File "build\venv\lib\site-packages\conans\model\ref.py", line 114, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference C:/Users/daniel/src/flow
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "build\venv\lib\site-packages\conans\client\command.py", line 1579, in run
method(args[0][1:])
File "build\venv\lib\site-packages\conans\client\command.py", line 390, in install
install_folder=args.install_folder)
File "build\venv\lib\site-packages\conans\client\conan_api.py", line 93, in wrapper
return f(*args, **kwargs)
File "build\venv\lib\site-packages\conans\client\conan_api.py", line 580, in install
no_imports=no_imports)
File "build\venv\lib\site-packages\conans\client\manager.py", line 80, in install
installer.install(deps_graph, keep_build)
File "build\venv\lib\site-packages\conans\client\installer.py", line 302, in install
self._build(nodes_by_level, keep_build, root_node, graph_info)
File "build\venv\lib\site-packages\conans\client\installer.py", line 324, in _build
self._handle_node_cache(node, keep_build, processed_package_refs)
File "build\venv\lib\site-packages\conans\client\installer.py", line 404, in _handle_node_cache
assert node.prev, "PREV for %s is None" % str(pref)
AssertionError: PREV for Boost/1.68.0-16@velo3d/thirdparty:df18e90df1fedfd9db3e0852ce38a27c76c5a54b is None
ERROR: PREV for Boost/1.68.0-16@velo3d/thirdparty:df18e90df1fedfd9db3e0852ce38a27c76c5a54b is None
|
ValueError
|
def __call__(self, require):
if not self.valid:
raise ConanException("Invalid use of python_requires(%s)" % require)
try:
python_req = self._look_for_require(require)
self._requires.append(python_req)
return python_req.module
except NotFoundException:
raise ConanException(
'Unable to find python_requires("{}") in remotes'.format(require)
)
|
def __call__(self, require):
if not self.valid:
raise ConanException("Invalid use of python_requires(%s)" % require)
python_req = self._look_for_require(require)
self._requires.append(python_req)
return python_req.module
|
https://github.com/conan-io/conan/issues/4814
|
conan inspect . -a options
zlib/1.2.8@conan/stable: Not found in local cache, looking in remotes...
zlib/1.2.8@conan/stable: Trying with 'kk'...
zlib/1.2.8@conan/stable: Trying with 'rr'...
zlib/1.2.8@conan/stable: Trying with 'bincrafters'...
zlib/1.2.8@conan/stable: Trying with 'arti'...
zlib/1.2.8@conan/stable: Trying with 'virtual'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_9fe37beb14d6c6ecfcec3127ea197805dcdda695'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_20437a99db62349f366d19e8a571b7f1c5c1eafa'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_a9da64e0861fb0a7fc7f2448a47f7ce97fda8a73'...
ERROR: Unable to load conanfile in /tmp/kk/conanfile.py
ValueError: not enough values to unpack (expected 7, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 285, in inspect
ref = ConanFileReference.loads(path)
File "/home/luism/workspace/conan_sources/conans/model/ref.py", line 114, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference .
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/loader.py", line 45, in load_class
return self.cached_conanfiles[conanfile_path]
KeyError: '/tmp/kk/conanfile.py'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 33, in _look_for_require
python_require = self._cached_requires[require]
KeyError: 'zlib/1.2.8@conan/stable'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/loader.py", line 254, in _parse_conanfile
loaded = imp.load_source(module_id, conan_file_path)
File "/home/luism/.virtualenvs/conan_use/lib/python3.6/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 684, in _load
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/tmp/kk/conanfile.py", line 3, in <module>
kk = python_requires("zlib/1.2.8@conan/stable")
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 58, in __call__
python_req = self._look_for_require(require)
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 41, in _look_for_require
recorder=ActionRecorder())
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 33, in get_recipe
result = self._get_recipe(ref, check_updates, update, remote_name, recorder)
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 49, in _get_recipe
remote, new_ref = self._download_recipe(ref, output, remote_name, recorder)
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 155, in _download_recipe
raise NotFoundException(msg)
conans.errors.NotFoundException: Unable to find 'zlib/1.2.8@conan/stable' in remotes
|
ValueError
|
def load_class(self, conanfile_path):
conanfile = self.cached_conanfiles.get(conanfile_path)
if conanfile:
return conanfile
try:
self._python_requires.valid = True
_, conanfile = parse_conanfile(conanfile_path, self._python_requires)
self._python_requires.valid = False
self.cached_conanfiles[conanfile_path] = conanfile
return conanfile
except ConanException as e:
raise ConanException(
"Error loading conanfile at '{}': {}".format(conanfile_path, e)
)
|
def load_class(self, conanfile_path):
try:
return self.cached_conanfiles[conanfile_path]
except KeyError:
self._python_requires.valid = True
_, conanfile = parse_conanfile(conanfile_path, self._python_requires)
self._python_requires.valid = False
self.cached_conanfiles[conanfile_path] = conanfile
return conanfile
|
https://github.com/conan-io/conan/issues/4814
|
conan inspect . -a options
zlib/1.2.8@conan/stable: Not found in local cache, looking in remotes...
zlib/1.2.8@conan/stable: Trying with 'kk'...
zlib/1.2.8@conan/stable: Trying with 'rr'...
zlib/1.2.8@conan/stable: Trying with 'bincrafters'...
zlib/1.2.8@conan/stable: Trying with 'arti'...
zlib/1.2.8@conan/stable: Trying with 'virtual'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_9fe37beb14d6c6ecfcec3127ea197805dcdda695'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_20437a99db62349f366d19e8a571b7f1c5c1eafa'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_a9da64e0861fb0a7fc7f2448a47f7ce97fda8a73'...
ERROR: Unable to load conanfile in /tmp/kk/conanfile.py
ValueError: not enough values to unpack (expected 7, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 285, in inspect
ref = ConanFileReference.loads(path)
File "/home/luism/workspace/conan_sources/conans/model/ref.py", line 114, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference .
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/loader.py", line 45, in load_class
return self.cached_conanfiles[conanfile_path]
KeyError: '/tmp/kk/conanfile.py'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 33, in _look_for_require
python_require = self._cached_requires[require]
KeyError: 'zlib/1.2.8@conan/stable'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/loader.py", line 254, in _parse_conanfile
loaded = imp.load_source(module_id, conan_file_path)
File "/home/luism/.virtualenvs/conan_use/lib/python3.6/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 684, in _load
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/tmp/kk/conanfile.py", line 3, in <module>
kk = python_requires("zlib/1.2.8@conan/stable")
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 58, in __call__
python_req = self._look_for_require(require)
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 41, in _look_for_require
recorder=ActionRecorder())
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 33, in get_recipe
result = self._get_recipe(ref, check_updates, update, remote_name, recorder)
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 49, in _get_recipe
remote, new_ref = self._download_recipe(ref, output, remote_name, recorder)
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 155, in _download_recipe
raise NotFoundException(msg)
conans.errors.NotFoundException: Unable to find 'zlib/1.2.8@conan/stable' in remotes
|
ValueError
|
def _parse_conanfile(conan_file_path):
"""From a given path, obtain the in memory python import module"""
if not os.path.exists(conan_file_path):
raise NotFoundException("%s not found!" % conan_file_path)
module_id = str(uuid.uuid1())
current_dir = os.path.dirname(conan_file_path)
sys.path.insert(0, current_dir)
try:
old_modules = list(sys.modules.keys())
with chdir(current_dir):
sys.dont_write_bytecode = True
loaded = imp.load_source(module_id, conan_file_path)
sys.dont_write_bytecode = False
# These lines are necessary, otherwise local conanfile imports with same name
# collide, but no error, and overwrite other packages imports!!
added_modules = set(sys.modules).difference(old_modules)
for added in added_modules:
module = sys.modules[added]
if module:
try:
try:
# Most modules will have __file__ != None
folder = os.path.dirname(module.__file__)
except (AttributeError, TypeError):
# But __file__ might not exist or equal None
# Like some builtins and Namespace packages py3
folder = module.__path__._path[0]
except AttributeError: # In case the module.__path__ doesn't exist
pass
else:
if folder.startswith(current_dir):
module = sys.modules.pop(added)
sys.modules["%s.%s" % (module_id, added)] = module
except ConanException:
raise
except Exception:
import traceback
trace = traceback.format_exc().split("\n")
raise ConanException(
"Unable to load conanfile in %s\n%s"
% (conan_file_path, "\n".join(trace[3:]))
)
finally:
sys.path.pop(0)
return loaded, module_id
|
def _parse_conanfile(conan_file_path):
"""From a given path, obtain the in memory python import module"""
if not os.path.exists(conan_file_path):
raise NotFoundException("%s not found!" % conan_file_path)
module_id = str(uuid.uuid1())
current_dir = os.path.dirname(conan_file_path)
sys.path.insert(0, current_dir)
try:
old_modules = list(sys.modules.keys())
with chdir(current_dir):
sys.dont_write_bytecode = True
loaded = imp.load_source(module_id, conan_file_path)
sys.dont_write_bytecode = False
# These lines are necessary, otherwise local conanfile imports with same name
# collide, but no error, and overwrite other packages imports!!
added_modules = set(sys.modules).difference(old_modules)
for added in added_modules:
module = sys.modules[added]
if module:
try:
try:
# Most modules will have __file__ != None
folder = os.path.dirname(module.__file__)
except (AttributeError, TypeError):
# But __file__ might not exist or equal None
# Like some builtins and Namespace packages py3
folder = module.__path__._path[0]
except AttributeError: # In case the module.__path__ doesn't exist
pass
else:
if folder.startswith(current_dir):
module = sys.modules.pop(added)
sys.modules["%s.%s" % (module_id, added)] = module
except Exception:
import traceback
trace = traceback.format_exc().split("\n")
raise ConanException(
"Unable to load conanfile in %s\n%s"
% (conan_file_path, "\n".join(trace[3:]))
)
finally:
sys.path.pop(0)
return loaded, module_id
|
https://github.com/conan-io/conan/issues/4814
|
conan inspect . -a options
zlib/1.2.8@conan/stable: Not found in local cache, looking in remotes...
zlib/1.2.8@conan/stable: Trying with 'kk'...
zlib/1.2.8@conan/stable: Trying with 'rr'...
zlib/1.2.8@conan/stable: Trying with 'bincrafters'...
zlib/1.2.8@conan/stable: Trying with 'arti'...
zlib/1.2.8@conan/stable: Trying with 'virtual'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_9fe37beb14d6c6ecfcec3127ea197805dcdda695'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_20437a99db62349f366d19e8a571b7f1c5c1eafa'...
zlib/1.2.8@conan/stable: Trying with 'c3i_PR-1_a9da64e0861fb0a7fc7f2448a47f7ce97fda8a73'...
ERROR: Unable to load conanfile in /tmp/kk/conanfile.py
ValueError: not enough values to unpack (expected 7, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/conan_api.py", line 285, in inspect
ref = ConanFileReference.loads(path)
File "/home/luism/workspace/conan_sources/conans/model/ref.py", line 114, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference .
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/loader.py", line 45, in load_class
return self.cached_conanfiles[conanfile_path]
KeyError: '/tmp/kk/conanfile.py'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 33, in _look_for_require
python_require = self._cached_requires[require]
KeyError: 'zlib/1.2.8@conan/stable'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/loader.py", line 254, in _parse_conanfile
loaded = imp.load_source(module_id, conan_file_path)
File "/home/luism/.virtualenvs/conan_use/lib/python3.6/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 684, in _load
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/tmp/kk/conanfile.py", line 3, in <module>
kk = python_requires("zlib/1.2.8@conan/stable")
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 58, in __call__
python_req = self._look_for_require(require)
File "/home/luism/workspace/conan_sources/conans/client/graph/python_requires.py", line 41, in _look_for_require
recorder=ActionRecorder())
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 33, in get_recipe
result = self._get_recipe(ref, check_updates, update, remote_name, recorder)
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 49, in _get_recipe
remote, new_ref = self._download_recipe(ref, output, remote_name, recorder)
File "/home/luism/workspace/conan_sources/conans/client/graph/proxy.py", line 155, in _download_recipe
raise NotFoundException(msg)
conans.errors.NotFoundException: Unable to find 'zlib/1.2.8@conan/stable' in remotes
|
ValueError
|
def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):
assert node.binary is None, "Node.binary should be None"
assert node.package_id is not None, "Node.package_id shouldn't be None"
ref, conanfile = node.ref, node.conanfile
pref = PackageReference(ref, node.package_id)
# Check that this same reference hasn't already been checked
previous_nodes = evaluated_nodes.get(pref)
if previous_nodes:
previous_nodes.append(node)
previous_node = previous_nodes[0]
node.binary = previous_node.binary
node.binary_remote = previous_node.binary_remote
node.prev = previous_node.prev
return
evaluated_nodes[pref] = [node]
output = conanfile.output
if node.recipe == RECIPE_EDITABLE:
node.binary = BINARY_EDITABLE
# TODO: PREV?
return
if build_mode.forced(conanfile, ref):
output.warn("Forced build from source")
node.binary = BINARY_BUILD
node.prev = None
return
package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)
# Check if dirty, to remove it
with self._cache.package_lock(pref):
assert node.recipe != RECIPE_EDITABLE, (
"Editable package shouldn't reach this code"
)
if is_dirty(package_folder):
output.warn("Package is corrupted, removing folder: %s" % package_folder)
rmdir(package_folder) # Do not remove if it is EDITABLE
if self._cache.config.revisions_enabled:
metadata = self._cache.package_layout(pref.ref).load_metadata()
rec_rev = metadata.packages[pref.id].recipe_revision
if rec_rev and rec_rev != node.ref.revision:
output.warn(
"The package {} doesn't belong "
"to the installed recipe revision, removing folder".format(pref)
)
rmdir(package_folder)
if remote_name:
remote = self._registry.remotes.get(remote_name)
else:
# If the remote_name is not given, follow the binary remote, or
# the recipe remote
# If it is defined it won't iterate (might change in conan2.0)
remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)
remotes = self._registry.remotes.list
if os.path.exists(package_folder):
if update:
if remote:
try:
tmp = self._remote_manager.get_package_manifest(pref, remote)
upstream_manifest, pref = tmp
except NotFoundException:
output.warn("Can't update, no package in remote")
except NoRemoteAvailable:
output.warn("Can't update, no remote defined")
else:
if self._check_update(
upstream_manifest, package_folder, output, node
):
node.binary = BINARY_UPDATE
node.prev = pref.revision # With revision
if build_mode.outdated:
info, pref = self._remote_manager.get_package_info(
pref, remote
)
package_hash = info.recipe_hash
elif remotes:
pass
else:
output.warn("Can't update, no remote defined")
if not node.binary:
node.binary = BINARY_CACHE
metadata = self._cache.package_layout(pref.ref).load_metadata()
node.prev = metadata.packages[pref.id].revision
package_hash = ConanInfo.load_from_package(package_folder).recipe_hash
else: # Binary does NOT exist locally
remote_info = None
if remote:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, remote)
except NotFoundException:
pass
# If the "remote" came from the registry but the user didn't specified the -r, with
# revisions iterate all remotes
if not remote or (
not remote_info and self._cache.config.revisions_enabled and not remote_name
):
for r in remotes:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, r)
except NotFoundException:
pass
else:
if remote_info:
remote = r
break
if remote_info:
node.binary = BINARY_DOWNLOAD
node.prev = pref.revision
package_hash = remote_info.recipe_hash
else:
if build_mode.allowed(conanfile):
node.binary = BINARY_BUILD
else:
node.binary = BINARY_MISSING
node.prev = None
if build_mode.outdated:
if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):
local_recipe_hash = (
self._cache.package_layout(ref).recipe_manifest().summary_hash
)
if local_recipe_hash != package_hash:
output.info("Outdated package!")
node.binary = BINARY_BUILD
node.prev = None
else:
output.info("Package is up to date")
node.binary_remote = remote
|
def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):
assert node.binary is None, "Node.binary should be None"
assert node.package_id is not None, "Node.package_id shouldn't be None"
ref, conanfile = node.ref, node.conanfile
pref = PackageReference(ref, node.package_id)
# Check that this same reference hasn't already been checked
previous_nodes = evaluated_nodes.get(pref)
if previous_nodes:
previous_nodes.append(node)
previous_node = previous_nodes[0]
node.binary = previous_node.binary
node.binary_remote = previous_node.binary_remote
node.prev = previous_node.prev
return
evaluated_nodes[pref] = [node]
output = conanfile.output
if node.recipe == RECIPE_EDITABLE:
node.binary = BINARY_EDITABLE
# TODO: PREV?
return
if build_mode.forced(conanfile, ref):
output.warn("Forced build from source")
node.binary = BINARY_BUILD
node.prev = None
return
package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)
# Check if dirty, to remove it
with self._cache.package_lock(pref):
assert node.recipe != RECIPE_EDITABLE, (
"Editable package shouldn't reach this code"
)
if is_dirty(package_folder):
output.warn("Package is corrupted, removing folder: %s" % package_folder)
rmdir(package_folder) # Do not remove if it is EDITABLE
if self._cache.config.revisions_enabled:
metadata = self._cache.package_layout(pref.ref).load_metadata()
rec_rev = metadata.packages[pref.id].recipe_revision
if rec_rev and rec_rev != node.ref.revision:
output.warn(
"The package {} doesn't belong "
"to the installed recipe revision, removing folder".format(pref)
)
rmdir(package_folder)
if remote_name:
remote = self._registry.remotes.get(remote_name)
else:
# If the remote_name is not given, follow the binary remote, or
# the recipe remote
# If it is defined it won't iterate (might change in conan2.0)
remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)
remotes = self._registry.remotes.list
if os.path.exists(package_folder):
if update:
if remote:
try:
tmp = self._remote_manager.get_package_manifest(pref, remote)
upstream_manifest, pref = tmp
except NotFoundException:
output.warn("Can't update, no package in remote")
except NoRemoteAvailable:
output.warn("Can't update, no remote defined")
else:
if self._check_update(
upstream_manifest, package_folder, output, node
):
node.binary = BINARY_UPDATE
node.prev = pref.revision # With revision
if build_mode.outdated:
info, pref = self._remote_manager.get_package_info(
pref, remote
)
package_hash = info.recipe_hash()
elif remotes:
pass
else:
output.warn("Can't update, no remote defined")
if not node.binary:
node.binary = BINARY_CACHE
metadata = self._cache.package_layout(pref.ref).load_metadata()
node.prev = metadata.packages[pref.id].revision
package_hash = ConanInfo.load_from_package(package_folder).recipe_hash
else: # Binary does NOT exist locally
remote_info = None
if remote:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, remote)
except NotFoundException:
pass
# If the "remote" came from the registry but the user didn't specified the -r, with
# revisions iterate all remotes
if not remote or (
not remote_info and self._cache.config.revisions_enabled and not remote_name
):
for r in remotes:
try:
remote_info, pref = self._remote_manager.get_package_info(pref, r)
except NotFoundException:
pass
else:
if remote_info:
remote = r
break
if remote_info:
node.binary = BINARY_DOWNLOAD
node.prev = pref.revision
package_hash = remote_info.recipe_hash
else:
if build_mode.allowed(conanfile):
node.binary = BINARY_BUILD
else:
node.binary = BINARY_MISSING
node.prev = None
if build_mode.outdated:
if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):
local_recipe_hash = (
self._cache.package_layout(ref).recipe_manifest().summary_hash
)
if local_recipe_hash != package_hash:
output.info("Outdated package!")
node.binary = BINARY_BUILD
node.prev = None
else:
output.info("Package is up to date")
node.binary_remote = remote
|
https://github.com/conan-io/conan/issues/4789
|
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/model/ref.py", line 111, in loads
_, name, version, user, channel, revision, _ = ConanFileReference.sep_pattern.split(text)
ValueError: not enough values to unpack (expected 7, got 1)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 376, in install
ref = ConanFileReference.loads(args.path_or_reference)
File "/usr/local/lib/python3.6/dist-packages/conans/model/ref.py", line 114, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference /home/karagon/Documents/Projects/framework-cpp/arcturus/conanfile.py
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 1570, in run
method(args[0][1:])
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 389, in install
install_folder=args.install_folder)
File "/usr/local/lib/python3.6/dist-packages/conans/client/conan_api.py", line 93, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/conans/client/conan_api.py", line 572, in install
no_imports=no_imports)
File "/usr/local/lib/python3.6/dist-packages/conans/client/manager.py", line 60, in install
self._recorder)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_manager.py", line 148, in load_graph
processed_profile=processed_profile)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_manager.py", line 238, in _load_graph
profile_build_requires, recorder, processed_profile)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_manager.py", line 178, in _recurse_build_requires
binaries_analyzer.evaluate_graph(graph, build_mode, update, remote_name)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_binaries.py", line 213, in evaluate_graph
self._evaluate_node(node, build_mode, update, evaluated, remote_name)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_binaries.py", line 106, in _evaluate_node
package_hash = info.recipe_hash()
TypeError: 'str' object is not callable
|
ValueError
|
def search(self, *args):
"""Searches package recipes and binaries in the local cache or in a remote.
If you provide a pattern, then it will search for existing package
recipes matching it. If a full reference is provided
(pkg/0.1@user/channel) then the existing binary packages for that
reference will be displayed. If no remote is specified, the search
will be done in the local cache. Search is case sensitive, exact case
has to be used. For case insensitive file systems, like Windows, case
sensitive search can be forced with '--case-sensitive'.
"""
parser = argparse.ArgumentParser(
description=self.search.__doc__, prog="conan search"
)
parser.add_argument(
"pattern_or_reference", nargs="?", help=_PATTERN_OR_REFERENCE_HELP
)
parser.add_argument(
"-o",
"--outdated",
default=False,
action="store_true",
help="Show only outdated from recipe packages. "
"This flag can only be used with a reference",
)
parser.add_argument(
"-q", "--query", default=None, action=OnceArgument, help=_QUERY_HELP
)
parser.add_argument(
"-r",
"--remote",
action=OnceArgument,
help="Remote to search in. '-r all' searches all remotes",
)
parser.add_argument(
"--case-sensitive",
default=False,
action="store_true",
help="Make a case-sensitive search. Use it to guarantee "
"case-sensitive "
"search in Windows or other case-insensitive file systems",
)
parser.add_argument(
"--raw",
default=False,
action="store_true",
help="Print just the list of recipes",
)
parser.add_argument(
"--table",
action=OnceArgument,
help="Outputs html file with a table of binaries. Only valid for a "
"reference search",
)
parser.add_argument(
"-j",
"--json",
default=None,
action=OnceArgument,
help="json file path where the search information will be written to",
)
parser.add_argument(
"-rev",
"--revisions",
default=False,
action="store_true",
help="Get a list of revisions for a reference or a package reference.",
)
args = parser.parse_args(*args)
if args.revisions and not self._cache.config.revisions_enabled:
raise ConanException(
"The client doesn't have the revisions feature enabled."
" Enable this feature setting to '1' the environment variable"
" 'CONAN_REVISIONS_ENABLED' or the config value"
" 'general.revisions_enabled' in your conan.conf file"
)
if args.table and args.json:
raise ConanException("'--table' argument cannot be used together with '--json'")
try:
ref = ConanFileReference.loads(args.pattern_or_reference)
if "*" in ref:
# Fixes a version with only a wildcard (valid reference) but not real reference
# e.g.: conan search lib/*@lasote/stable
ref = None
except (TypeError, ConanException):
ref = None
if args.query:
raise ConanException(
"-q parameter only allowed with a valid recipe reference, "
"not with a pattern"
)
cwd = os.getcwd()
info = None
try:
if args.revisions:
try:
pref = PackageReference.loads(args.pattern_or_reference)
except (TypeError, ConanException, AttributeError):
pass
else:
info = self._conan.get_package_revisions(
pref.full_repr(), remote_name=args.remote
)
if not info:
if not ref:
msg = (
"With --revision, specify a reference (e.g {ref}) or a package "
"reference with "
"recipe revision (e.g {ref}#3453453453:d50a0d523d98c15bb147b18f"
"a7d203887c38be8b)".format(ref=_REFERENCE_EXAMPLE)
)
raise ConanException(msg)
info = self._conan.get_recipe_revisions(
ref.full_repr(), remote_name=args.remote
)
self._outputer.print_revisions(ref, info, remote_name=args.remote)
return
if ref:
info = self._conan.search_packages(
ref.full_repr(),
query=args.query,
remote_name=args.remote,
outdated=args.outdated,
)
# search is done for one reference
self._outputer.print_search_packages(
info["results"], ref, args.query, args.table, outdated=args.outdated
)
else:
if args.table:
raise ConanException(
"'--table' argument can only be used with a reference"
)
elif args.outdated:
raise ConanException(
"'--outdated' argument can only be used with a reference"
)
info = self._conan.search_recipes(
args.pattern_or_reference,
remote_name=args.remote,
case_sensitive=args.case_sensitive,
)
# Deprecate 2.0: Dirty check if search is done for all remotes or for remote "all"
try:
remote_all = self._conan.get_remote_by_name("all")
except NoRemoteAvailable:
remote_all = None
all_remotes_search = remote_all is None and args.remote == "all"
self._outputer.print_search_references(
info["results"], args.pattern_or_reference, args.raw, all_remotes_search
)
except ConanException as exc:
info = exc.info
raise
finally:
if args.json and info:
self._outputer.json_output(info, args.json, cwd)
|
def search(self, *args):
"""Searches package recipes and binaries in the local cache or in a remote.
If you provide a pattern, then it will search for existing package
recipes matching it. If a full reference is provided
(pkg/0.1@user/channel) then the existing binary packages for that
reference will be displayed. If no remote is specified, the search
will be done in the local cache. Search is case sensitive, exact case
has to be used. For case insensitive file systems, like Windows, case
sensitive search can be forced with '--case-sensitive'.
"""
parser = argparse.ArgumentParser(
description=self.search.__doc__, prog="conan search"
)
parser.add_argument(
"pattern_or_reference", nargs="?", help=_PATTERN_OR_REFERENCE_HELP
)
parser.add_argument(
"-o",
"--outdated",
default=False,
action="store_true",
help="Show only outdated from recipe packages. "
"This flag can only be used with a reference",
)
parser.add_argument(
"-q", "--query", default=None, action=OnceArgument, help=_QUERY_HELP
)
parser.add_argument(
"-r",
"--remote",
action=OnceArgument,
help="Remote to search in. '-r all' searches all remotes",
)
parser.add_argument(
"--case-sensitive",
default=False,
action="store_true",
help="Make a case-sensitive search. Use it to guarantee "
"case-sensitive "
"search in Windows or other case-insensitive file systems",
)
parser.add_argument(
"--raw",
default=False,
action="store_true",
help="Print just the list of recipes",
)
parser.add_argument(
"--table",
action=OnceArgument,
help="Outputs html file with a table of binaries. Only valid for a "
"reference search",
)
parser.add_argument(
"-j",
"--json",
default=None,
action=OnceArgument,
help="json file path where the search information will be written to",
)
parser.add_argument(
"-rev",
"--revisions",
default=False,
action="store_true",
help="Get a list of revisions for a reference or a package reference.",
)
args = parser.parse_args(*args)
if args.revisions and not self._cache.config.revisions_enabled:
raise ConanException(
"The client doesn't have the revisions feature enabled."
" Enable this feature setting to '1' the environment variable"
" 'CONAN_REVISIONS_ENABLED' or the config value"
" 'general.revisions_enabled' in your conan.conf file"
)
if args.table and args.json:
raise ConanException("'--table' argument cannot be used together with '--json'")
try:
ref = ConanFileReference.loads(args.pattern_or_reference)
if "*" in ref:
# Fixes a version with only a wildcard (valid reference) but not real reference
# e.g.: conan search lib/*@lasote/stable
ref = None
except (TypeError, ConanException):
ref = None
if args.query:
raise ConanException(
"-q parameter only allowed with a valid recipe reference, "
"not with a pattern"
)
cwd = os.getcwd()
info = None
try:
if args.revisions:
try:
pref = PackageReference.loads(args.pattern_or_reference)
except (TypeError, ConanException):
pass
else:
info = self._conan.get_package_revisions(
pref.full_repr(), remote_name=args.remote
)
if not info:
if not ref:
msg = (
"With --revision, specify a reference (e.g {ref}) or a package "
"reference with "
"recipe revision (e.g {ref}#3453453453:d50a0d523d98c15bb147b18f"
"a7d203887c38be8b)".format(ref=_REFERENCE_EXAMPLE)
)
raise ConanException(msg)
info = self._conan.get_recipe_revisions(
ref.full_repr(), remote_name=args.remote
)
self._outputer.print_revisions(ref, info, remote_name=args.remote)
return
if ref:
info = self._conan.search_packages(
ref.full_repr(),
query=args.query,
remote_name=args.remote,
outdated=args.outdated,
)
# search is done for one reference
self._outputer.print_search_packages(
info["results"], ref, args.query, args.table, outdated=args.outdated
)
else:
if args.table:
raise ConanException(
"'--table' argument can only be used with a reference"
)
elif args.outdated:
raise ConanException(
"'--outdated' argument can only be used with a reference"
)
info = self._conan.search_recipes(
args.pattern_or_reference,
remote_name=args.remote,
case_sensitive=args.case_sensitive,
)
# Deprecate 2.0: Dirty check if search is done for all remotes or for remote "all"
try:
remote_all = self._conan.get_remote_by_name("all")
except NoRemoteAvailable:
remote_all = None
all_remotes_search = remote_all is None and args.remote == "all"
self._outputer.print_search_references(
info["results"], args.pattern_or_reference, args.raw, all_remotes_search
)
except ConanException as exc:
info = exc.info
raise
finally:
if args.json and info:
self._outputer.json_output(info, args.json, cwd)
|
https://github.com/conan-io/conan/issues/4622
|
conan search --revisions
Traceback (most recent call last):
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1570, in run
method(args[0][1:])
File "/home/luism/workspace/conan_sources/conans/client/command.py", line 1052, in search
pref = PackageReference.loads(args.pattern_or_reference)
File "/home/luism/workspace/conan_sources/conans/model/ref.py", line 154, in loads
text = text.strip()
AttributeError: 'NoneType' object has no attribute 'strip'
ERROR: 'NoneType' object has no attribute 'strip'
|
AttributeError
|
def get_win_os_version():
"""
Get's the OS major and minor versions. Returns a tuple of
(OS_MAJOR, OS_MINOR).
"""
import ctypes
class _OSVERSIONINFOEXW(ctypes.Structure):
_fields_ = [
("dwOSVersionInfoSize", ctypes.c_ulong),
("dwMajorVersion", ctypes.c_ulong),
("dwMinorVersion", ctypes.c_ulong),
("dwBuildNumber", ctypes.c_ulong),
("dwPlatformId", ctypes.c_ulong),
("szCSDVersion", ctypes.c_wchar * 128),
("wServicePackMajor", ctypes.c_ushort),
("wServicePackMinor", ctypes.c_ushort),
("wSuiteMask", ctypes.c_ushort),
("wProductType", ctypes.c_byte),
("wReserved", ctypes.c_byte),
]
os_version = _OSVERSIONINFOEXW()
os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
if not hasattr(ctypes, "windll"):
return None
retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
if retcode != 0:
return None
return Version("%d.%d" % (os_version.dwMajorVersion, os_version.dwMinorVersion))
|
def get_win_os_version():
"""
Get's the OS major and minor versions. Returns a tuple of
(OS_MAJOR, OS_MINOR).
"""
import ctypes
class _OSVERSIONINFOEXW(ctypes.Structure):
_fields_ = [
("dwOSVersionInfoSize", ctypes.c_ulong),
("dwMajorVersion", ctypes.c_ulong),
("dwMinorVersion", ctypes.c_ulong),
("dwBuildNumber", ctypes.c_ulong),
("dwPlatformId", ctypes.c_ulong),
("szCSDVersion", ctypes.c_wchar * 128),
("wServicePackMajor", ctypes.c_ushort),
("wServicePackMinor", ctypes.c_ushort),
("wSuiteMask", ctypes.c_ushort),
("wProductType", ctypes.c_byte),
("wReserved", ctypes.c_byte),
]
os_version = _OSVERSIONINFOEXW()
os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
if retcode != 0:
return None
return Version("%d.%d" % (os_version.dwMajorVersion, os_version.dwMinorVersion))
|
https://github.com/conan-io/conan/issues/4540
|
$ conan profile new xxx --detect
ERROR: Error detecting os_info
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/conans/client/command.py", line 1495, in run
method(args[0][1:])
File "/usr/lib/python3.6/site-packages/conans/client/command.py", line 1307, in profile
self._conan.create_profile(profile, args.detect)
File "/usr/lib/python3.6/site-packages/conans/client/conan_api.py", line 88, in wrapper
return f(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/conans/client/conan_api.py", line 894, in create_profile
self._user_io.out, detect)
File "/usr/lib/python3.6/site-packages/conans/client/cmd/profile.py", line 41, in cmd_profile_create
settings = detect_defaults_settings(output)
File "/usr/lib/python3.6/site-packages/conans/client/conf/detect.py", line 200, in detect_defaults_settings
_detect_os_arch(result, output)
File "/usr/lib/python3.6/site-packages/conans/client/conf/detect.py", line 178, in _detect_os_arch
the_os = detected_os()
File "/usr/lib/python3.6/site-packages/conans/client/tools/oss.py", line 37, in detected_os
if OSInfo().is_macos:
File "/usr/lib/python3.6/site-packages/conans/client/tools/oss.py", line 115, in __init__
self.os_version = self.get_win_os_version()
File "/usr/lib/python3.6/site-packages/conans/client/tools/oss.py", line 187, in get_win_os_version
retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
AttributeError: module 'ctypes' has no attribute 'windll'
|
AttributeError
|
def vcvars_dict(
settings,
arch=None,
compiler_version=None,
force=False,
filter_known_paths=False,
vcvars_ver=None,
winsdk_version=None,
only_diff=True,
):
known_path_lists = ("include", "lib", "libpath", "path")
cmd = vcvars_command(
settings,
arch=arch,
compiler_version=compiler_version,
force=force,
vcvars_ver=vcvars_ver,
winsdk_version=winsdk_version,
)
cmd += " && echo __BEGINS__ && set"
ret = decode_text(subprocess.check_output(cmd, shell=True))
new_env = {}
start_reached = False
for line in ret.splitlines():
line = line.strip()
if not start_reached:
if "__BEGINS__" in line:
start_reached = True
continue
if line == "\n" or not line:
continue
try:
name_var, value = line.split("=", 1)
new_value = (
value.split(os.pathsep)
if name_var.lower() in known_path_lists
else value
)
# Return only new vars & changed ones, but only with the changed elements if the var is
# a list
if only_diff:
old_value = os.environ.get(name_var)
if name_var.lower() == "path":
old_values_lower = [v.lower() for v in old_value.split(os.pathsep)]
# Clean all repeated entries, not append if the element was already there
new_env[name_var] = [
v for v in new_value if v.lower() not in old_values_lower
]
elif old_value and value.endswith(os.pathsep + old_value):
# The new value ends with separator and the old value, is a list,
# get only the new elements
new_env[name_var] = value[: -(len(old_value) + 1)].split(os.pathsep)
elif value != old_value:
# Only if the vcvars changed something, we return the variable,
# otherwise is not vcvars related
new_env[name_var] = new_value
else:
new_env[name_var] = new_value
except ValueError:
pass
if filter_known_paths:
def relevant_path(path):
path = path.replace("\\", "/").lower()
keywords = (
"msbuild",
"visual",
"microsoft",
"/msvc/",
"/vc/",
"system32",
"windows",
)
return any(word in path for word in keywords)
path_key = next(
(name for name in new_env.keys() if "path" == name.lower()), None
)
if path_key:
path = [
entry for entry in new_env.get(path_key, "") if relevant_path(entry)
]
new_env[path_key] = ";".join(path)
return new_env
|
def vcvars_dict(
settings,
arch=None,
compiler_version=None,
force=False,
filter_known_paths=False,
vcvars_ver=None,
winsdk_version=None,
only_diff=True,
):
known_path_lists = ("include", "lib", "libpath", "path")
cmd = vcvars_command(
settings,
arch=arch,
compiler_version=compiler_version,
force=force,
vcvars_ver=vcvars_ver,
winsdk_version=winsdk_version,
)
cmd += " && echo __BEGINS__ && set"
ret = decode_text(subprocess.check_output(cmd, shell=True))
new_env = {}
start_reached = False
for line in ret.splitlines():
line = line.strip()
if not start_reached:
if "__BEGINS__" in line:
start_reached = True
continue
if line == "\n" or not line:
continue
try:
name_var, value = line.split("=", 1)
new_value = (
value.split(os.pathsep)
if name_var.lower() in known_path_lists
else value
)
# Return only new vars & changed ones, but only with the changed elements if the var is
# a list
if only_diff:
old_value = os.environ.get(name_var)
if name_var.lower() == "path":
old_values_lower = [v.lower() for v in old_value.split(os.pathsep)]
# Clean all repeated entries, not append if the element was already there
new_env[name_var] = [
v for v in new_value if v.lower() not in old_values_lower
]
elif old_value and value.endswith(os.pathsep + old_value):
# The new value ends with separator and the old value, is a list,
# get only the new elements
new_env[name_var] = value[: -(len(old_value) + 1)].split(os.pathsep)
elif value != old_value:
# Only if the vcvars changed something, we return the variable,
# otherwise is not vcvars related
new_env[name_var] = new_value
else:
new_env[name_var] = new_value
except ValueError:
pass
if filter_known_paths:
def relevant_path(path):
path = path.replace("\\", "/").lower()
keywords = (
"msbuild",
"visual",
"microsoft",
"/msvc/",
"/vc/",
"system32",
"windows",
)
return any(word in path for word in keywords)
path = new_env.get("PATH", "").split(";")
path = [entry for entry in path if relevant_path(entry)]
new_env["PATH"] = ";".join(path)
return new_env
|
https://github.com/conan-io/conan/issues/3940
|
boost/1.68.0@odant/tt: Calling build()
boost/1.68.0@odant/tt: -------------- Bootstrap ------------------------
Traceback (most recent call last):
File "D:\.conan\data\boost\1.68.0\odant\tt\export\conanfile.py", line 171, in get_build_environment
env = tools.vcvars_dict(self.settings, filter_known_paths=True, force=True)
File "D:\Python27\lib\site-packages\conans\client\tools\win.py", line 460, in vcvars_dict
path = new_env.get("PATH", "").split(";")
AttributeError: 'list' object has no attribute 'split'
boost/1.68.0@odant/tt:
boost/1.68.0@odant/tt: ERROR: Package '74d6e6f7d63123455b1033138a5a462ab0736c21' build failed
boost/1.68.0@odant/tt: WARN: Build folder D:/.conan/s\4w_jf4\1
ERROR: boost/1.68.0@odant/tt: Error in build() method, line 80
b2 = self.bootstrap(source_folder)
while calling 'bootstrap', line 101
env = self.get_build_environment()
while calling 'get_build_environment', line 171
env = tools.vcvars_dict(self.settings, filter_known_paths=True, force=True)
AttributeError: 'list' object has no attribute 'split'
|
AttributeError
|
def __call__(self, require):
try:
python_require = self._cached_requires[require]
except KeyError:
r = ConanFileReference.loads(require)
requirement = Requirement(r)
self._range_resolver.resolve(
requirement, "python_require", update=False, remote_name=None
)
r = requirement.conan_reference
result = self._proxy.get_recipe(
r, False, False, remote_name=None, recorder=ActionRecorder()
)
path, _, _, reference = result
module, _ = parse_conanfile(path)
python_require = PythonRequire(reference, module)
self._cached_requires[require] = python_require
self._requires.append(python_require)
return python_require.module
|
def __call__(self, require):
try:
python_require = self._cached_requires[require]
except KeyError:
r = ConanFileReference.loads(require)
requirement = Requirement(r)
self._range_resolver.resolve(
requirement, "python_require", update=False, remote_name=None
)
r = requirement.conan_reference
result = self._proxy.get_recipe(
r, False, False, remote_name=None, recorder=ActionRecorder()
)
path, _, _, reference = result
try:
dirname = os.path.dirname(path)
sys.path.append(dirname)
# replace avoid warnings in Py2 with dots
module = imp.load_source(str(r).replace(".", "*"), path)
finally:
sys.path.pop()
python_require = PythonRequire(reference, module)
self._cached_requires[require] = python_require
self._requires.append(python_require)
return python_require.module
|
https://github.com/conan-io/conan/issues/3874
|
ERROR: Unable to load conanfile in module2/V2.0/conanfile.py
KeyError: 'module1/1.0@user/channel'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "Python3/lib/site-packages/conans/client/loader.py", line 235, in _parse_file
loaded = imp.load_source(filename, conan_file_path)
File "Python3/lib/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 684, in _load
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "module2/V2.0/conanfile.py", line 3, in <module>
module1 = python_requires("module1/1.0@user/channel")
File "Python3/lib/site-packages/conans/client/graph/python_requires.py", line 41, in __call__
module = imp.load_source(str(r).replace(".", "*"), path)
File "Python3/lib/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 684, in _load
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File ".conan/data/module1/1.0/user/channel/export/conanfile.py", line 3, in <module>
class Module1(ConanFile):
File ".conan/data/module1/1.0/user/channel/export/conanfile.py", line 5, in Module1
version = base.helper.getVersion()
AttributeError: module 'helper' has no attribute 'getVersion'
|
KeyError
|
def load_class(self, conanfile_path):
loaded, filename = parse_conanfile(conanfile_path)
try:
conanfile = _parse_module(loaded, filename)
conanfile.python_requires = self._python_requires.requires
return conanfile
except Exception as e: # re-raise with file name
raise ConanException("%s: %s" % (conanfile_path, str(e)))
|
def load_class(self, conanfile_path):
loaded, filename = _parse_file(conanfile_path)
try:
conanfile = _parse_module(loaded, filename)
conanfile.python_requires = self._python_requires.requires
return conanfile
except Exception as e: # re-raise with file name
raise ConanException("%s: %s" % (conanfile_path, str(e)))
|
https://github.com/conan-io/conan/issues/3874
|
ERROR: Unable to load conanfile in module2/V2.0/conanfile.py
KeyError: 'module1/1.0@user/channel'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "Python3/lib/site-packages/conans/client/loader.py", line 235, in _parse_file
loaded = imp.load_source(filename, conan_file_path)
File "Python3/lib/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 684, in _load
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "module2/V2.0/conanfile.py", line 3, in <module>
module1 = python_requires("module1/1.0@user/channel")
File "Python3/lib/site-packages/conans/client/graph/python_requires.py", line 41, in __call__
module = imp.load_source(str(r).replace(".", "*"), path)
File "Python3/lib/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 684, in _load
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File ".conan/data/module1/1.0/user/channel/export/conanfile.py", line 3, in <module>
class Module1(ConanFile):
File ".conan/data/module1/1.0/user/channel/export/conanfile.py", line 5, in Module1
version = base.helper.getVersion()
AttributeError: module 'helper' has no attribute 'getVersion'
|
KeyError
|
def excluded_files(self):
self._check_svn_repo()
excluded_list = []
output = self.run("status --no-ignore")
for it in output.splitlines():
if it.startswith("I"): # Only ignored files
filepath = it[8:].strip()
excluded_list.append(os.path.normpath(filepath))
return excluded_list
|
def excluded_files(self):
self._check_svn_repo()
excluded_list = []
output = self.run("status --no-ignore")
for it in output.splitlines():
if it[0] == "I": # Only ignored files
filepath = it[8:].strip()
excluded_list.append(os.path.normpath(filepath))
return excluded_list
|
https://github.com/conan-io/conan/issues/3810
|
ERROR: Traceback (most recent call last):
File "D:\CWS\Tools\Python3\lib\site-packages\conans\errors.py", line 24, in conanfile_exception_formatter
yield
File "D:\CWS\Tools\Python3\lib\site-packages\conans\client\source.py", line 139, in config_source
_fetch_scm(scm_data, dest_dir, local_sources_path, output)
File "D:\CWS\Tools\Python3\lib\site-packages\conans\client\source.py", line 195, in _fetch_scm
excluded = SCM(scm_data, local_sources_path).excluded_files
File "D:\CWS\Tools\Python3\lib\site-packages\conans\model\scm.py", line 57, in excluded_files
return self.repo.excluded_files()
File "D:\CWS\Tools\Python3\lib\site-packages\conans\client\tools\scm.py", line 231, in excluded_files
if it[0] == 'I': # Only ignored files
IndexError: string index out of range
|
IndexError
|
def full_closure(self, node, private=False):
# Needed to propagate correctly the cpp_info even with privates
closure = OrderedDict()
current = node.neighbors()
while current:
new_current = []
for n in current:
closure[n] = n
for n in current:
neighbors = n.public_neighbors() if not private else n.neighbors()
for neigh in neighbors:
if neigh not in new_current and neigh not in closure:
new_current.append(neigh)
current = new_current
return closure
|
def full_closure(self, node):
# Needed to propagate correctly the cpp_info even with privates
closure = OrderedDict()
current = node.neighbors()
while current:
new_current = []
for n in current:
closure[n] = n
for n in current:
for neigh in n.public_neighbors():
if neigh not in new_current and neigh not in closure:
new_current.append(neigh)
current = new_current
return closure
|
https://github.com/conan-io/conan/issues/3166
|
$ conan install -u -r artifactory --build=outdated ./packaging -s python=cpython -s python.version=3.6
Version range '^2.1' required by 'fbthrift/2018.05.21.00@igs/testing' resolved to 'gflags/2.2.0@igs/testing'
Version range '^2.1' required by 'folly/2018.05.21.00@igs/testing' resolved to 'gflags/2.2.0@igs/testing'
Version range '^0.3.5' required by 'folly/2018.05.21.00@igs/testing' resolved to 'glog/0.3.5@igs/testing'
Version range '^1.1' required by 'folly/2018.05.21.00@igs/testing' resolved to 'double-conversion/1.1.5@igs/testing'
glog/0.3.5@igs/testing requirement gflags/[^2.2]@igs/testing overriden by folly/2018.05.21.00@igs/testing to gflags/2.2.0@igs/testing
Version range '^2.2' required by 'glog/0.3.5@igs/testing' valid for downstream requirement 'gflags/2.2.0@igs/testing'
Version range '^2.1' required by 'wangle/2018.05.21.00@igs/testing' resolved to 'gflags/2.2.0@igs/testing'
wangle/2018.05.21.00@igs/testing: WARN: Can't update, no package in remote
wangle/2018.05.21.00@igs/testing: Package is up to date
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/model/ref.py", line 70, in loads
name, version, user, channel = tokens
ValueError: not enough values to unpack (expected 4, got 2)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 314, in install
reference = ConanFileReference.loads(args.path_or_reference)
File "/usr/local/lib/python3.6/dist-packages/conans/model/ref.py", line 73, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference ./packaging
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 1219, in run
method(args[0][1:])
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 325, in install
install_folder=args.install_folder)
File "/usr/local/lib/python3.6/dist-packages/conans/client/conan_api.py", line 79, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/conans/client/conan_api.py", line 482, in install
no_imports=no_imports)
File "/usr/local/lib/python3.6/dist-packages/conans/client/manager.py", line 258, in install
profile.build_requires)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_manager.py", line 108, in load_graph
binaries_analyzer.evaluate_graph(graph, build_mode, update, remote_name)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_binaries.py", line 138, in evaluate_graph
self._evaluate_node(node, build_mode, update, evaluated_references, remote_name)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_binaries.py", line 47, in _evaluate_node
assert node.binary is None
AssertionError
|
ValueError
|
def evaluate_graph(self, deps_graph, build_mode, update, remote_name):
evaluated_references = {}
for node in deps_graph.nodes:
if not node.conan_ref or node.binary: # Only value should be SKIP
continue
private_neighbours = node.private_neighbors()
if private_neighbours:
self._evaluate_node(
node, build_mode, update, evaluated_references, remote_name
)
if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):
for neigh in private_neighbours:
neigh.binary = BINARY_SKIP
closure = deps_graph.full_closure(neigh, private=True)
for n in closure:
n.binary = BINARY_SKIP
for node in deps_graph.nodes:
if not node.conan_ref or node.binary:
continue
self._evaluate_node(node, build_mode, update, evaluated_references, remote_name)
|
def evaluate_graph(self, deps_graph, build_mode, update, remote_name):
evaluated_references = {}
for node in deps_graph.nodes:
if not node.conan_ref:
continue
private_neighbours = node.private_neighbors()
if private_neighbours:
self._evaluate_node(
node, build_mode, update, evaluated_references, remote_name
)
if node.binary != BINARY_BUILD:
for neigh in private_neighbours:
neigh.binary = BINARY_SKIP
closure = deps_graph.full_closure(neigh)
for n in closure:
n.binary = BINARY_SKIP
for node in deps_graph.nodes:
if not node.conan_ref or node.binary:
continue
self._evaluate_node(node, build_mode, update, evaluated_references, remote_name)
|
https://github.com/conan-io/conan/issues/3166
|
$ conan install -u -r artifactory --build=outdated ./packaging -s python=cpython -s python.version=3.6
Version range '^2.1' required by 'fbthrift/2018.05.21.00@igs/testing' resolved to 'gflags/2.2.0@igs/testing'
Version range '^2.1' required by 'folly/2018.05.21.00@igs/testing' resolved to 'gflags/2.2.0@igs/testing'
Version range '^0.3.5' required by 'folly/2018.05.21.00@igs/testing' resolved to 'glog/0.3.5@igs/testing'
Version range '^1.1' required by 'folly/2018.05.21.00@igs/testing' resolved to 'double-conversion/1.1.5@igs/testing'
glog/0.3.5@igs/testing requirement gflags/[^2.2]@igs/testing overriden by folly/2018.05.21.00@igs/testing to gflags/2.2.0@igs/testing
Version range '^2.2' required by 'glog/0.3.5@igs/testing' valid for downstream requirement 'gflags/2.2.0@igs/testing'
Version range '^2.1' required by 'wangle/2018.05.21.00@igs/testing' resolved to 'gflags/2.2.0@igs/testing'
wangle/2018.05.21.00@igs/testing: WARN: Can't update, no package in remote
wangle/2018.05.21.00@igs/testing: Package is up to date
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/model/ref.py", line 70, in loads
name, version, user, channel = tokens
ValueError: not enough values to unpack (expected 4, got 2)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 314, in install
reference = ConanFileReference.loads(args.path_or_reference)
File "/usr/local/lib/python3.6/dist-packages/conans/model/ref.py", line 73, in loads
"OpenCV/1.0.6@user/stable" % text)
conans.errors.ConanException: Wrong package recipe reference ./packaging
Write something like OpenCV/1.0.6@user/stable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 1219, in run
method(args[0][1:])
File "/usr/local/lib/python3.6/dist-packages/conans/client/command.py", line 325, in install
install_folder=args.install_folder)
File "/usr/local/lib/python3.6/dist-packages/conans/client/conan_api.py", line 79, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/conans/client/conan_api.py", line 482, in install
no_imports=no_imports)
File "/usr/local/lib/python3.6/dist-packages/conans/client/manager.py", line 258, in install
profile.build_requires)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_manager.py", line 108, in load_graph
binaries_analyzer.evaluate_graph(graph, build_mode, update, remote_name)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_binaries.py", line 138, in evaluate_graph
self._evaluate_node(node, build_mode, update, evaluated_references, remote_name)
File "/usr/local/lib/python3.6/dist-packages/conans/client/graph/graph_binaries.py", line 47, in _evaluate_node
assert node.binary is None
AssertionError
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.