after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __monitor(self):
to_monitor = self.workflow_scheduling_manager.active_workflow_schedulers
while self.monitor_running:
try:
if self.invocation_grabber:
self.invocation_grabber.grab_unhandled_items()
monitor_step_timer = self.app.execution_timer_factory.get_timer(
"internal.galaxy.workflows.scheduling_manager.monitor_step",
"Workflow scheduling manager monitor step complete.",
)
for workflow_scheduler_id, workflow_scheduler in to_monitor.items():
if not self.monitor_running:
return
self.__schedule(workflow_scheduler_id, workflow_scheduler)
log.trace(monitor_step_timer.to_str())
except Exception:
log.exception("An exception occured scheduling while scheduling workflows")
self._monitor_sleep(1)
|
def __monitor(self):
to_monitor = self.workflow_scheduling_manager.active_workflow_schedulers
while self.monitor_running:
if self.invocation_grabber:
self.invocation_grabber.grab_unhandled_items()
monitor_step_timer = self.app.execution_timer_factory.get_timer(
"internal.galaxy.workflows.scheduling_manager.monitor_step",
"Workflow scheduling manager monitor step complete.",
)
for workflow_scheduler_id, workflow_scheduler in to_monitor.items():
if not self.monitor_running:
return
self.__schedule(workflow_scheduler_id, workflow_scheduler)
log.trace(monitor_step_timer.to_str())
self._monitor_sleep(1)
|
https://github.com/galaxyproject/galaxy/issues/11433
|
galaxy.workflow.scheduling_manager DEBUG 2021-02-21 15:59:02,329 Workflow invocation [540513] scheduled
galaxy.workflow.scheduling_manager DEBUG 2021-02-21 15:59:03,453 Attempting to schedule workflow invocation [(53535
2,)]
galaxy.workflow.run ERROR 2021-02-21 15:59:08,637 Failed to execute scheduled workflow.
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1296, in _execute_context
result = context.get_result_proxy()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 624, in get_result_proxy
return _result.BufferedRowResultProxy(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/result.py", line 775, in __init__
self._init_metadata()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/result.py", line 1426, in _init_metadata
self.__buffer_rows()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/result.py", line 1448, in
__buffer_rows
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
psycopg2.OperationalError: server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/run.py", line 82, in __invoke
outputs = invoker.invoke()
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/run.py", line 174, in invoke
remaining_steps = self.progress.remaining_steps()
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/run.py", line 296, in remaining_steps
step_states = self.workflow_invocation.step_states_by_step_id()
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/model/__init__.py", line 5379, in step_states_by_step_id
for step_state in self.step_states:
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/attributes.py", line 287, in
__get__
return self.impl.get(instance_state(instance), dict_)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/attributes.py", line 723, in
get
value = self.callable_(state, passive)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/strategies.py", line 760, in
_load_for_state
session, state, primary_key_identity, passive
File "<string>", line 1, in <lambda>
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/strategies.py", line 902, in
_emit_lazyload
.with_post_criteria(set_default_params)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/ext/baked.py", line 544, in all
return list(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/ext/baked.py", line 444, in __ite
r__
return q._execute_and_instances(context)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3560, in _exe
cute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1011, in execute
return meth(self, multiparams, params)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1130, in _execute_clauseelement
distilled_params,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
e, statement, parameters, cursor, context
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1296, in _execute_context
result = context.get_result_proxy()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py", line 624, in get_result_proxy
return _result.BufferedRowResultProxy(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/result.py", line 775, in __init__
self._init_metadata()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/result.py", line 1426, in _init_metadata
self.__buffer_rows()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/result.py", line 1448, in __buffer_rows
self.__rowbuffer = collections.deque(self.cursor.fetchmany(size))
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
[SQL: SELECT workflow_request_step_states.id AS workflow_request_step_states_id, workflow_request_step_states.workflow_invocation_id AS workflow_request_step_states_workflow_invocation_id, workflow_request_step_states.workflow_step_id AS workflow_request_step_states_workflow_step_id, workflow_request_step_states.value AS workflow_request_step_states_value
FROM workflow_request_step_states
WHERE %(param_1)s = workflow_request_step_states.workflow_invocation_id]
[parameters: {'param_1': 535352}]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
raven.base.Client DEBUG 2021-02-21 15:59:09,458 Sending message of length 8730 to https://sentry.galaxyproject.org/api/2/store/
galaxy.workflow.scheduling_manager ERROR 2021-02-21 15:59:11,246 Exception raised while attempting to schedule work
flow request.
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2336, in _wrap_pool_connect
return fn()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 364, in connect
return _ConnectionFairy._checkout(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 778, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 500, in checkout
rec._checkin_failed(err)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 497, in checkout
dbapi_connection = rec.get_connection()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 638, in get_c
onnection
self.__connect()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 661, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 656, in __connect
connection = pool._invoke_creator(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/strategies.py", line 114, in connect
return dialect.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 493, in connect
return self.dbapi.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/psycopg2/__init__.py", line 126, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: FATAL: the database system is in recovery mode
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/scheduling_manager.py", line 324, in __attempt_schedule
workflow_scheduler.schedule(workflow_invocation)
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/schedulers/core.py", line 41, in schedule
workflow_invocation=workflow_invocation,
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/run.py", line 27, in schedule
return __invoke(trans, workflow, workflow_run_config, workflow_invocation)
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/run.py", line 103, in __invoke
trans.sa_session.flush()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/scoping.py", line 163, in do
return getattr(self.registry(), name)(*args, **kwargs)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2536, in flush
self._flush(objects)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2678, in _flush
transaction.rollback(_capture_exception=True)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 2638, in _flush
flush_context.execute()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 213, in save_obj
) in _organize_states_for_save(base_mapper, states, uowtransaction):
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 374, in _organize_states_for_save
base_mapper, uowtransaction, states
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 1602, in _connections_for_states
connection = uowtransaction.transaction.connection(base_mapper)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 314, in connection
return self._connection_for_bind(bind, execution_options)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 433, in _connection_for_bind
conn = bind._contextual_connect()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2302, in _contextual_connect
self._wrap_pool_connect(self.pool.connect, None),
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2340, in _wrap_pool_connect
e, dialect, self
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1584, in _handle_dbapi_exception_noconnection
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2336, in _wrap_pool_connect
return fn()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 364, in connect
return _ConnectionFairy._checkout(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 778, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 500, in checkout
rec._checkin_failed(err)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 497, in checkout
dbapi_connection = rec.get_connection()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 638, in get_connection
self.__connect()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 661, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 656, in __connect
connection = pool._invoke_creator(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/strategies.py", line 114, in connect
return dialect.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 493, in connect
return self.dbapi.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/psycopg2/__init__.py", line 126, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) FATAL: the database system is in recovery mode
(Background on this error at: http://sqlalche.me/e/13/e3q8)
raven.base.Client DEBUG 2021-02-21 15:59:11,789 Sending message of length 8362 to https://sentry.galaxyproject.org/api/2/store/
galaxy.workflow.scheduling_manager DEBUG 2021-02-21 15:59:11,789 Attempting to schedule workflow invocation [(535353,)]
Exception in thread WorkflowRequestMonitor.monitor_thread:
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2336, in _wrap_pool_connect
return fn()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 364, in conne
ct
return _ConnectionFairy._checkout(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 778, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 500, in checkout
rec._checkin_failed(err)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 497, in checkout
dbapi_connection = rec.get_connection()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 610, in get_connection
self.__connect()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 661, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in
__exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 656, in __connect
connection = pool._invoke_creator(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/strategies.py", line 114, in connect
return dialect.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 493, in connect
return self.dbapi.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/psycopg2/__init__.py", line 126, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: FATAL: the database system is in recovery mode
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/deps/_conda/envs/_galaxy_/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/cvmfs/main.galaxyproject.org/deps/_conda/envs/_galaxy_/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/scheduling_manager.py", line 297, in __monitor
self.__schedule(workflow_scheduler_id, workflow_scheduler)
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/scheduling_manager.py", line 305, in __schedule
self.__attempt_schedule(invocation_id, workflow_scheduler)
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/workflow/scheduling_manager.py", line 311, in __attempt_schedule
workflow_invocation = sa_session.query(model.WorkflowInvocation).get(invocation_id)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 1018, in get
return self._get_impl(ident, loading.load_on_pk_identity)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 1135, in _get_impl
return db_load_fn(self, primary_key_identity)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/loading.py", line 286, in load_on_pk_identity
return q.one()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3490, in one
ret = self.one_or_none()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3459, in one_or_none
ret = list(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3535, in __it
er__
return self._execute_and_instances(context)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3557, in _execute_and_instances
querycontext, self._connection_from_session, close_with_result=True
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3572, in _get_bind_args
mapper=self._bind_mapper(), clause=querycontext.statement, **kw
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3550, in _connection_from_session
conn = self.session.connection(**kw)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1141, in connection
execution_options=execution_options,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/orm/session.py", line 1150, in _connection_for_bind
conn = engine._contextual_connect(**kw)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2302, in _contextual_connect
self._wrap_pool_connect(self.pool.connect, None),
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2340, in _wrap_pool_connect
e, dialect, self
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1584, in _h
andle_dbapi_exception_noconnection
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 2336, in _wrap_pool_connect
return fn()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 364, in connect
return _ConnectionFairy._checkout(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 778, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 500, in checkout
rec._checkin_failed(err)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 497, in check
out
dbapi_connection = rec.get_connection()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 610, in get_connection
self.__connect()
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 661, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
with_traceback=exc_tb,
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
raise exception
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/pool/base.py", line 656, in __connect
connection = pool._invoke_creator(self)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/strategies.py", line 114, in connect
return dialect.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/sqlalchemy/engine/default.py", line 493, in connect
return self.dbapi.connect(*cargs, **cparams)
File "/cvmfs/main.galaxyproject.org/venv/lib/python3.6/site-packages/psycopg2/__init__.py", line 126, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) FATAL: the database system is in recovery mode
|
psycopg2.OperationalError
|
def display(
self,
trans,
history_content_id,
history_id,
preview=False,
filename=None,
to_ext=None,
raw=False,
**kwd,
):
"""
GET /api/histories/{encoded_history_id}/contents/{encoded_content_id}/display
Displays history content (dataset).
The query parameter 'raw' should be considered experimental and may be dropped at
some point in the future without warning. Generally, data should be processed by its
datatype prior to display (the defult if raw is unspecified or explicitly false.
"""
decoded_content_id = self.decode_id(history_content_id)
raw = util.string_as_bool_or_none(raw)
rval = ""
try:
hda = self.hda_manager.get_accessible(decoded_content_id, trans.user)
if raw:
if filename and filename != "index":
object_store = trans.app.object_store
dir_name = hda.dataset.extra_files_path_name
file_path = object_store.get_filename(
hda.dataset, extra_dir=dir_name, alt_name=filename
)
else:
file_path = hda.file_name
rval = open(file_path, "rb")
else:
display_kwd = kwd.copy()
if "key" in display_kwd:
del display_kwd["key"]
rval = hda.datatype.display_data(
trans, hda, preview, filename, to_ext, **display_kwd
)
except galaxy_exceptions.MessageException:
raise
except Exception as e:
log.exception(
"Server error getting display data for dataset (%s) from history (%s)",
history_content_id,
history_id,
)
raise galaxy_exceptions.InternalServerError(
f"Could not get display data for dataset: {util.unicodify(e)}"
)
return rval
|
def display(
self,
trans,
history_content_id,
history_id,
preview=False,
filename=None,
to_ext=None,
raw=False,
**kwd,
):
"""
GET /api/histories/{encoded_history_id}/contents/{encoded_content_id}/display
Displays history content (dataset).
The query parameter 'raw' should be considered experimental and may be dropped at
some point in the future without warning. Generally, data should be processed by its
datatype prior to display (the defult if raw is unspecified or explicitly false.
"""
decoded_content_id = self.decode_id(history_content_id)
raw = util.string_as_bool_or_none(raw)
rval = ""
try:
hda = self.hda_manager.get_accessible(decoded_content_id, trans.user)
if raw:
if filename and filename != "index":
object_store = trans.app.object_store
dir_name = hda.dataset.extra_files_path_name
file_path = object_store.get_filename(
hda.dataset, extra_dir=dir_name, alt_name=filename
)
else:
file_path = hda.file_name
rval = open(file_path, "rb")
else:
display_kwd = kwd.copy()
if "key" in display_kwd:
del display_kwd["key"]
rval = hda.datatype.display_data(
trans, hda, preview, filename, to_ext, **display_kwd
)
except Exception as e:
log.exception(
"Error getting display data for dataset (%s) from history (%s)",
history_content_id,
history_id,
)
trans.response.status = 500
rval = "Could not get display data for dataset: %s" % util.unicodify(e)
return rval
|
https://github.com/galaxyproject/galaxy/issues/11462
|
galaxy.webapps.galaxy.api.datasets ERROR 2021-02-24 13:04:42,977 [p:41176,w:3,m:0] [uWSGIWorker3Core0] Error getting display data for dataset (e6693f4d1a4d9f91) from history (e9859c1e537d1ced)
Traceback (most recent call last):
File "lib/galaxy/webapps/galaxy/api/datasets.py", line 441, in display
hda = self.hda_manager.get_accessible(decoded_content_id, trans.user)
File "lib/galaxy/managers/secured.py", line 33, in get_accessible
return self.error_unless_accessible(item, user, **kwargs)
File "lib/galaxy/managers/secured.py", line 43, in error_unless_accessible
raise exceptions.ItemAccessibilityException("%s is not accessible by user" % (self.model_class.__name__))
galaxy.exceptions.ItemAccessibilityException: HistoryDatasetAssociation is not accessible by user
10.10.58.115 - - [24/Feb/2021:13:04:42 +0200] "GET /api/histories/e9859c1e537d1ced/contents/e6693f4d1a4d9f91/display HTTP/1.1" 500 - "-" "python-requests/2.25.1"
|
galaxy.exceptions.ItemAccessibilityException
|
def display(api_key, url, return_formatted=True):
"""
Sends an API GET request and acts as a generic formatter for the JSON response.
"""
try:
r = get(api_key, url)
except HTTPError as e:
print(e)
print(e.read(1024)) # Only return the first 1K of errors.
sys.exit(1)
if not return_formatted:
return r
elif type(r) == list:
# Response is a collection as defined in the REST style.
print("Collection Members")
print("------------------")
for n, i in enumerate(r):
if isinstance(i, str):
print(" %s" % i)
else:
# All collection members should have a name in the response.
# url is optional
if "url" in i:
print("#%d: %s" % (n + 1, i.pop("url")))
if "name" in i:
print(" name: %s" % i.pop("name"))
for k, v in i.items():
print(" %s: %s" % (k, v))
print("")
print("%d element(s) in collection" % len(r))
elif type(r) == dict:
# Response is an element as defined in the REST style.
print("Member Information")
print("------------------")
for k, v in r.items():
print(f"{k}: {v}")
elif type(r) == str:
print(r)
else:
print("response is unknown type: %s" % type(r))
|
def display(api_key, url, return_formatted=True):
"""
Sends an API GET request and acts as a generic formatter for the JSON response.
"""
try:
r = get(api_key, url)
except HTTPError as e:
print(e)
print(e.read(1024)) # Only return the first 1K of errors.
sys.exit(1)
if not return_formatted:
return r
elif type(r) == list:
# Response is a collection as defined in the REST style.
print("Collection Members")
print("------------------")
for n, i in enumerate(r):
# All collection members should have a name in the response.
# url is optional
if "url" in i:
print("#%d: %s" % (n + 1, i.pop("url")))
if "name" in i:
print(" name: %s" % i.pop("name"))
for k, v in i.items():
print(f" {k}: {v}")
print("")
print("%d element(s) in collection" % len(r))
elif type(r) == dict:
# Response is an element as defined in the REST style.
print("Member Information")
print("------------------")
for k, v in r.items():
print(f"{k}: {v}")
elif type(r) == str:
print(r)
else:
print("response is unknown type: %s" % type(r))
|
https://github.com/galaxyproject/galaxy/issues/11193
|
(.venv) rivendell$ ./scripts/api/display.py [key] http://127.0.0.1:8080/api/datatypes
Collection Members
------------------
Traceback (most recent call last):
File "./scripts/api/display.py", line 10, in <module>
display(*sys.argv[1:3])
File "/home/sergey/0dev/galaxy/_galaxy/dev/scripts/api/common.py", line 96, in display
for k, v in i.items(): # TODO this is a str in api/datatypes route
AttributeError: 'str' object has no attribute 'items'
|
AttributeError
|
def execute(
trans,
tool,
mapping_params,
history,
rerun_remap_job_id=None,
collection_info=None,
workflow_invocation_uuid=None,
invocation_step=None,
max_num_jobs=None,
job_callback=None,
completed_jobs=None,
workflow_resource_parameters=None,
validate_outputs=False,
):
"""
Execute a tool and return object containing summary (output data, number of
failures, etc...).
"""
if max_num_jobs:
assert invocation_step is not None
if rerun_remap_job_id:
assert invocation_step is None
all_jobs_timer = tool.app.execution_timer_factory.get_timer(
"internals.galaxy.tools.execute.job_batch", BATCH_EXECUTION_MESSAGE
)
if invocation_step is None:
execution_tracker = ToolExecutionTracker(
trans, tool, mapping_params, collection_info, completed_jobs=completed_jobs
)
else:
execution_tracker = WorkflowStepExecutionTracker(
trans,
tool,
mapping_params,
collection_info,
invocation_step,
completed_jobs=completed_jobs,
)
execution_cache = ToolExecutionCache(trans)
def execute_single_job(execution_slice, completed_job):
job_timer = tool.app.execution_timer_factory.get_timer(
"internals.galaxy.tools.execute.job_single",
SINGLE_EXECUTION_SUCCESS_MESSAGE,
)
params = execution_slice.param_combination
if workflow_invocation_uuid:
params["__workflow_invocation_uuid__"] = workflow_invocation_uuid
elif "__workflow_invocation_uuid__" in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params["__workflow_invocation_uuid__"]
if workflow_resource_parameters:
params["__workflow_resource_params__"] = workflow_resource_parameters
elif "__workflow_resource_params__" in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params["__workflow_resource_params__"]
if validate_outputs:
params["__validate_outputs__"] = True
job, result = tool.handle_single_execution(
trans,
rerun_remap_job_id,
execution_slice,
history,
execution_cache,
completed_job,
collection_info,
job_callback=job_callback,
flush_job=False,
)
if job:
log.debug(job_timer.to_str(tool_id=tool.id, job_id=job.id))
execution_tracker.record_success(execution_slice, job, result)
else:
execution_tracker.record_error(result)
tool_action = tool.tool_action
if hasattr(tool_action, "check_inputs_ready"):
for params in execution_tracker.param_combinations:
# This will throw an exception if the tool is not ready.
tool_action.check_inputs_ready(
tool,
trans,
params,
history,
execution_cache=execution_cache,
collection_info=collection_info,
)
execution_tracker.ensure_implicit_collections_populated(
history, mapping_params.param_template
)
job_count = len(execution_tracker.param_combinations)
jobs_executed = 0
has_remaining_jobs = False
execution_slice = None
for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
if max_num_jobs and jobs_executed >= max_num_jobs:
has_remaining_jobs = True
break
else:
execute_single_job(execution_slice, completed_jobs[i])
history = execution_slice.history or history
jobs_executed += 1
if execution_slice:
# a side effect of adding datasets to a history is a commit within db_next_hid (even with flush=False).
history.add_pending_datasets()
else:
# Make sure collections, implicit jobs etc are flushed even if there are no precreated output datasets
trans.sa_session.flush()
tool_id = tool.id
for job in execution_tracker.successful_jobs:
# Put the job in the queue if tracking in memory
tool.app.job_manager.enqueue(job, tool=tool, flush=False)
trans.log_event(
"Added job to the job queue, id: %s" % str(job.id), tool_id=tool_id
)
trans.sa_session.flush()
if has_remaining_jobs:
raise PartialJobExecution(execution_tracker)
else:
execution_tracker.finalize_dataset_collections(trans)
log.debug(all_jobs_timer.to_str(job_count=job_count, tool_id=tool.id))
return execution_tracker
|
def execute(
trans,
tool,
mapping_params,
history,
rerun_remap_job_id=None,
collection_info=None,
workflow_invocation_uuid=None,
invocation_step=None,
max_num_jobs=None,
job_callback=None,
completed_jobs=None,
workflow_resource_parameters=None,
validate_outputs=False,
):
"""
Execute a tool and return object containing summary (output data, number of
failures, etc...).
"""
if max_num_jobs:
assert invocation_step is not None
if rerun_remap_job_id:
assert invocation_step is None
all_jobs_timer = tool.app.execution_timer_factory.get_timer(
"internals.galaxy.tools.execute.job_batch", BATCH_EXECUTION_MESSAGE
)
if invocation_step is None:
execution_tracker = ToolExecutionTracker(
trans, tool, mapping_params, collection_info, completed_jobs=completed_jobs
)
else:
execution_tracker = WorkflowStepExecutionTracker(
trans,
tool,
mapping_params,
collection_info,
invocation_step,
completed_jobs=completed_jobs,
)
execution_cache = ToolExecutionCache(trans)
def execute_single_job(execution_slice, completed_job):
job_timer = tool.app.execution_timer_factory.get_timer(
"internals.galaxy.tools.execute.job_single",
SINGLE_EXECUTION_SUCCESS_MESSAGE,
)
params = execution_slice.param_combination
if workflow_invocation_uuid:
params["__workflow_invocation_uuid__"] = workflow_invocation_uuid
elif "__workflow_invocation_uuid__" in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params["__workflow_invocation_uuid__"]
if workflow_resource_parameters:
params["__workflow_resource_params__"] = workflow_resource_parameters
elif "__workflow_resource_params__" in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params["__workflow_resource_params__"]
if validate_outputs:
params["__validate_outputs__"] = True
job, result = tool.handle_single_execution(
trans,
rerun_remap_job_id,
execution_slice,
history,
execution_cache,
completed_job,
collection_info,
job_callback=job_callback,
flush_job=False,
)
if job:
log.debug(job_timer.to_str(tool_id=tool.id, job_id=job.id))
execution_tracker.record_success(execution_slice, job, result)
else:
execution_tracker.record_error(result)
tool_action = tool.tool_action
if hasattr(tool_action, "check_inputs_ready"):
for params in execution_tracker.param_combinations:
# This will throw an exception if the tool is not ready.
tool_action.check_inputs_ready(
tool,
trans,
params,
history,
execution_cache=execution_cache,
collection_info=collection_info,
)
execution_tracker.ensure_implicit_collections_populated(
history, mapping_params.param_template
)
job_count = len(execution_tracker.param_combinations)
jobs_executed = 0
has_remaining_jobs = False
execution_slice = None
for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
if max_num_jobs and jobs_executed >= max_num_jobs:
has_remaining_jobs = True
break
else:
execute_single_job(execution_slice, completed_jobs[i])
history = execution_slice.history or history
jobs_executed += 1
if execution_slice:
# a side effect of adding datasets to a history is a commit within db_next_hid (even with flush=False).
history.add_pending_datasets()
else:
# Make sure collections, implicit jobs etc are flushed even if there are no precreated output datasets
trans.sa_session.flush()
for job in execution_tracker.successful_jobs:
# Put the job in the queue if tracking in memory
tool.app.job_manager.enqueue(job, tool=tool, flush=False)
trans.log_event(
"Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id
)
trans.sa_session.flush()
if has_remaining_jobs:
raise PartialJobExecution(execution_tracker)
else:
execution_tracker.finalize_dataset_collections(trans)
log.debug(all_jobs_timer.to_str(job_count=job_count, tool_id=tool.id))
return execution_tracker
|
https://github.com/galaxyproject/galaxy/issues/11146
|
galaxy.web_stack.transport DEBUG 2021-01-15 16:38:33,341 [p:29115,w:0,m:1] [UWSGIFarmMessageTransport.dispatcher_thread] Released lock
galaxy.web_stack.transport DEBUG 2021-01-15 16:38:33,341 [p:29117,w:0,m:3] [UWSGIFarmMessageTransport.dispatcher_thread] Acquired message lock, waiting for new message
galaxy.web_stack.transport DEBUG 2021-01-15 16:38:33,342 [p:29117,w:0,m:3] [UWSGIFarmMessageTransport.dispatcher_thread] Received message: {"target": "job_handler", "params": {"task": "setup", "job_id": null}, "__classname__": "JobHandlerMessage"}
galaxy.web_stack.transport ERROR 2021-01-15 16:38:33,349 [p:29117,w:0,m:3] [UWSGIFarmMessageTransport.dispatcher_thread] Exception in mule message handling
Traceback (most recent call last):
File "lib/galaxy/web_stack/transport.py", line 119, in _dispatch_messages
self.dispatcher.dispatch(msg)
File "lib/galaxy/web_stack/message.py", line 46, in dispatch
self.__funcs[msg.target](msg)
File "lib/galaxy/web_stack/message.py", line 147, in default_handler
getattr(self, '_handle_%s_msg' % msg.task)(**msg.params)
File "lib/galaxy/jobs/handler.py", line 823, in _handle_setup_msg
if job.handler is None:
AttributeError: 'NoneType' object has no attribute 'handler'
|
AttributeError
|
def _assign_uwsgi_mule_message_handler(
self, obj, method, configured, message_callback=None, flush=True, **kwargs
):
"""Assign object to a handler by sending a setup message to the appropriate handler pool (farm), where a handler
(mule) will receive the message and assign itself.
:param obj: Same as :method:`ConfiguresHandlers.assign_handler()`.
:param method: Same as :method:`ConfiguresHandlers._assign_db_preassign_handler()`.
:param configured: Same as :method:`ConfiguresHandlers.assign_handler()`.
:param queue_callback: Callback returning a setup message to be sent via the stack messaging interface's
``send_message()`` method. No arguments are passed.
:type queue_callback: callable
:raises HandlerAssignmentSkip: if the configured or default handler is not a known handler pool (farm)
:returns: str -- The assigned handler pool.
"""
assert message_callback is not None, (
"Cannot perform '%s' handler assignment: `message_callback` is None"
% HANDLER_ASSIGNMENT_METHODS.UWSGI_MULE_MESSAGE
)
tag = configured or self.DEFAULT_HANDLER_TAG
pool = self.pool_for_tag.get(tag)
if pool is None:
log.debug(
"(%s) No handler pool (uWSGI farm) for '%s' found", obj.log_str(), tag
)
raise HandlerAssignmentSkip()
else:
if flush or not obj.id:
_timed_flush_obj(obj)
message = message_callback()
self.app.application_stack.send_message(pool, message)
return pool
|
def _assign_uwsgi_mule_message_handler(
self, obj, method, configured, message_callback=None, flush=True, **kwargs
):
"""Assign object to a handler by sending a setup message to the appropriate handler pool (farm), where a handler
(mule) will receive the message and assign itself.
:param obj: Same as :method:`ConfiguresHandlers.assign_handler()`.
:param method: Same as :method:`ConfiguresHandlers._assign_db_preassign_handler()`.
:param configured: Same as :method:`ConfiguresHandlers.assign_handler()`.
:param queue_callback: Callback returning a setup message to be sent via the stack messaging interface's
``send_message()`` method. No arguments are passed.
:type queue_callback: callable
:raises HandlerAssignmentSkip: if the configured or default handler is not a known handler pool (farm)
:returns: str -- The assigned handler pool.
"""
assert message_callback is not None, (
"Cannot perform '%s' handler assignment: `message_callback` is None"
% HANDLER_ASSIGNMENT_METHODS.UWSGI_MULE_MESSAGE
)
tag = configured or self.DEFAULT_HANDLER_TAG
pool = self.pool_for_tag.get(tag)
if pool is None:
log.debug(
"(%s) No handler pool (uWSGI farm) for '%s' found", obj.log_str(), tag
)
raise HandlerAssignmentSkip()
else:
if flush:
_timed_flush_obj(obj)
message = message_callback()
self.app.application_stack.send_message(pool, message)
return pool
|
https://github.com/galaxyproject/galaxy/issues/11146
|
galaxy.web_stack.transport DEBUG 2021-01-15 16:38:33,341 [p:29115,w:0,m:1] [UWSGIFarmMessageTransport.dispatcher_thread] Released lock
galaxy.web_stack.transport DEBUG 2021-01-15 16:38:33,341 [p:29117,w:0,m:3] [UWSGIFarmMessageTransport.dispatcher_thread] Acquired message lock, waiting for new message
galaxy.web_stack.transport DEBUG 2021-01-15 16:38:33,342 [p:29117,w:0,m:3] [UWSGIFarmMessageTransport.dispatcher_thread] Received message: {"target": "job_handler", "params": {"task": "setup", "job_id": null}, "__classname__": "JobHandlerMessage"}
galaxy.web_stack.transport ERROR 2021-01-15 16:38:33,349 [p:29117,w:0,m:3] [UWSGIFarmMessageTransport.dispatcher_thread] Exception in mule message handling
Traceback (most recent call last):
File "lib/galaxy/web_stack/transport.py", line 119, in _dispatch_messages
self.dispatcher.dispatch(msg)
File "lib/galaxy/web_stack/message.py", line 46, in dispatch
self.__funcs[msg.target](msg)
File "lib/galaxy/web_stack/message.py", line 147, in default_handler
getattr(self, '_handle_%s_msg' % msg.task)(**msg.params)
File "lib/galaxy/jobs/handler.py", line 823, in _handle_setup_msg
if job.handler is None:
AttributeError: 'NoneType' object has no attribute 'handler'
|
AttributeError
|
def resolve(self, enabled_container_types, tool_info, **kwds):
if (
not self.docker_cli_available
or tool_info.requires_galaxy_python_environment
or self.container_type not in enabled_container_types
):
return None
targets = mulled_targets(tool_info)
resolution_cache = kwds.get("resolution_cache")
return docker_cached_container_description(
targets,
self.namespace,
hash_func=self.hash_func,
shell=self.shell,
resolution_cache=resolution_cache,
)
|
def resolve(self, enabled_container_types, tool_info, **kwds):
if (
tool_info.requires_galaxy_python_environment
or self.container_type not in enabled_container_types
):
return None
targets = mulled_targets(tool_info)
resolution_cache = kwds.get("resolution_cache")
return docker_cached_container_description(
targets,
self.namespace,
hash_func=self.hash_func,
shell=self.shell,
resolution_cache=resolution_cache,
)
|
https://github.com/galaxyproject/galaxy/issues/11125
|
urllib3.connectionpool DEBUG 2021-01-13 15:26:00,421 https://quay.io:443 "GET /api/v1/repository/biocontainers/porechop HTTP/1.1" 200 4033
galaxy.tool_util.deps.containers ERROR 2021-01-13 15:26:00,499 Could not get container description for tool 'toolshed.g2.bx.psu.edu/repos/iuc/porechop/porechop/0.2.3'
Traceback (most recent call last):
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 243, in find_best_container_description
resolved_container_description = self.resolve(enabled_container_types, tool_info, **kwds)
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 265, in resolve
container_description = container_resolver.resolve(enabled_container_types, tool_info, install=install, resolution_cache=resolution_cache, session=session)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 410, in resolve
if install and not self.cached_container_description(
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 385, in cached_container_description
return docker_cached_container_description(targets, namespace, hash_func, resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 202, in docker_cached_container_description
cached_images = list_docker_cached_mulled_images(namespace, hash_func=hash_func, resolution_cache=resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 71, in list_docker_cached_mulled_images
images_and_versions = unicodify(subprocess.check_output(command)).strip().splitlines()
File "/usr/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 489, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'docker'
|
FileNotFoundError
|
def cached_container_description(self, targets, namespace, hash_func, resolution_cache):
try:
return docker_cached_container_description(
targets, namespace, hash_func, resolution_cache
)
except subprocess.CalledProcessError:
# We should only get here if a docker binary is available, but command quits with a non-zero exit code,
# e.g if the docker daemon is not available
log.exception(
"An error occured while listing cached docker image. Docker daemon may need to be restarted."
)
return None
|
def cached_container_description(self, targets, namespace, hash_func, resolution_cache):
return docker_cached_container_description(
targets, namespace, hash_func, resolution_cache
)
|
https://github.com/galaxyproject/galaxy/issues/11125
|
urllib3.connectionpool DEBUG 2021-01-13 15:26:00,421 https://quay.io:443 "GET /api/v1/repository/biocontainers/porechop HTTP/1.1" 200 4033
galaxy.tool_util.deps.containers ERROR 2021-01-13 15:26:00,499 Could not get container description for tool 'toolshed.g2.bx.psu.edu/repos/iuc/porechop/porechop/0.2.3'
Traceback (most recent call last):
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 243, in find_best_container_description
resolved_container_description = self.resolve(enabled_container_types, tool_info, **kwds)
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 265, in resolve
container_description = container_resolver.resolve(enabled_container_types, tool_info, install=install, resolution_cache=resolution_cache, session=session)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 410, in resolve
if install and not self.cached_container_description(
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 385, in cached_container_description
return docker_cached_container_description(targets, namespace, hash_func, resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 202, in docker_cached_container_description
cached_images = list_docker_cached_mulled_images(namespace, hash_func=hash_func, resolution_cache=resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 71, in list_docker_cached_mulled_images
images_and_versions = unicodify(subprocess.check_output(command)).strip().splitlines()
File "/usr/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 489, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'docker'
|
FileNotFoundError
|
def pull(self, container):
if self.docker_cli_available:
command = container.build_pull_command()
shell(command)
|
def pull(self, container):
command = container.build_pull_command()
shell(command)
|
https://github.com/galaxyproject/galaxy/issues/11125
|
urllib3.connectionpool DEBUG 2021-01-13 15:26:00,421 https://quay.io:443 "GET /api/v1/repository/biocontainers/porechop HTTP/1.1" 200 4033
galaxy.tool_util.deps.containers ERROR 2021-01-13 15:26:00,499 Could not get container description for tool 'toolshed.g2.bx.psu.edu/repos/iuc/porechop/porechop/0.2.3'
Traceback (most recent call last):
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 243, in find_best_container_description
resolved_container_description = self.resolve(enabled_container_types, tool_info, **kwds)
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 265, in resolve
container_description = container_resolver.resolve(enabled_container_types, tool_info, install=install, resolution_cache=resolution_cache, session=session)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 410, in resolve
if install and not self.cached_container_description(
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 385, in cached_container_description
return docker_cached_container_description(targets, namespace, hash_func, resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 202, in docker_cached_container_description
cached_images = list_docker_cached_mulled_images(namespace, hash_func=hash_func, resolution_cache=resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 71, in list_docker_cached_mulled_images
images_and_versions = unicodify(subprocess.check_output(command)).strip().splitlines()
File "/usr/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 489, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'docker'
|
FileNotFoundError
|
def resolve(
self, enabled_container_types, tool_info, install=False, session=None, **kwds
):
resolution_cache = kwds.get("resolution_cache")
if (
tool_info.requires_galaxy_python_environment
or self.container_type not in enabled_container_types
):
return None
targets = mulled_targets(tool_info)
if len(targets) == 0:
return None
name = targets_to_mulled_name(
targets=targets,
hash_func=self.hash_func,
namespace=self.namespace,
resolution_cache=resolution_cache,
session=session,
)
if name:
container_id = "quay.io/{}/{}".format(self.namespace, name)
if self.protocol:
container_id = "{}{}".format(self.protocol, container_id)
container_description = ContainerDescription(
container_id,
type=self.container_type,
shell=self.shell,
)
if self.docker_cli_available:
if install and not self.cached_container_description(
targets,
namespace=self.namespace,
hash_func=self.hash_func,
resolution_cache=resolution_cache,
):
destination_info = {}
destination_for_container_type = kwds.get(
"destination_for_container_type"
)
if destination_for_container_type:
destination_info = destination_for_container_type(
self.container_type
)
container = CONTAINER_CLASSES[self.container_type](
container_description.identifier,
self.app_info,
tool_info,
destination_info,
{},
container_description,
)
self.pull(container)
if not self.auto_install:
container_description = self.cached_container_description(
targets,
namespace=self.namespace,
hash_func=self.hash_func,
resolution_cache=resolution_cache,
)
return container_description
|
def resolve(
self, enabled_container_types, tool_info, install=False, session=None, **kwds
):
resolution_cache = kwds.get("resolution_cache")
if (
tool_info.requires_galaxy_python_environment
or self.container_type not in enabled_container_types
):
return None
targets = mulled_targets(tool_info)
if len(targets) == 0:
return None
name = targets_to_mulled_name(
targets=targets,
hash_func=self.hash_func,
namespace=self.namespace,
resolution_cache=resolution_cache,
session=session,
)
if name:
container_id = "quay.io/{}/{}".format(self.namespace, name)
if self.protocol:
container_id = "{}{}".format(self.protocol, container_id)
container_description = ContainerDescription(
container_id,
type=self.container_type,
shell=self.shell,
)
if install and not self.cached_container_description(
targets,
namespace=self.namespace,
hash_func=self.hash_func,
resolution_cache=resolution_cache,
):
destination_info = {}
destination_for_container_type = kwds.get("destination_for_container_type")
if destination_for_container_type:
destination_info = destination_for_container_type(self.container_type)
container = CONTAINER_CLASSES[self.container_type](
container_description.identifier,
self.app_info,
tool_info,
destination_info,
{},
container_description,
)
self.pull(container)
if not self.auto_install:
container_description = self.cached_container_description(
targets,
namespace=self.namespace,
hash_func=self.hash_func,
resolution_cache=resolution_cache,
)
return container_description
|
https://github.com/galaxyproject/galaxy/issues/11125
|
urllib3.connectionpool DEBUG 2021-01-13 15:26:00,421 https://quay.io:443 "GET /api/v1/repository/biocontainers/porechop HTTP/1.1" 200 4033
galaxy.tool_util.deps.containers ERROR 2021-01-13 15:26:00,499 Could not get container description for tool 'toolshed.g2.bx.psu.edu/repos/iuc/porechop/porechop/0.2.3'
Traceback (most recent call last):
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 243, in find_best_container_description
resolved_container_description = self.resolve(enabled_container_types, tool_info, **kwds)
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 265, in resolve
container_description = container_resolver.resolve(enabled_container_types, tool_info, install=install, resolution_cache=resolution_cache, session=session)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 410, in resolve
if install and not self.cached_container_description(
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 385, in cached_container_description
return docker_cached_container_description(targets, namespace, hash_func, resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 202, in docker_cached_container_description
cached_images = list_docker_cached_mulled_images(namespace, hash_func=hash_func, resolution_cache=resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 71, in list_docker_cached_mulled_images
images_and_versions = unicodify(subprocess.check_output(command)).strip().splitlines()
File "/usr/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 489, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'docker'
|
FileNotFoundError
|
def __default_containers_resolvers(self):
default_resolvers = [
ExplicitContainerResolver(self.app_info),
ExplicitSingularityContainerResolver(self.app_info),
]
if self.enable_mulled_containers:
default_resolvers.extend(
[
CachedMulledDockerContainerResolver(
self.app_info, namespace="biocontainers"
),
CachedMulledDockerContainerResolver(self.app_info, namespace="local"),
CachedMulledSingularityContainerResolver(
self.app_info, namespace="biocontainers"
),
CachedMulledSingularityContainerResolver(
self.app_info, namespace="local"
),
MulledDockerContainerResolver(self.app_info, namespace="biocontainers"),
MulledSingularityContainerResolver(
self.app_info, namespace="biocontainers"
),
]
)
# BuildMulledDockerContainerResolver and BuildMulledSingularityContainerResolver both need the docker daemon to build images.
# If docker is not available, we don't load them.
build_mulled_docker_container_resolver = BuildMulledDockerContainerResolver(
self.app_info
)
if build_mulled_docker_container_resolver.docker_cli_available:
default_resolvers.extend(
[
build_mulled_docker_container_resolver,
BuildMulledSingularityContainerResolver(self.app_info),
]
)
return default_resolvers
|
def __default_containers_resolvers(self):
default_resolvers = [
ExplicitContainerResolver(self.app_info),
ExplicitSingularityContainerResolver(self.app_info),
]
if self.enable_mulled_containers:
default_resolvers.extend(
[
CachedMulledDockerContainerResolver(
self.app_info, namespace="biocontainers"
),
CachedMulledDockerContainerResolver(self.app_info, namespace="local"),
CachedMulledSingularityContainerResolver(
self.app_info, namespace="biocontainers"
),
CachedMulledSingularityContainerResolver(
self.app_info, namespace="local"
),
MulledDockerContainerResolver(self.app_info, namespace="biocontainers"),
MulledSingularityContainerResolver(
self.app_info, namespace="biocontainers"
),
BuildMulledDockerContainerResolver(self.app_info),
BuildMulledSingularityContainerResolver(self.app_info),
]
)
return default_resolvers
|
https://github.com/galaxyproject/galaxy/issues/11125
|
urllib3.connectionpool DEBUG 2021-01-13 15:26:00,421 https://quay.io:443 "GET /api/v1/repository/biocontainers/porechop HTTP/1.1" 200 4033
galaxy.tool_util.deps.containers ERROR 2021-01-13 15:26:00,499 Could not get container description for tool 'toolshed.g2.bx.psu.edu/repos/iuc/porechop/porechop/0.2.3'
Traceback (most recent call last):
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 243, in find_best_container_description
resolved_container_description = self.resolve(enabled_container_types, tool_info, **kwds)
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 265, in resolve
container_description = container_resolver.resolve(enabled_container_types, tool_info, install=install, resolution_cache=resolution_cache, session=session)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 410, in resolve
if install and not self.cached_container_description(
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 385, in cached_container_description
return docker_cached_container_description(targets, namespace, hash_func, resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 202, in docker_cached_container_description
cached_images = list_docker_cached_mulled_images(namespace, hash_func=hash_func, resolution_cache=resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 71, in list_docker_cached_mulled_images
images_and_versions = unicodify(subprocess.check_output(command)).strip().splitlines()
File "/usr/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 489, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'docker'
|
FileNotFoundError
|
def resolve(self, enabled_container_types, tool_info, install=False, **kwds):
if (
not self.docker_cli_available
or tool_info.requires_galaxy_python_environment
or self.container_type not in enabled_container_types
):
return None
targets = mulled_targets(tool_info)
if len(targets) == 0:
return None
if self.auto_install or install:
mull_targets(
targets,
involucro_context=self._get_involucro_context(),
**self._mulled_kwds,
)
return docker_cached_container_description(
targets, self.namespace, hash_func=self.hash_func, shell=self.shell
)
|
def resolve(self, enabled_container_types, tool_info, install=False, **kwds):
if (
tool_info.requires_galaxy_python_environment
or self.container_type not in enabled_container_types
):
return None
targets = mulled_targets(tool_info)
if len(targets) == 0:
return None
if self.auto_install or install:
mull_targets(
targets,
involucro_context=self._get_involucro_context(),
**self._mulled_kwds,
)
return docker_cached_container_description(
targets, self.namespace, hash_func=self.hash_func, shell=self.shell
)
|
https://github.com/galaxyproject/galaxy/issues/11125
|
urllib3.connectionpool DEBUG 2021-01-13 15:26:00,421 https://quay.io:443 "GET /api/v1/repository/biocontainers/porechop HTTP/1.1" 200 4033
galaxy.tool_util.deps.containers ERROR 2021-01-13 15:26:00,499 Could not get container description for tool 'toolshed.g2.bx.psu.edu/repos/iuc/porechop/porechop/0.2.3'
Traceback (most recent call last):
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 243, in find_best_container_description
resolved_container_description = self.resolve(enabled_container_types, tool_info, **kwds)
File "/galaxy/server/lib/galaxy/tool_util/deps/containers.py", line 265, in resolve
container_description = container_resolver.resolve(enabled_container_types, tool_info, install=install, resolution_cache=resolution_cache, session=session)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 410, in resolve
if install and not self.cached_container_description(
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 385, in cached_container_description
return docker_cached_container_description(targets, namespace, hash_func, resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 202, in docker_cached_container_description
cached_images = list_docker_cached_mulled_images(namespace, hash_func=hash_func, resolution_cache=resolution_cache)
File "/galaxy/server/lib/galaxy/tool_util/deps/container_resolvers/mulled.py", line 71, in list_docker_cached_mulled_images
images_and_versions = unicodify(subprocess.check_output(command)).strip().splitlines()
File "/usr/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 489, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1702, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
FileNotFoundError: [Errno 2] No such file or directory: 'docker'
|
FileNotFoundError
|
def record_success(self, execution_slice, job, outputs):
super().record_success(execution_slice, job, outputs)
if not self.collection_info:
for output_name, output in outputs:
self.invocation_step.add_output(output_name, output)
self.invocation_step.job = job
|
def record_success(self, execution_slice, job, outputs):
super().record_success(execution_slice, job, outputs)
if not self.collection_info:
self.invocation_step.job = job
|
https://github.com/galaxyproject/galaxy/issues/10966
|
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.run ERROR 2020-12-18 11:53:28,997 Failed to schedule Workflow[id=74152,name=COVID-19: variation analysis reporting v2], problem occurred on WorkflowStep[index=25,type=tool].
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 353, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = step_outputs[output_name]
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: KeyError: 'out_file1'
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: During handling of the above exception, another exception occurred:
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 190, in invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: incomplete_or_none = self._invoke_step(workflow_invocation_step)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 266, in _invoke_step
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: use_cached_job=self.workflow_invocation.use_cached_job)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 1666, in execute
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: collection_info = self.compute_collection_info(progress, step, all_inputs)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 336, in compute_collection_info
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: progress, step, all_inputs
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 354, in _find_collections_to_match
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: data = progress.replacement_for_input(step, input_dict)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 337, in replacement_for_input
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = self.replacement_for_connection(connection[0], is_data=is_data)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: raise Exception(message)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Exception: Workflow evaluation problem - failed to find output_name out_file1 in step_outputs {}
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.run ERROR 2020-12-18 11:53:29,002 Failed to execute scheduled workflow.
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 353, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = step_outputs[output_name]
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: KeyError: 'out_file1'
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: During handling of the above exception, another exception occurred:
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 83, in __invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: outputs = invoker.invoke()
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 190, in invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: incomplete_or_none = self._invoke_step(workflow_invocation_step)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 266, in _invoke_step
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: use_cached_job=self.workflow_invocation.use_cached_job)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 1666, in execute
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: collection_info = self.compute_collection_info(progress, step, all_inputs)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 336, in compute_collection_info
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: progress, step, all_inputs
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 354, in _find_collections_to_match
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: data = progress.replacement_for_input(step, input_dict)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 337, in replacement_for_input
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = self.replacement_for_connection(connection[0], is_data=is_data)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: raise Exception(message)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Exception: Workflow evaluation problem - failed to find output_name out_file1 in step_outputs {}
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.scheduling_manager DEBUG 2020-12-18 11:53:29,021 Workflow invocation [129864] scheduled
|
KeyError
|
def ensure_implicit_collections_populated(self, history, params):
if not self.collection_info:
return
history = history or self.tool.get_default_history_by_trans(self.trans)
if self.invocation_step.is_new:
self.precreate_output_collections(history, params)
for output_name, implicit_collection in self.implicit_collections.items():
self.invocation_step.add_output(output_name, implicit_collection)
else:
collections = {}
for output_assoc in self.invocation_step.output_dataset_collections:
implicit_collection = output_assoc.dataset_collection
assert hasattr(
implicit_collection, "history_content_type"
) # make sure it is an HDCA and not a DC
collections[output_assoc.output_name] = output_assoc.dataset_collection
self.implicit_collections = collections
self.invocation_step.implicit_collection_jobs = self.implicit_collection_jobs
|
def ensure_implicit_collections_populated(self, history, params):
if not self.collection_info:
return
history = history or self.tool.get_default_history_by_trans(self.trans)
if self.invocation_step.is_new:
self.precreate_output_collections(history, params)
else:
collections = {}
for output_assoc in self.invocation_step.output_dataset_collections:
implicit_collection = output_assoc.dataset_collection
assert hasattr(
implicit_collection, "history_content_type"
) # make sure it is an HDCA and not a DC
collections[output_assoc.output_name] = output_assoc.dataset_collection
self.implicit_collections = collections
self.invocation_step.implicit_collection_jobs = self.implicit_collection_jobs
|
https://github.com/galaxyproject/galaxy/issues/10966
|
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.run ERROR 2020-12-18 11:53:28,997 Failed to schedule Workflow[id=74152,name=COVID-19: variation analysis reporting v2], problem occurred on WorkflowStep[index=25,type=tool].
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 353, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = step_outputs[output_name]
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: KeyError: 'out_file1'
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: During handling of the above exception, another exception occurred:
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 190, in invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: incomplete_or_none = self._invoke_step(workflow_invocation_step)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 266, in _invoke_step
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: use_cached_job=self.workflow_invocation.use_cached_job)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 1666, in execute
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: collection_info = self.compute_collection_info(progress, step, all_inputs)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 336, in compute_collection_info
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: progress, step, all_inputs
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 354, in _find_collections_to_match
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: data = progress.replacement_for_input(step, input_dict)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 337, in replacement_for_input
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = self.replacement_for_connection(connection[0], is_data=is_data)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: raise Exception(message)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Exception: Workflow evaluation problem - failed to find output_name out_file1 in step_outputs {}
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.run ERROR 2020-12-18 11:53:29,002 Failed to execute scheduled workflow.
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 353, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = step_outputs[output_name]
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: KeyError: 'out_file1'
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: During handling of the above exception, another exception occurred:
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 83, in __invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: outputs = invoker.invoke()
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 190, in invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: incomplete_or_none = self._invoke_step(workflow_invocation_step)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 266, in _invoke_step
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: use_cached_job=self.workflow_invocation.use_cached_job)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 1666, in execute
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: collection_info = self.compute_collection_info(progress, step, all_inputs)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 336, in compute_collection_info
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: progress, step, all_inputs
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 354, in _find_collections_to_match
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: data = progress.replacement_for_input(step, input_dict)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 337, in replacement_for_input
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = self.replacement_for_connection(connection[0], is_data=is_data)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: raise Exception(message)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Exception: Workflow evaluation problem - failed to find output_name out_file1 in step_outputs {}
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.scheduling_manager DEBUG 2020-12-18 11:53:29,021 Workflow invocation [129864] scheduled
|
KeyError
|
def set_step_outputs(self, invocation_step, outputs, already_persisted=False):
step = invocation_step.workflow_step
if invocation_step.output_value:
outputs[invocation_step.output_value.workflow_output.output_name] = (
invocation_step.output_value.value
)
self.outputs[step.id] = outputs
if not already_persisted:
for workflow_output in step.workflow_outputs:
output_name = workflow_output.output_name
if output_name not in outputs:
message = "Failed to find expected workflow output [{}] in step outputs [{}]".format(
output_name, outputs
)
# raise KeyError(message)
# Pre-18.01 we would have never even detected this output wasn't configured
# and even in 18.01 we don't have a way to tell the user something bad is
# happening so I guess we just log a debug message and continue sadly for now.
# Once https://github.com/galaxyproject/galaxy/issues/5142 is complete we could
# at least tell the user what happened, give them a warning.
log.debug(message)
continue
output = outputs[output_name]
self._record_workflow_output(
step,
workflow_output,
output=output,
)
|
def set_step_outputs(self, invocation_step, outputs, already_persisted=False):
step = invocation_step.workflow_step
if invocation_step.output_value:
outputs[invocation_step.output_value.workflow_output.output_name] = (
invocation_step.output_value.value
)
self.outputs[step.id] = outputs
if not already_persisted:
for output_name, output_object in outputs.items():
if hasattr(output_object, "history_content_type"):
invocation_step.add_output(output_name, output_object)
else:
# This is a problem, this non-data, non-collection output
# won't be recovered on a subsequent workflow scheduling
# iteration. This seems to have been a pre-existing problem
# prior to #4584 though.
pass
for workflow_output in step.workflow_outputs:
output_name = workflow_output.output_name
if output_name not in outputs:
message = "Failed to find expected workflow output [{}] in step outputs [{}]".format(
output_name, outputs
)
# raise KeyError(message)
# Pre-18.01 we would have never even detected this output wasn't configured
# and even in 18.01 we don't have a way to tell the user something bad is
# happening so I guess we just log a debug message and continue sadly for now.
# Once https://github.com/galaxyproject/galaxy/issues/5142 is complete we could
# at least tell the user what happened, give them a warning.
log.debug(message)
continue
output = outputs[output_name]
self._record_workflow_output(
step,
workflow_output,
output=output,
)
|
https://github.com/galaxyproject/galaxy/issues/10966
|
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.run ERROR 2020-12-18 11:53:28,997 Failed to schedule Workflow[id=74152,name=COVID-19: variation analysis reporting v2], problem occurred on WorkflowStep[index=25,type=tool].
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 353, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = step_outputs[output_name]
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: KeyError: 'out_file1'
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: During handling of the above exception, another exception occurred:
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 190, in invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: incomplete_or_none = self._invoke_step(workflow_invocation_step)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 266, in _invoke_step
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: use_cached_job=self.workflow_invocation.use_cached_job)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 1666, in execute
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: collection_info = self.compute_collection_info(progress, step, all_inputs)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 336, in compute_collection_info
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: progress, step, all_inputs
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 354, in _find_collections_to_match
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: data = progress.replacement_for_input(step, input_dict)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 337, in replacement_for_input
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = self.replacement_for_connection(connection[0], is_data=is_data)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: raise Exception(message)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Exception: Workflow evaluation problem - failed to find output_name out_file1 in step_outputs {}
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.run ERROR 2020-12-18 11:53:29,002 Failed to execute scheduled workflow.
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 353, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = step_outputs[output_name]
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: KeyError: 'out_file1'
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: During handling of the above exception, another exception occurred:
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Traceback (most recent call last):
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 83, in __invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: outputs = invoker.invoke()
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 190, in invoke
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: incomplete_or_none = self._invoke_step(workflow_invocation_step)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 266, in _invoke_step
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: use_cached_job=self.workflow_invocation.use_cached_job)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 1666, in execute
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: collection_info = self.compute_collection_info(progress, step, all_inputs)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 336, in compute_collection_info
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: progress, step, all_inputs
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/modules.py", line 354, in _find_collections_to_match
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: data = progress.replacement_for_input(step, input_dict)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 337, in replacement_for_input
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: replacement = self.replacement_for_connection(connection[0], is_data=is_data)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: File "/opt/galaxy/server/lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: raise Exception(message)
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: Exception: Workflow evaluation problem - failed to find output_name out_file1 in step_outputs {}
Dec 18 11:53:29 sn04.bi.uni-freiburg.de python[2113691]: galaxy.workflow.scheduling_manager DEBUG 2020-12-18 11:53:29,021 Workflow invocation [129864] scheduled
|
KeyError
|
def _download(self, rel_path):
try:
log.debug(
"Pulling key '%s' into cache to %s",
rel_path,
self._get_cache_path(rel_path),
)
key = self._bucket.get_key(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical(
"File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path,
key.size,
self.cache_size,
)
return False
if self.use_axel:
log.debug(
"Parallel pulled key '%s' into cache to %s",
rel_path,
self._get_cache_path(rel_path),
)
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
ret_code = subprocess.call(["axel", "-a", "-n", str(ncores), url])
if ret_code == 0:
return True
else:
log.debug(
"Pulled key '%s' into cache to %s",
rel_path,
self._get_cache_path(rel_path),
)
self.transfer_progress = 0 # Reset transfer progress counter
key.get_contents_to_filename(
self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10
)
return True
except S3ResponseError:
log.exception(
"Problem downloading key '%s' from S3 bucket '%s'",
rel_path,
self._bucket.name,
)
return False
|
def _download(self, rel_path):
try:
log.debug(
"Pulling key '%s' into cache to %s",
rel_path,
self._get_cache_path(rel_path),
)
key = self._bucket.get_key(rel_path)
# Test if cache is large enough to hold the new file
if self.cache_size > 0 and key.size > self.cache_size:
log.critical(
"File %s is larger (%s) than the cache size (%s). Cannot download.",
rel_path,
key.size,
self.cache_size,
)
return False
if self.use_axel:
log.debug(
"Parallel pulled key '%s' into cache to %s",
rel_path,
self._get_cache_path(rel_path),
)
ncores = multiprocessing.cpu_count()
url = key.generate_url(7200)
ret_code = subprocess.call(["axel", "-a", "-n", ncores, url])
if ret_code == 0:
return True
else:
log.debug(
"Pulled key '%s' into cache to %s",
rel_path,
self._get_cache_path(rel_path),
)
self.transfer_progress = 0 # Reset transfer progress counter
key.get_contents_to_filename(
self._get_cache_path(rel_path), cb=self._transfer_cb, num_cb=10
)
return True
except S3ResponseError:
log.exception(
"Problem downloading key '%s' from S3 bucket '%s'",
rel_path,
self._bucket.name,
)
return False
|
https://github.com/galaxyproject/galaxy/issues/10824
|
galaxy.objectstore.s3 DEBUG 2020-11-30 12:41:25,364 [p:18802,w:1,m:0] [uWSGIWorker1Core1] Pulling key '000/dataset_74.dat' into cache to /mnt/volume/shared/galaxy/var/database/object_store_cache/000/dataset_74.dat
galaxy.objectstore.s3 DEBUG 2020-11-30 12:41:25,370 [p:18802,w:1,m:0] [uWSGIWorker1Core1] Parallel pulled key '000/dataset_74.dat' into cache to /mnt/volume/shared/galaxy/var/database/object_store_cache/000/dataset_74.dat
galaxy.web.framework.decorators ERROR 2020-11-30 12:41:25,381 [p:18802,w:1,m:0] [uWSGIWorker1Core1] Uncaught exception in exposed API method:
Traceback (most recent call last):
File "/mnt/volume/shared/galaxy/server/lib/galaxy/web/framework/decorators.py", line 282, in decorator
rval = func(self, trans, *args, **kwargs)
File "/mnt/volume/shared/galaxy/server/lib/galaxy/webapps/galaxy/api/folder_contents.py", line 113, in index
library_dataset_dict = content_item.to_dict()
File "/mnt/volume/shared/galaxy/server/lib/galaxy/model/__init__.py", line 3493, in to_dict
file_name=ldda.file_name,
File "/mnt/volume/shared/galaxy/server/lib/galaxy/model/__init__.py", line 2510, in get_file_name
return self.dataset.get_file_name()
File "/mnt/volume/shared/galaxy/server/lib/galaxy/model/__init__.py", line 2221, in get_file_name
return self.object_store.get_filename(self)
File "/mnt/volume/shared/galaxy/server/lib/galaxy/objectstore/__init__.py", line 561, in get_filename
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
File "/mnt/volume/shared/galaxy/server/lib/galaxy/objectstore/__init__.py", line 587, in _call_method
return store.__getattribute__(method)(obj, **kwargs)
File "/mnt/volume/shared/galaxy/server/lib/galaxy/objectstore/s3.py", line 671, in get_filename
if self._pull_into_cache(rel_path):
File "/mnt/volume/shared/galaxy/server/lib/galaxy/objectstore/s3.py", line 420, in _pull_into_cache
file_ok = self._download(rel_path)
File "/mnt/volume/shared/galaxy/server/lib/galaxy/objectstore/s3.py", line 440, in _download
ret_code = subprocess.call(['axel', '-a', '-n', ncores, url])
File "/usr/lib/python3.6/subprocess.py", line 287, in call
with Popen(*popenargs, **kwargs) as p:
File "/usr/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.6/subprocess.py", line 1295, in _execute_child
restore_signals, start_new_session, preexec_fn)
TypeError: expected str, bytes or os.PathLike object, not int
|
TypeError
|
def preferences(self):
user = self.trans.user
return user and user.extra_preferences or defaultdict(lambda: None)
|
def preferences(self):
user = self.trans.user
return user and user.extra_preferences
|
https://github.com/galaxyproject/galaxy/issues/10595
|
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: Traceback (most recent call last):
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/__init__.py", line 1040, in unicodify
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: value = str(value)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/Cheetah/Template.py", line 1053, in __unicode__
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return getattr(self, mainMethName)()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "cheetah_DynamicallyCompiledCheetahTemplate_1604111380_182973_40258.py", line 86, in respond
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: KeyError: 'dropbox|access_token'
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: During handling of the above exception, another exception occurred:
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: Traceback (most recent call last):
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/__init__.py", line 1042, in unicodify
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: value = str(value)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/Cheetah/Template.py", line 1053, in __unicode__
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return getattr(self, mainMethName)()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "cheetah_DynamicallyCompiledCheetahTemplate_1604111380_182973_40258.py", line 86, in respond
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: KeyError: 'dropbox|access_token'
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: During handling of the above exception, another exception occurred:
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: Traceback (most recent call last):
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/jobs/runners/__init__.py", line 236, in prepare_job
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: job_wrapper.prepare()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/jobs/__init__.py", line 1112, in prepare
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: self.command_line, self.extra_filenames, self.environment_variables = tool_evaluator.build()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 450, in build
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: raise e
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 446, in build
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: self.__build_config_files()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 511, in __build_config_files
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: config_text, is_template = self.__build_config_file_text(content)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 600, in __build_config_file_text
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: file_sources_dict = self.app.file_sources.to_dict(for_serialization=True, user_context=user_context)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/__init__.py", line 144, in to_dict
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: 'file_sources': self.plugins_to_dict(for_serialization=for_serialization, user_context=user_context),
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/__init__.py", line 138, in plugins_to_dict
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: el = file_source.to_dict(for_serialization=for_serialization, user_context=user_context)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/sources/__init__.py", line 97, in to_dict
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: rval.update(self._serialization_props(user_context=user_context))
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/sources/_pyfilesystem2.py", line 69, in _serialization_props
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: effective_props[key] = self._evaluate_prop(val, user_context=user_context)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/sources/__init__.py", line 132, in _evaluate_prop
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: rval = fill_template(prop_val, context=template_context, futurized=True)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/template.py", line 127, in fill_template
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: raise first_exception or e
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/template.py", line 81, in fill_template
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return unicodify(t)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/__init__.py", line 1048, in unicodify
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: msg = "Value '{}' could not be coerced to Unicode: {}('{}')".format(value, type(e).__name__, e)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/Cheetah/Template.py", line 1053, in __unicode__
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return getattr(self, mainMethName)()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "cheetah_DynamicallyCompiledCheetahTemplate_1604111380_182973_40258.py", line 86, in respond
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: KeyError: 'dropbox|access_token'
|
KeyError
|
def extra_preferences(self):
data = defaultdict(lambda: None)
extra_user_preferences = self.preferences.get("extra_user_preferences")
if extra_user_preferences:
try:
data = json.loads(extra_user_preferences)
except Exception:
pass
return data
|
def extra_preferences(self):
data = {}
extra_user_preferences = self.preferences.get("extra_user_preferences")
if extra_user_preferences:
try:
data = json.loads(extra_user_preferences)
except Exception:
pass
return data
|
https://github.com/galaxyproject/galaxy/issues/10595
|
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: Traceback (most recent call last):
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/__init__.py", line 1040, in unicodify
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: value = str(value)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/Cheetah/Template.py", line 1053, in __unicode__
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return getattr(self, mainMethName)()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "cheetah_DynamicallyCompiledCheetahTemplate_1604111380_182973_40258.py", line 86, in respond
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: KeyError: 'dropbox|access_token'
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: During handling of the above exception, another exception occurred:
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: Traceback (most recent call last):
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/__init__.py", line 1042, in unicodify
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: value = str(value)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/Cheetah/Template.py", line 1053, in __unicode__
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return getattr(self, mainMethName)()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "cheetah_DynamicallyCompiledCheetahTemplate_1604111380_182973_40258.py", line 86, in respond
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: KeyError: 'dropbox|access_token'
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: During handling of the above exception, another exception occurred:
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: Traceback (most recent call last):
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/jobs/runners/__init__.py", line 236, in prepare_job
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: job_wrapper.prepare()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/jobs/__init__.py", line 1112, in prepare
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: self.command_line, self.extra_filenames, self.environment_variables = tool_evaluator.build()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 450, in build
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: raise e
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 446, in build
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: self.__build_config_files()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 511, in __build_config_files
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: config_text, is_template = self.__build_config_file_text(content)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/tools/evaluation.py", line 600, in __build_config_file_text
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: file_sources_dict = self.app.file_sources.to_dict(for_serialization=True, user_context=user_context)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/__init__.py", line 144, in to_dict
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: 'file_sources': self.plugins_to_dict(for_serialization=for_serialization, user_context=user_context),
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/__init__.py", line 138, in plugins_to_dict
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: el = file_source.to_dict(for_serialization=for_serialization, user_context=user_context)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/sources/__init__.py", line 97, in to_dict
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: rval.update(self._serialization_props(user_context=user_context))
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/sources/_pyfilesystem2.py", line 69, in _serialization_props
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: effective_props[key] = self._evaluate_prop(val, user_context=user_context)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/files/sources/__init__.py", line 132, in _evaluate_prop
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: rval = fill_template(prop_val, context=template_context, futurized=True)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/template.py", line 127, in fill_template
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: raise first_exception or e
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/template.py", line 81, in fill_template
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return unicodify(t)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/server/lib/galaxy/util/__init__.py", line 1048, in unicodify
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: msg = "Value '{}' could not be coerced to Unicode: {}('{}')".format(value, type(e).__name__, e)
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/Cheetah/Template.py", line 1053, in __unicode__
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: return getattr(self, mainMethName)()
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: File "cheetah_DynamicallyCompiledCheetahTemplate_1604111380_182973_40258.py", line 86, in respond
Oct 31 10:38:52 sn04.bi.uni-freiburg.de python[1936614]: KeyError: 'dropbox|access_token'
|
KeyError
|
def dataset_states_and_extensions_summary(self):
if not hasattr(self, "_dataset_states_and_extensions_summary"):
db_session = object_session(self)
dc = alias(DatasetCollection.table)
de = alias(DatasetCollectionElement.table)
hda = alias(HistoryDatasetAssociation.table)
dataset = alias(Dataset.table)
select_from = dc.outerjoin(de, de.c.dataset_collection_id == dc.c.id)
depth_collection_type = self.collection_type
while ":" in depth_collection_type:
child_collection = alias(DatasetCollection.table)
child_collection_element = alias(DatasetCollectionElement.table)
select_from = select_from.outerjoin(
child_collection, child_collection.c.id == de.c.child_collection_id
)
select_from = select_from.outerjoin(
child_collection_element,
child_collection_element.c.dataset_collection_id
== child_collection.c.id,
)
de = child_collection_element
depth_collection_type = depth_collection_type.split(":", 1)[1]
select_from = select_from.outerjoin(hda, hda.c.id == de.c.hda_id).outerjoin(
dataset, hda.c.dataset_id == dataset.c.id
)
select_stmt = (
select([hda.c.extension, dataset.c.state])
.select_from(select_from)
.where(dc.c.id == self.id)
.distinct()
)
extensions = set()
states = set()
for extension, state in db_session.execute(select_stmt).fetchall():
if state is not None:
# query may return (None, None) if not collection elements present
states.add(state)
extensions.add(extension)
self._dataset_states_and_extensions_summary = (states, extensions)
return self._dataset_states_and_extensions_summary
|
def dataset_states_and_extensions_summary(self):
if not hasattr(self, "_dataset_states_and_extensions_summary"):
db_session = object_session(self)
dc = alias(DatasetCollection.table)
de = alias(DatasetCollectionElement.table)
hda = alias(HistoryDatasetAssociation.table)
dataset = alias(Dataset.table)
select_from = dc.outerjoin(de, de.c.dataset_collection_id == dc.c.id)
depth_collection_type = self.collection_type
while ":" in depth_collection_type:
child_collection = alias(DatasetCollection.table)
child_collection_element = alias(DatasetCollectionElement.table)
select_from = select_from.outerjoin(
child_collection, child_collection.c.id == de.c.child_collection_id
)
select_from = select_from.outerjoin(
child_collection_element,
child_collection_element.c.dataset_collection_id
== child_collection.c.id,
)
de = child_collection_element
depth_collection_type = depth_collection_type.split(":", 1)[1]
select_from = select_from.outerjoin(hda, hda.c.id == de.c.hda_id).outerjoin(
dataset, hda.c.dataset_id == dataset.c.id
)
select_stmt = (
select([hda.c.extension, dataset.c.state])
.select_from(select_from)
.where(dc.c.id == self.id)
.distinct()
)
extensions = set()
states = set()
for extension, state in db_session.execute(select_stmt).fetchall():
states.add(state)
extensions.add(extension)
self._dataset_states_and_extensions_summary = (states, extensions)
return self._dataset_states_and_extensions_summary
|
https://github.com/galaxyproject/galaxy/issues/8390
|
[pid: 63775|app: 0|req: 160/596] 127.0.0.1 () {50 vars in 1324 bytes} [Wed Jul 31 09:38:45 2019] GET /api/histories/1cd8e2f6b131e891/contents?details=3f5830403180d620&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-07-31T16%3A38%3A41.000Z&qv=False&qv=False => generated 2 bytes in 53 msecs (HTTP/1.1 200) 3 headers in 139 bytes (1 switches on core 3)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,835 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,957 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:46,960 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,070 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:47,086 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.jobs.runners.drmaa DEBUG 2019-07-31 09:38:47,465 [p:63775,w:2,m:0] [SlurmRunner.monitor_thread] (10/24674328) state change: job finished normally
galaxy.model.metadata DEBUG 2019-07-31 09:38:47,587 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] loading metadata from file for: HistoryDatasetAssociation 16
galaxy.jobs INFO 2019-07-31 09:38:47,692 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] Collecting metrics for Job 10 in /project/6004808/ncm3/galaxy-19.05-git/galaxy-database/jobs_directory/000/10
galaxy.jobs DEBUG 2019-07-31 09:38:47,703 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] job 10 ended (finish() executed in (159.904 ms))
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,090 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,247 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'list'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,343 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.BuildListCollectionTool object at 0x7f32a9f2a690>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,348 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__BUILD_LIST__] created job [11] (153.141 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,358 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __BUILD_LIST__ request: (178.532 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,368 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 10 of invocation 4 invoked (189.270 ms)
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,441 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 0 elements
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,469 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output_discarded'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,512 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.FilterFromFileTool object at 0x7f32a9f63810>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,516 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__FILTER_FROM_FILE__] created job [12] (106.710 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,524 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __FILTER_FROM_FILE__ request: (140.242 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,533 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 11 of invocation 4 invoked (164.122 ms)
galaxy.workflow.run ERROR 2019-07-31 09:38:48,553 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=2,name=test merge empty (imported from uploaded file)], problem occurred on WorkflowStep[index=4,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.run ERROR 2019-07-31 09:38:48,697 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,712 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
|
Exception
|
def replacement_for_connection(self, connection, is_data=True):
output_step_id = connection.output_step.id
if output_step_id not in self.outputs:
template = "No outputs found for step id %s, outputs are %s"
message = template % (output_step_id, self.outputs)
raise Exception(message)
step_outputs = self.outputs[output_step_id]
if step_outputs is STEP_OUTPUT_DELAYED:
delayed_why = (
"dependent step [%s] delayed, so this step must be delayed" % output_step_id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
output_name = connection.output_name
try:
replacement = step_outputs[output_name]
except KeyError:
# Must resolve.
template = "Workflow evaluation problem - failed to find output_name %s in step_outputs %s"
message = template % (output_name, step_outputs)
raise Exception(message)
if isinstance(replacement, model.HistoryDatasetCollectionAssociation):
if not replacement.collection.populated:
if not replacement.waiting_for_elements:
# If we are not waiting for elements, there was some
# problem creating the collection. Collection will never
# be populated.
# TODO: consider distinguish between cancelled and failed?
raise modules.CancelWorkflowEvaluation()
delayed_why = (
"dependent collection [%s] not yet populated with datasets"
% replacement.id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
data_inputs = (
model.HistoryDatasetAssociation,
model.HistoryDatasetCollectionAssociation,
model.DatasetCollection,
)
if not is_data and isinstance(replacement, data_inputs):
if isinstance(replacement, model.HistoryDatasetAssociation):
if replacement.is_pending:
raise modules.DelayedWorkflowEvaluation()
if not replacement.is_ok:
raise modules.CancelWorkflowEvaluation()
else:
if not replacement.collection.populated:
raise modules.DelayedWorkflowEvaluation()
pending = False
for dataset_instance in replacement.dataset_instances:
if dataset_instance.is_pending:
pending = True
elif not dataset_instance.is_ok:
raise modules.CancelWorkflowEvaluation()
if pending:
raise modules.DelayedWorkflowEvaluation()
return replacement
|
def replacement_for_connection(self, connection, is_data=True):
output_step_id = connection.output_step.id
if output_step_id not in self.outputs:
template = "No outputs found for step id %s, outputs are %s"
message = template % (output_step_id, self.outputs)
raise Exception(message)
step_outputs = self.outputs[output_step_id]
if step_outputs is STEP_OUTPUT_DELAYED:
delayed_why = (
"dependent step [%s] delayed, so this step must be delayed" % output_step_id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
output_name = connection.output_name
try:
replacement = step_outputs[output_name]
except KeyError:
replacement = self.inputs_by_step_id.get(output_step_id)
if (
connection.output_step.type == "parameter_input"
and output_step_id is not None
):
# FIXME: parameter_input step outputs should be properly recorded as step outputs, but for now we can
# short-circuit and just pick the input value
pass
else:
# Must resolve.
template = "Workflow evaluation problem - failed to find output_name %s in step_outputs %s"
message = template % (output_name, step_outputs)
raise Exception(message)
if isinstance(replacement, model.HistoryDatasetCollectionAssociation):
if not replacement.collection.populated:
if not replacement.waiting_for_elements:
# If we are not waiting for elements, there was some
# problem creating the collection. Collection will never
# be populated.
# TODO: consider distinguish between cancelled and failed?
raise modules.CancelWorkflowEvaluation()
delayed_why = (
"dependent collection [%s] not yet populated with datasets"
% replacement.id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
data_inputs = (
model.HistoryDatasetAssociation,
model.HistoryDatasetCollectionAssociation,
model.DatasetCollection,
)
if not is_data and isinstance(replacement, data_inputs):
if isinstance(replacement, model.HistoryDatasetAssociation):
if replacement.is_pending:
raise modules.DelayedWorkflowEvaluation()
if not replacement.is_ok:
raise modules.CancelWorkflowEvaluation()
else:
if not replacement.collection.populated:
raise modules.DelayedWorkflowEvaluation()
pending = False
for dataset_instance in replacement.dataset_instances:
if dataset_instance.is_pending:
pending = True
elif not dataset_instance.is_ok:
raise modules.CancelWorkflowEvaluation()
if pending:
raise modules.DelayedWorkflowEvaluation()
return replacement
|
https://github.com/galaxyproject/galaxy/issues/8390
|
[pid: 63775|app: 0|req: 160/596] 127.0.0.1 () {50 vars in 1324 bytes} [Wed Jul 31 09:38:45 2019] GET /api/histories/1cd8e2f6b131e891/contents?details=3f5830403180d620&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-07-31T16%3A38%3A41.000Z&qv=False&qv=False => generated 2 bytes in 53 msecs (HTTP/1.1 200) 3 headers in 139 bytes (1 switches on core 3)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,835 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,957 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:46,960 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,070 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:47,086 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.jobs.runners.drmaa DEBUG 2019-07-31 09:38:47,465 [p:63775,w:2,m:0] [SlurmRunner.monitor_thread] (10/24674328) state change: job finished normally
galaxy.model.metadata DEBUG 2019-07-31 09:38:47,587 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] loading metadata from file for: HistoryDatasetAssociation 16
galaxy.jobs INFO 2019-07-31 09:38:47,692 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] Collecting metrics for Job 10 in /project/6004808/ncm3/galaxy-19.05-git/galaxy-database/jobs_directory/000/10
galaxy.jobs DEBUG 2019-07-31 09:38:47,703 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] job 10 ended (finish() executed in (159.904 ms))
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,090 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,247 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'list'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,343 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.BuildListCollectionTool object at 0x7f32a9f2a690>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,348 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__BUILD_LIST__] created job [11] (153.141 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,358 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __BUILD_LIST__ request: (178.532 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,368 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 10 of invocation 4 invoked (189.270 ms)
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,441 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 0 elements
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,469 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output_discarded'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,512 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.FilterFromFileTool object at 0x7f32a9f63810>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,516 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__FILTER_FROM_FILE__] created job [12] (106.710 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,524 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __FILTER_FROM_FILE__ request: (140.242 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,533 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 11 of invocation 4 invoked (164.122 ms)
galaxy.workflow.run ERROR 2019-07-31 09:38:48,553 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=2,name=test merge empty (imported from uploaded file)], problem occurred on WorkflowStep[index=4,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.run ERROR 2019-07-31 09:38:48,697 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,712 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
|
Exception
|
def set_step_outputs(self, invocation_step, outputs, already_persisted=False):
step = invocation_step.workflow_step
if invocation_step.output_value:
outputs[invocation_step.output_value.workflow_output.output_name] = (
invocation_step.output_value.value
)
self.outputs[step.id] = outputs
if not already_persisted:
for output_name, output_object in outputs.items():
if hasattr(output_object, "history_content_type"):
invocation_step.add_output(output_name, output_object)
else:
# This is a problem, this non-data, non-collection output
# won't be recovered on a subsequent workflow scheduling
# iteration. This seems to have been a pre-existing problem
# prior to #4584 though.
pass
for workflow_output in step.workflow_outputs:
output_name = workflow_output.output_name
if output_name not in outputs:
message = f"Failed to find expected workflow output [{output_name}] in step outputs [{outputs}]"
# raise KeyError(message)
# Pre-18.01 we would have never even detected this output wasn't configured
# and even in 18.01 we don't have a way to tell the user something bad is
# happening so I guess we just log a debug message and continue sadly for now.
# Once https://github.com/galaxyproject/galaxy/issues/5142 is complete we could
# at least tell the user what happened, give them a warning.
log.debug(message)
continue
output = outputs[output_name]
self._record_workflow_output(
step,
workflow_output,
output=output,
)
|
def set_step_outputs(self, invocation_step, outputs, already_persisted=False):
step = invocation_step.workflow_step
self.outputs[step.id] = outputs
if not already_persisted:
for output_name, output_object in outputs.items():
if hasattr(output_object, "history_content_type"):
invocation_step.add_output(output_name, output_object)
else:
# This is a problem, this non-data, non-collection output
# won't be recovered on a subsequent workflow scheduling
# iteration. This seems to have been a pre-existing problem
# prior to #4584 though.
pass
for workflow_output in step.workflow_outputs:
output_name = workflow_output.output_name
if output_name not in outputs:
message = f"Failed to find expected workflow output [{output_name}] in step outputs [{outputs}]"
# raise KeyError(message)
# Pre-18.01 we would have never even detected this output wasn't configured
# and even in 18.01 we don't have a way to tell the user something bad is
# happening so I guess we just log a debug message and continue sadly for now.
# Once https://github.com/galaxyproject/galaxy/issues/5142 is complete we could
# at least tell the user what happened, give them a warning.
log.debug(message)
continue
output = outputs[output_name]
self._record_workflow_output(
step,
workflow_output,
output=output,
)
|
https://github.com/galaxyproject/galaxy/issues/8390
|
[pid: 63775|app: 0|req: 160/596] 127.0.0.1 () {50 vars in 1324 bytes} [Wed Jul 31 09:38:45 2019] GET /api/histories/1cd8e2f6b131e891/contents?details=3f5830403180d620&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-07-31T16%3A38%3A41.000Z&qv=False&qv=False => generated 2 bytes in 53 msecs (HTTP/1.1 200) 3 headers in 139 bytes (1 switches on core 3)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,835 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,957 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:46,960 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,070 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:47,086 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.jobs.runners.drmaa DEBUG 2019-07-31 09:38:47,465 [p:63775,w:2,m:0] [SlurmRunner.monitor_thread] (10/24674328) state change: job finished normally
galaxy.model.metadata DEBUG 2019-07-31 09:38:47,587 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] loading metadata from file for: HistoryDatasetAssociation 16
galaxy.jobs INFO 2019-07-31 09:38:47,692 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] Collecting metrics for Job 10 in /project/6004808/ncm3/galaxy-19.05-git/galaxy-database/jobs_directory/000/10
galaxy.jobs DEBUG 2019-07-31 09:38:47,703 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] job 10 ended (finish() executed in (159.904 ms))
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,090 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,247 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'list'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,343 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.BuildListCollectionTool object at 0x7f32a9f2a690>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,348 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__BUILD_LIST__] created job [11] (153.141 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,358 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __BUILD_LIST__ request: (178.532 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,368 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 10 of invocation 4 invoked (189.270 ms)
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,441 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 0 elements
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,469 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output_discarded'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,512 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.FilterFromFileTool object at 0x7f32a9f63810>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,516 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__FILTER_FROM_FILE__] created job [12] (106.710 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,524 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __FILTER_FROM_FILE__ request: (140.242 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,533 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 11 of invocation 4 invoked (164.122 ms)
galaxy.workflow.run ERROR 2019-07-31 09:38:48,553 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=2,name=test merge empty (imported from uploaded file)], problem occurred on WorkflowStep[index=4,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.run ERROR 2019-07-31 09:38:48,697 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,712 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
|
Exception
|
def replacement_for_connection(self, connection, is_data=True):
output_step_id = connection.output_step.id
if output_step_id not in self.outputs:
template = "No outputs found for step id %s, outputs are %s"
message = template % (output_step_id, self.outputs)
raise Exception(message)
step_outputs = self.outputs[output_step_id]
if step_outputs is STEP_OUTPUT_DELAYED:
delayed_why = (
"dependent step [%s] delayed, so this step must be delayed" % output_step_id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
output_name = connection.output_name
try:
replacement = step_outputs[output_name]
except KeyError:
# Must resolve.
template = "Workflow evaluation problem - failed to find output_name %s in step_outputs %s"
message = template % (output_name, step_outputs)
raise Exception(message)
if isinstance(replacement, model.HistoryDatasetCollectionAssociation):
if not replacement.collection.populated:
if not replacement.collection.waiting_for_elements:
# If we are not waiting for elements, there was some
# problem creating the collection. Collection will never
# be populated.
# TODO: consider distinguish between cancelled and failed?
raise modules.CancelWorkflowEvaluation()
delayed_why = (
"dependent collection [%s] not yet populated with datasets"
% replacement.id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
data_inputs = (
model.HistoryDatasetAssociation,
model.HistoryDatasetCollectionAssociation,
model.DatasetCollection,
)
if not is_data and isinstance(replacement, data_inputs):
if isinstance(replacement, model.HistoryDatasetAssociation):
if replacement.is_pending:
raise modules.DelayedWorkflowEvaluation()
if not replacement.is_ok:
raise modules.CancelWorkflowEvaluation()
else:
if not replacement.collection.populated:
raise modules.DelayedWorkflowEvaluation()
pending = False
for dataset_instance in replacement.dataset_instances:
if dataset_instance.is_pending:
pending = True
elif not dataset_instance.is_ok:
raise modules.CancelWorkflowEvaluation()
if pending:
raise modules.DelayedWorkflowEvaluation()
return replacement
|
def replacement_for_connection(self, connection, is_data=True):
output_step_id = connection.output_step.id
if output_step_id not in self.outputs:
template = "No outputs found for step id %s, outputs are %s"
message = template % (output_step_id, self.outputs)
raise Exception(message)
step_outputs = self.outputs[output_step_id]
if step_outputs is STEP_OUTPUT_DELAYED:
delayed_why = (
"dependent step [%s] delayed, so this step must be delayed" % output_step_id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
output_name = connection.output_name
try:
replacement = step_outputs[output_name]
except KeyError:
replacement = self.inputs_by_step_id.get(output_step_id)
if (
connection.output_step.type == "parameter_input"
and output_step_id is not None
):
# FIXME: parameter_input step outputs should be properly recorded as step outputs, but for now we can
# short-circuit and just pick the input value
pass
else:
# Must resolve.
template = "Workflow evaluation problem - failed to find output_name %s in step_outputs %s"
message = template % (output_name, step_outputs)
raise Exception(message)
if isinstance(replacement, model.HistoryDatasetCollectionAssociation):
if not replacement.collection.populated:
if not replacement.collection.waiting_for_elements:
# If we are not waiting for elements, there was some
# problem creating the collection. Collection will never
# be populated.
# TODO: consider distinguish between cancelled and failed?
raise modules.CancelWorkflowEvaluation()
delayed_why = (
"dependent collection [%s] not yet populated with datasets"
% replacement.id
)
raise modules.DelayedWorkflowEvaluation(why=delayed_why)
data_inputs = (
model.HistoryDatasetAssociation,
model.HistoryDatasetCollectionAssociation,
model.DatasetCollection,
)
if not is_data and isinstance(replacement, data_inputs):
if isinstance(replacement, model.HistoryDatasetAssociation):
if replacement.is_pending:
raise modules.DelayedWorkflowEvaluation()
if not replacement.is_ok:
raise modules.CancelWorkflowEvaluation()
else:
if not replacement.collection.populated:
raise modules.DelayedWorkflowEvaluation()
pending = False
for dataset_instance in replacement.dataset_instances:
if dataset_instance.is_pending:
pending = True
elif not dataset_instance.is_ok:
raise modules.CancelWorkflowEvaluation()
if pending:
raise modules.DelayedWorkflowEvaluation()
return replacement
|
https://github.com/galaxyproject/galaxy/issues/8390
|
[pid: 63775|app: 0|req: 160/596] 127.0.0.1 () {50 vars in 1324 bytes} [Wed Jul 31 09:38:45 2019] GET /api/histories/1cd8e2f6b131e891/contents?details=3f5830403180d620&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-07-31T16%3A38%3A41.000Z&qv=False&qv=False => generated 2 bytes in 53 msecs (HTTP/1.1 200) 3 headers in 139 bytes (1 switches on core 3)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,835 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,957 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:46,960 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,070 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:47,086 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.jobs.runners.drmaa DEBUG 2019-07-31 09:38:47,465 [p:63775,w:2,m:0] [SlurmRunner.monitor_thread] (10/24674328) state change: job finished normally
galaxy.model.metadata DEBUG 2019-07-31 09:38:47,587 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] loading metadata from file for: HistoryDatasetAssociation 16
galaxy.jobs INFO 2019-07-31 09:38:47,692 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] Collecting metrics for Job 10 in /project/6004808/ncm3/galaxy-19.05-git/galaxy-database/jobs_directory/000/10
galaxy.jobs DEBUG 2019-07-31 09:38:47,703 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] job 10 ended (finish() executed in (159.904 ms))
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,090 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,247 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'list'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,343 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.BuildListCollectionTool object at 0x7f32a9f2a690>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,348 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__BUILD_LIST__] created job [11] (153.141 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,358 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __BUILD_LIST__ request: (178.532 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,368 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 10 of invocation 4 invoked (189.270 ms)
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,441 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 0 elements
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,469 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output_discarded'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,512 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.FilterFromFileTool object at 0x7f32a9f63810>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,516 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__FILTER_FROM_FILE__] created job [12] (106.710 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,524 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __FILTER_FROM_FILE__ request: (140.242 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,533 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 11 of invocation 4 invoked (164.122 ms)
galaxy.workflow.run ERROR 2019-07-31 09:38:48,553 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=2,name=test merge empty (imported from uploaded file)], problem occurred on WorkflowStep[index=4,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.run ERROR 2019-07-31 09:38:48,697 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,712 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
|
Exception
|
def set_step_outputs(self, invocation_step, outputs, already_persisted=False):
step = invocation_step.workflow_step
if invocation_step.output_value:
outputs[invocation_step.output_value.workflow_output.output_name] = (
invocation_step.output_value.value
)
self.outputs[step.id] = outputs
if not already_persisted:
for output_name, output_object in outputs.items():
if hasattr(output_object, "history_content_type"):
invocation_step.add_output(output_name, output_object)
else:
# This is a problem, this non-data, non-collection output
# won't be recovered on a subsequent workflow scheduling
# iteration. This seems to have been a pre-existing problem
# prior to #4584 though.
pass
for workflow_output in step.workflow_outputs:
output_name = workflow_output.output_name
if output_name not in outputs:
message = (
"Failed to find expected workflow output [%s] in step outputs [%s]"
% (output_name, outputs)
)
# raise KeyError(message)
# Pre-18.01 we would have never even detected this output wasn't configured
# and even in 18.01 we don't have a way to tell the user something bad is
# happening so I guess we just log a debug message and continue sadly for now.
# Once https://github.com/galaxyproject/galaxy/issues/5142 is complete we could
# at least tell the user what happened, give them a warning.
log.debug(message)
continue
output = outputs[output_name]
self._record_workflow_output(
step,
workflow_output,
output=output,
)
|
def set_step_outputs(self, invocation_step, outputs, already_persisted=False):
step = invocation_step.workflow_step
self.outputs[step.id] = outputs
if not already_persisted:
for output_name, output_object in outputs.items():
if hasattr(output_object, "history_content_type"):
invocation_step.add_output(output_name, output_object)
else:
# This is a problem, this non-data, non-collection output
# won't be recovered on a subsequent workflow scheduling
# iteration. This seems to have been a pre-existing problem
# prior to #4584 though.
pass
for workflow_output in step.workflow_outputs:
output_name = workflow_output.output_name
if output_name not in outputs:
message = (
"Failed to find expected workflow output [%s] in step outputs [%s]"
% (output_name, outputs)
)
# raise KeyError(message)
# Pre-18.01 we would have never even detected this output wasn't configured
# and even in 18.01 we don't have a way to tell the user something bad is
# happening so I guess we just log a debug message and continue sadly for now.
# Once https://github.com/galaxyproject/galaxy/issues/5142 is complete we could
# at least tell the user what happened, give them a warning.
log.debug(message)
continue
output = outputs[output_name]
self._record_workflow_output(
step,
workflow_output,
output=output,
)
|
https://github.com/galaxyproject/galaxy/issues/8390
|
[pid: 63775|app: 0|req: 160/596] 127.0.0.1 () {50 vars in 1324 bytes} [Wed Jul 31 09:38:45 2019] GET /api/histories/1cd8e2f6b131e891/contents?details=3f5830403180d620&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-07-31T16%3A38%3A41.000Z&qv=False&qv=False => generated 2 bytes in 53 msecs (HTTP/1.1 200) 3 headers in 139 bytes (1 switches on core 3)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,835 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,941 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:45,942 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:45,957 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:46,960 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,070 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 10 outputs of invocation 4 delayed (tool [__BUILD_LIST__] inputs are not ready, this special tool requires inputs to be ready)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 11 outputs of invocation 4 delayed (dependent step [10] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,071 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 12 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 13 outputs of invocation 4 delayed (dependent step [11] delayed, so this step must be delayed)
galaxy.workflow.run DEBUG 2019-07-31 09:38:47,072 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Marking step 14 outputs of invocation 4 delayed (dependent step [13] delayed, so this step must be delayed)
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:47,086 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
galaxy.jobs.runners.drmaa DEBUG 2019-07-31 09:38:47,465 [p:63775,w:2,m:0] [SlurmRunner.monitor_thread] (10/24674328) state change: job finished normally
galaxy.model.metadata DEBUG 2019-07-31 09:38:47,587 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] loading metadata from file for: HistoryDatasetAssociation 16
galaxy.jobs INFO 2019-07-31 09:38:47,692 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] Collecting metrics for Job 10 in /project/6004808/ncm3/galaxy-19.05-git/galaxy-database/jobs_directory/000/10
galaxy.jobs DEBUG 2019-07-31 09:38:47,703 [p:63775,w:2,m:0] [SlurmRunner.work_thread-0] job 10 ended (finish() executed in (159.904 ms))
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,090 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(4,)]
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,247 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'list'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,343 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.BuildListCollectionTool object at 0x7f32a9f2a690>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,348 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__BUILD_LIST__] created job [11] (153.141 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,358 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __BUILD_LIST__ request: (178.532 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,368 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 10 of invocation 4 invoked (189.270 ms)
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,441 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 0 elements
galaxy.managers.collections DEBUG 2019-07-31 09:38:48,469 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
/project/6004808/ncm3/galaxy-19.05-git/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:268: SAWarning: Unicode type received non-unicode bind param value 'output_discarded'. (this warning may be suppressed after 10 occurrences)
(util.ellipses_string(value),),
galaxy.tools.actions.model_operations INFO 2019-07-31 09:38:48,512 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Calling produce_outputs, tool is <galaxy.tools.FilterFromFileTool object at 0x7f32a9f63810>
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,516 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [__FILTER_FROM_FILE__] created job [12] (106.710 ms)
galaxy.tools.execute DEBUG 2019-07-31 09:38:48,524 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool __FILTER_FROM_FILE__ request: (140.242 ms)
galaxy.workflow.run DEBUG 2019-07-31 09:38:48,533 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 11 of invocation 4 invoked (164.122 ms)
galaxy.workflow.run ERROR 2019-07-31 09:38:48,553 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=2,name=test merge empty (imported from uploaded file)], problem occurred on WorkflowStep[index=4,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.run ERROR 2019-07-31 09:38:48,697 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1189, in execute
collection_info = self.compute_collection_info(progress, step, all_inputs)
File "lib/galaxy/workflow/modules.py", line 309, in compute_collection_info
progress, step, all_inputs
File "lib/galaxy/workflow/modules.py", line 327, in _find_collections_to_match
data = progress.replacement_for_input(step, input_dict)
File "lib/galaxy/workflow/run.py", line 337, in replacement_for_input
replacement = self.replacement_for_connection(connection[0], is_data=is_data)
File "lib/galaxy/workflow/run.py", line 358, in replacement_for_connection
raise Exception(message)
Exception: Workflow evaluation problem - failed to find output_name output in step_outputs {}
galaxy.workflow.scheduling_manager DEBUG 2019-07-31 09:38:48,712 [p:63775,w:2,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow invocation [4] scheduled
|
Exception
|
def extract_steps(
trans,
history=None,
job_ids=None,
dataset_ids=None,
dataset_collection_ids=None,
dataset_names=None,
dataset_collection_names=None,
):
# Ensure job_ids and dataset_ids are lists (possibly empty)
job_ids = listify(job_ids)
dataset_ids = listify(dataset_ids)
dataset_collection_ids = listify(dataset_collection_ids)
# Convert both sets of ids to integers
job_ids = [int(_) for _ in job_ids]
dataset_ids = [int(_) for _ in dataset_ids]
dataset_collection_ids = [int(_) for _ in dataset_collection_ids]
# Find each job, for security we (implicitly) check that they are
# associated with a job in the current history.
summary = WorkflowSummary(trans, history)
jobs = summary.jobs
steps = []
hid_to_output_pair = {}
# Input dataset steps
for i, hid in enumerate(dataset_ids):
step = model.WorkflowStep()
step.type = "data_input"
if dataset_names:
name = dataset_names[i]
else:
name = "Input Dataset"
step.tool_inputs = dict(name=name)
hid_to_output_pair[hid] = (step, "output")
steps.append(step)
for i, hid in enumerate(dataset_collection_ids):
step = model.WorkflowStep()
step.type = "data_collection_input"
if hid not in summary.collection_types:
raise exceptions.RequestParameterInvalidException(
"hid %s does not appear to be a collection" % hid
)
collection_type = summary.collection_types[hid]
if dataset_collection_names:
name = dataset_collection_names[i]
else:
name = "Input Dataset Collection"
step.tool_inputs = dict(name=name, collection_type=collection_type)
hid_to_output_pair[hid] = (step, "output")
steps.append(step)
# Tool steps
for job_id in job_ids:
if job_id not in summary.job_id2representative_job:
log.warning(
"job_id %s not found in job_id2representative_job %s"
% (job_id, summary.job_id2representative_job)
)
raise AssertionError(
"Attempt to create workflow with job not connected to current history"
)
job = summary.job_id2representative_job[job_id]
tool_inputs, associations = step_inputs(trans, job)
step = model.WorkflowStep()
step.type = "tool"
step.tool_id = job.tool_id
step.tool_version = job.tool_version
step.tool_inputs = tool_inputs
# NOTE: We shouldn't need to do two passes here since only
# an earlier job can be used as an input to a later
# job.
for other_hid, input_name in associations:
if job in summary.implicit_map_jobs:
an_implicit_output_collection = jobs[job][0][1]
input_collection = (
an_implicit_output_collection.find_implicit_input_collection(
input_name
)
)
if input_collection:
other_hid = input_collection.hid
else:
log.info(
"Cannot find implicit input collection for %s" % input_name
)
if other_hid in hid_to_output_pair:
step_input = step.get_or_add_input(input_name)
other_step, other_name = hid_to_output_pair[other_hid]
conn = model.WorkflowStepConnection()
conn.input_step_input = step_input
# Should always be connected to an earlier step
conn.output_step = other_step
conn.output_name = other_name
steps.append(step)
# Store created dataset hids
for assoc in job.output_datasets + job.output_dataset_collection_instances:
assoc_name = assoc.name
if ToolOutputCollectionPart.is_named_collection_part_name(assoc_name):
continue
if job in summary.implicit_map_jobs:
hid = None
for implicit_pair in jobs[job]:
query_assoc_name, dataset_collection = implicit_pair
if query_assoc_name == assoc_name or assoc_name.startswith(
"__new_primary_file_%s|" % query_assoc_name
):
hid = dataset_collection.hid
if hid is None:
template = "Failed to find matching implicit job - job id is %s, implicit pairs are %s, assoc_name is %s."
message = template % (job.id, jobs[job], assoc_name)
log.warning(message)
raise Exception("Failed to extract job.")
else:
if hasattr(assoc, "dataset"):
hid = assoc.dataset.hid
else:
hid = assoc.dataset_collection_instance.hid
hid_to_output_pair[hid] = (step, assoc.name)
return steps
|
def extract_steps(
trans,
history=None,
job_ids=None,
dataset_ids=None,
dataset_collection_ids=None,
dataset_names=None,
dataset_collection_names=None,
):
# Ensure job_ids and dataset_ids are lists (possibly empty)
if job_ids is None:
job_ids = []
elif type(job_ids) is not list:
job_ids = [job_ids]
if dataset_ids is None:
dataset_ids = []
elif type(dataset_ids) is not list:
dataset_ids = [dataset_ids]
if dataset_collection_ids is None:
dataset_collection_ids = []
elif type(dataset_collection_ids) is not list:
dataset_collection_ids = [dataset_collection_ids]
# Convert both sets of ids to integers
job_ids = [int(_) for _ in job_ids]
dataset_ids = [int(_) for _ in dataset_ids]
dataset_collection_ids = [int(_) for _ in dataset_collection_ids]
# Find each job, for security we (implicitly) check that they are
# associated with a job in the current history.
summary = WorkflowSummary(trans, history)
jobs = summary.jobs
steps = []
hid_to_output_pair = {}
# Input dataset steps
for i, hid in enumerate(dataset_ids):
step = model.WorkflowStep()
step.type = "data_input"
if dataset_names:
name = dataset_names[i]
else:
name = "Input Dataset"
step.tool_inputs = dict(name=name)
hid_to_output_pair[hid] = (step, "output")
steps.append(step)
for i, hid in enumerate(dataset_collection_ids):
step = model.WorkflowStep()
step.type = "data_collection_input"
if hid not in summary.collection_types:
raise exceptions.RequestParameterInvalidException(
"hid %s does not appear to be a collection" % hid
)
collection_type = summary.collection_types[hid]
if dataset_collection_names:
name = dataset_collection_names[i]
else:
name = "Input Dataset Collection"
step.tool_inputs = dict(name=name, collection_type=collection_type)
hid_to_output_pair[hid] = (step, "output")
steps.append(step)
# Tool steps
for job_id in job_ids:
if job_id not in summary.job_id2representative_job:
log.warning(
"job_id %s not found in job_id2representative_job %s"
% (job_id, summary.job_id2representative_job)
)
raise AssertionError(
"Attempt to create workflow with job not connected to current history"
)
job = summary.job_id2representative_job[job_id]
tool_inputs, associations = step_inputs(trans, job)
step = model.WorkflowStep()
step.type = "tool"
step.tool_id = job.tool_id
step.tool_version = job.tool_version
step.tool_inputs = tool_inputs
# NOTE: We shouldn't need to do two passes here since only
# an earlier job can be used as an input to a later
# job.
for other_hid, input_name in associations:
if job in summary.implicit_map_jobs:
an_implicit_output_collection = jobs[job][0][1]
input_collection = (
an_implicit_output_collection.find_implicit_input_collection(
input_name
)
)
if input_collection:
other_hid = input_collection.hid
else:
log.info(
"Cannot find implicit input collection for %s" % input_name
)
if other_hid in hid_to_output_pair:
step_input = step.get_or_add_input(input_name)
other_step, other_name = hid_to_output_pair[other_hid]
conn = model.WorkflowStepConnection()
conn.input_step_input = step_input
# Should always be connected to an earlier step
conn.output_step = other_step
conn.output_name = other_name
steps.append(step)
# Store created dataset hids
for assoc in job.output_datasets + job.output_dataset_collection_instances:
assoc_name = assoc.name
if ToolOutputCollectionPart.is_named_collection_part_name(assoc_name):
continue
if job in summary.implicit_map_jobs:
hid = None
for implicit_pair in jobs[job]:
query_assoc_name, dataset_collection = implicit_pair
if query_assoc_name == assoc_name or assoc_name.startswith(
"__new_primary_file_%s|" % query_assoc_name
):
hid = dataset_collection.hid
if hid is None:
template = "Failed to find matching implicit job - job id is %s, implicit pairs are %s, assoc_name is %s."
message = template % (job.id, jobs[job], assoc_name)
log.warning(message)
raise Exception("Failed to extract job.")
else:
if hasattr(assoc, "dataset"):
hid = assoc.dataset.hid
else:
hid = assoc.dataset_collection_instance.hid
hid_to_output_pair[hid] = (step, assoc.name)
return steps
|
https://github.com/galaxyproject/galaxy/issues/10452
|
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: 80.138.30.147 - - [18/Oct/2020:23:43:09 +0200] "POST /workflow/build_from_current_history HTTP/1.1" 500 - "https://usegalaxy.eu/workflow/build_from_current_history" "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0"
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: Traceback (most recent call last):
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/middleware/error.py", line 154, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: app_iter = self.application(environ, sr_checker)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/paste/recursive.py", line 85, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/middleware/statsd.py", line 34, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: req = self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/paste/httpexceptions.py", line 640, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/base.py", line 145, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.handle_request(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/base.py", line 224, in handle_request
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: body = method(trans, **kwargs)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/webapps/galaxy/controllers/workflow.py", line 854, in build_from_current_history
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: dataset_collection_names=dataset_collection_names
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 29, in extract_workflow
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: steps = extract_steps(trans, history=history, job_ids=job_ids, dataset_ids=dataset_ids, dataset_collection_ids=dataset_collection_ids, dataset_names=dataset_names, dataset_collection_names=None)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 109, in extract_steps
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: tool_inputs, associations = step_inputs(trans, job)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 310, in step_inputs
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations = __cleanup_param_values(tool.inputs, param_values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 372, in __cleanup_param_values
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: cleanup("", inputs, values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 368, in cleanup
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: cleanup("%s%s|" % (prefix, key), input.cases[current_case].inputs, group_values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 342, in cleanup
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations.extend([(t.hid, prefix + key) for t in tmp])
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 342, in <listcomp>
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations.extend([(t.hid, prefix + key) for t in tmp])
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: AttributeError: 'DatasetCollectionElement' object has no attribute 'hid'
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: The above exception was the direct cause of the
|
AttributeError
|
def __cleanup_param_values(inputs, values):
"""
Remove 'Data' values from `param_values`, along with metadata cruft,
but track the associations.
"""
associations = []
# dbkey is pushed in by the framework
if "dbkey" in values:
del values["dbkey"]
root_values = values
root_input_keys = inputs.keys()
# Recursively clean data inputs and dynamic selects
def cleanup(prefix, inputs, values):
for key, input in inputs.items():
if isinstance(input, DataToolParameter) or isinstance(
input, DataCollectionToolParameter
):
items = values[key]
values[key] = None
# HACK: Nested associations are not yet working, but we
# still need to clean them up so we can serialize
# if not( prefix ):
for item in listify(items):
if isinstance(item, model.DatasetCollectionElement):
item = item.first_dataset_instance()
if item: # this is false for a non-set optional dataset
associations.append((item.hid, prefix + key))
# Cleanup the other deprecated crap associated with datasets
# as well. Worse, for nested datasets all the metadata is
# being pushed into the root. FIXME: MUST REMOVE SOON
key = prefix + key + "_"
for k in root_values.keys():
if k not in root_input_keys and k.startswith(key):
del root_values[k]
elif isinstance(input, Repeat):
if key in values:
group_values = values[key]
for i, rep_values in enumerate(group_values):
rep_index = rep_values["__index__"]
cleanup(
"%s%s_%d|" % (prefix, key, rep_index),
input.inputs,
group_values[i],
)
elif isinstance(input, Conditional):
# Scrub dynamic resource related parameters from workflows,
# they cause problems and the workflow probably should include
# their state in workflow encoding.
if input.name == "__job_resource":
if input.name in values:
del values[input.name]
return
if input.name in values:
group_values = values[input.name]
current_case = group_values["__current_case__"]
cleanup(
"%s%s|" % (prefix, key),
input.cases[current_case].inputs,
group_values,
)
elif isinstance(input, Section):
if input.name in values:
cleanup("%s%s|" % (prefix, key), input.inputs, values[input.name])
cleanup("", inputs, values)
return associations
|
def __cleanup_param_values(inputs, values):
"""
Remove 'Data' values from `param_values`, along with metadata cruft,
but track the associations.
"""
associations = []
# dbkey is pushed in by the framework
if "dbkey" in values:
del values["dbkey"]
root_values = values
root_input_keys = inputs.keys()
# Recursively clean data inputs and dynamic selects
def cleanup(prefix, inputs, values):
for key, input in inputs.items():
if isinstance(input, DataToolParameter) or isinstance(
input, DataCollectionToolParameter
):
tmp = values[key]
values[key] = None
# HACK: Nested associations are not yet working, but we
# still need to clean them up so we can serialize
# if not( prefix ):
if isinstance(tmp, model.DatasetCollectionElement):
tmp = tmp.first_dataset_instance()
if tmp: # this is false for a non-set optional dataset
if not isinstance(tmp, list):
associations.append((tmp.hid, prefix + key))
else:
associations.extend([(t.hid, prefix + key) for t in tmp])
# Cleanup the other deprecated crap associated with datasets
# as well. Worse, for nested datasets all the metadata is
# being pushed into the root. FIXME: MUST REMOVE SOON
key = prefix + key + "_"
for k in root_values.keys():
if k not in root_input_keys and k.startswith(key):
del root_values[k]
elif isinstance(input, Repeat):
if key in values:
group_values = values[key]
for i, rep_values in enumerate(group_values):
rep_index = rep_values["__index__"]
cleanup(
"%s%s_%d|" % (prefix, key, rep_index),
input.inputs,
group_values[i],
)
elif isinstance(input, Conditional):
# Scrub dynamic resource related parameters from workflows,
# they cause problems and the workflow probably should include
# their state in workflow encoding.
if input.name == "__job_resource":
if input.name in values:
del values[input.name]
return
if input.name in values:
group_values = values[input.name]
current_case = group_values["__current_case__"]
cleanup(
"%s%s|" % (prefix, key),
input.cases[current_case].inputs,
group_values,
)
elif isinstance(input, Section):
if input.name in values:
cleanup("%s%s|" % (prefix, key), input.inputs, values[input.name])
cleanup("", inputs, values)
return associations
|
https://github.com/galaxyproject/galaxy/issues/10452
|
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: 80.138.30.147 - - [18/Oct/2020:23:43:09 +0200] "POST /workflow/build_from_current_history HTTP/1.1" 500 - "https://usegalaxy.eu/workflow/build_from_current_history" "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0"
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: Traceback (most recent call last):
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/middleware/error.py", line 154, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: app_iter = self.application(environ, sr_checker)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/paste/recursive.py", line 85, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/middleware/statsd.py", line 34, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: req = self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/paste/httpexceptions.py", line 640, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/base.py", line 145, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.handle_request(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/base.py", line 224, in handle_request
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: body = method(trans, **kwargs)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/webapps/galaxy/controllers/workflow.py", line 854, in build_from_current_history
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: dataset_collection_names=dataset_collection_names
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 29, in extract_workflow
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: steps = extract_steps(trans, history=history, job_ids=job_ids, dataset_ids=dataset_ids, dataset_collection_ids=dataset_collection_ids, dataset_names=dataset_names, dataset_collection_names=None)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 109, in extract_steps
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: tool_inputs, associations = step_inputs(trans, job)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 310, in step_inputs
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations = __cleanup_param_values(tool.inputs, param_values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 372, in __cleanup_param_values
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: cleanup("", inputs, values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 368, in cleanup
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: cleanup("%s%s|" % (prefix, key), input.cases[current_case].inputs, group_values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 342, in cleanup
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations.extend([(t.hid, prefix + key) for t in tmp])
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 342, in <listcomp>
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations.extend([(t.hid, prefix + key) for t in tmp])
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: AttributeError: 'DatasetCollectionElement' object has no attribute 'hid'
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: The above exception was the direct cause of the
|
AttributeError
|
def cleanup(prefix, inputs, values):
for key, input in inputs.items():
if isinstance(input, DataToolParameter) or isinstance(
input, DataCollectionToolParameter
):
items = values[key]
values[key] = None
# HACK: Nested associations are not yet working, but we
# still need to clean them up so we can serialize
# if not( prefix ):
for item in listify(items):
if isinstance(item, model.DatasetCollectionElement):
item = item.first_dataset_instance()
if item: # this is false for a non-set optional dataset
associations.append((item.hid, prefix + key))
# Cleanup the other deprecated crap associated with datasets
# as well. Worse, for nested datasets all the metadata is
# being pushed into the root. FIXME: MUST REMOVE SOON
key = prefix + key + "_"
for k in root_values.keys():
if k not in root_input_keys and k.startswith(key):
del root_values[k]
elif isinstance(input, Repeat):
if key in values:
group_values = values[key]
for i, rep_values in enumerate(group_values):
rep_index = rep_values["__index__"]
cleanup(
"%s%s_%d|" % (prefix, key, rep_index),
input.inputs,
group_values[i],
)
elif isinstance(input, Conditional):
# Scrub dynamic resource related parameters from workflows,
# they cause problems and the workflow probably should include
# their state in workflow encoding.
if input.name == "__job_resource":
if input.name in values:
del values[input.name]
return
if input.name in values:
group_values = values[input.name]
current_case = group_values["__current_case__"]
cleanup(
"%s%s|" % (prefix, key),
input.cases[current_case].inputs,
group_values,
)
elif isinstance(input, Section):
if input.name in values:
cleanup("%s%s|" % (prefix, key), input.inputs, values[input.name])
|
def cleanup(prefix, inputs, values):
for key, input in inputs.items():
if isinstance(input, DataToolParameter) or isinstance(
input, DataCollectionToolParameter
):
tmp = values[key]
values[key] = None
# HACK: Nested associations are not yet working, but we
# still need to clean them up so we can serialize
# if not( prefix ):
if isinstance(tmp, model.DatasetCollectionElement):
tmp = tmp.first_dataset_instance()
if tmp: # this is false for a non-set optional dataset
if not isinstance(tmp, list):
associations.append((tmp.hid, prefix + key))
else:
associations.extend([(t.hid, prefix + key) for t in tmp])
# Cleanup the other deprecated crap associated with datasets
# as well. Worse, for nested datasets all the metadata is
# being pushed into the root. FIXME: MUST REMOVE SOON
key = prefix + key + "_"
for k in root_values.keys():
if k not in root_input_keys and k.startswith(key):
del root_values[k]
elif isinstance(input, Repeat):
if key in values:
group_values = values[key]
for i, rep_values in enumerate(group_values):
rep_index = rep_values["__index__"]
cleanup(
"%s%s_%d|" % (prefix, key, rep_index),
input.inputs,
group_values[i],
)
elif isinstance(input, Conditional):
# Scrub dynamic resource related parameters from workflows,
# they cause problems and the workflow probably should include
# their state in workflow encoding.
if input.name == "__job_resource":
if input.name in values:
del values[input.name]
return
if input.name in values:
group_values = values[input.name]
current_case = group_values["__current_case__"]
cleanup(
"%s%s|" % (prefix, key),
input.cases[current_case].inputs,
group_values,
)
elif isinstance(input, Section):
if input.name in values:
cleanup("%s%s|" % (prefix, key), input.inputs, values[input.name])
|
https://github.com/galaxyproject/galaxy/issues/10452
|
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: 80.138.30.147 - - [18/Oct/2020:23:43:09 +0200] "POST /workflow/build_from_current_history HTTP/1.1" 500 - "https://usegalaxy.eu/workflow/build_from_current_history" "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0"
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: Traceback (most recent call last):
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/middleware/error.py", line 154, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: app_iter = self.application(environ, sr_checker)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/paste/recursive.py", line 85, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/middleware/statsd.py", line 34, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: req = self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/paste/httpexceptions.py", line 640, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.application(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/base.py", line 145, in __call__
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: return self.handle_request(environ, start_response)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/web/framework/base.py", line 224, in handle_request
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: body = method(trans, **kwargs)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/webapps/galaxy/controllers/workflow.py", line 854, in build_from_current_history
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: dataset_collection_names=dataset_collection_names
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 29, in extract_workflow
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: steps = extract_steps(trans, history=history, job_ids=job_ids, dataset_ids=dataset_ids, dataset_collection_ids=dataset_collection_ids, dataset_names=dataset_names, dataset_collection_names=None)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 109, in extract_steps
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: tool_inputs, associations = step_inputs(trans, job)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 310, in step_inputs
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations = __cleanup_param_values(tool.inputs, param_values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 372, in __cleanup_param_values
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: cleanup("", inputs, values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 368, in cleanup
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: cleanup("%s%s|" % (prefix, key), input.cases[current_case].inputs, group_values)
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 342, in cleanup
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations.extend([(t.hid, prefix + key) for t in tmp])
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: File "lib/galaxy/workflow/extract.py", line 342, in <listcomp>
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: associations.extend([(t.hid, prefix + key) for t in tmp])
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: AttributeError: 'DatasetCollectionElement' object has no attribute 'hid'
Oct 18 23:43:10 sn04.bi.uni-freiburg.de uwsgi[1008399]: The above exception was the direct cause of the
|
AttributeError
|
def precreate_dataset_collection(
self,
structure,
allow_unitialized_element=True,
completed_collection=None,
implicit_output_name=None,
):
has_structure = not structure.is_leaf and structure.children_known
if not has_structure and allow_unitialized_element:
dataset_collection = model.DatasetCollectionElement.UNINITIALIZED_ELEMENT
elif not has_structure:
collection_type_description = structure.collection_type_description
dataset_collection = model.DatasetCollection(populated=False)
dataset_collection.collection_type = collection_type_description.collection_type
else:
collection_type_description = structure.collection_type_description
dataset_collection = model.DatasetCollection(populated=False)
dataset_collection.collection_type = collection_type_description.collection_type
elements = []
for index, (identifier, substructure) in enumerate(structure.children):
# TODO: Open question - populate these now or later?
element = None
if completed_collection and implicit_output_name:
job = completed_collection[index]
if job:
it = (
jtiodca.dataset_collection
for jtiodca in job.output_dataset_collections
if jtiodca.name == implicit_output_name
)
element = next(it, None)
if element is None:
if substructure.is_leaf:
element = model.DatasetCollectionElement.UNINITIALIZED_ELEMENT
else:
element = self.precreate_dataset_collection(
substructure,
allow_unitialized_element=allow_unitialized_element,
)
element = model.DatasetCollectionElement(
collection=dataset_collection,
element=element,
element_identifier=identifier,
element_index=index,
)
elements.append(element)
dataset_collection.element_count = len(elements)
return dataset_collection
|
def precreate_dataset_collection(
self,
structure,
allow_unitialized_element=True,
completed_collection=None,
implicit_output_name=None,
):
has_structure = not structure.is_leaf and structure.children_known
if not has_structure and allow_unitialized_element:
dataset_collection = model.DatasetCollectionElement.UNINITIALIZED_ELEMENT
elif not has_structure:
collection_type_description = structure.collection_type_description
dataset_collection = model.DatasetCollection(populated=False)
dataset_collection.collection_type = collection_type_description.collection_type
else:
collection_type_description = structure.collection_type_description
dataset_collection = model.DatasetCollection(populated=False)
dataset_collection.collection_type = collection_type_description.collection_type
elements = []
for index, (identifier, substructure) in enumerate(structure.children):
# TODO: Open question - populate these now or later?
element = None
if completed_collection and implicit_output_name:
job = completed_collection[index]
if job:
it = (
jtiodca.dataset_collection
for jtiodca in job.output_dataset_collections
if jtiodca.name == implicit_output_name
)
element = next(it, None)
if element is None:
if substructure.is_leaf:
element = model.DatasetCollectionElement.UNINITIALIZED_ELEMENT
else:
element = self.precreate_dataset_collection(
substructure,
allow_unitialized_element=allow_unitialized_element,
)
element = model.DatasetCollectionElement(
element=element,
element_identifier=identifier,
element_index=index,
)
elements.append(element)
dataset_collection.elements = elements
dataset_collection.element_count = len(elements)
return dataset_collection
|
https://github.com/galaxyproject/galaxy/issues/10604
|
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1827028]: galaxy.jobs DEBUG 2020-11-01 14:58:58,991 (12059080) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059080
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1847375]: galaxy.jobs DEBUG 2020-11-01 14:58:58,998 (12059086) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059086
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1847375]: galaxy.objectstore DEBUG 2020-11-01 14:58:59,011 Using preferred backend 'files10' for creation of Dataset 27240484
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: galaxy.job_execution.output_collect ERROR 2020-11-01 14:58:59,036 Problem gathering output collection.
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: psycopg2.errors.NotNullViolation: null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: The above exception was the direct cause of the following exception:
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 156, in collect_dynamic_outputs
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: final_job_state=job_context.final_job_state,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/model/store/discover.py", line 286, in populate_collection_elements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 214, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.sa_session.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/scoping.py", line 163, in do
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return getattr(self.registry(), name)(*args, **kwargs)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2523, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self._flush(objects)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2664, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: transaction.rollback(_capture_exception=True)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: exc_value, with_traceback=exc_tb,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2624, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: flush_context.execute()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: rec.execute(self)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: uow,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: update,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 995, in _emit_update_statements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: statement, multiparams
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return meth(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return connection._execute_clauseelement(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_clauseelement
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: distilled_params,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1324, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: e, statement, parameters, cursor, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1518, in _handle_dbapi_exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy_exception, with_traceback=exc_info[2], from_=e
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy.exc.IntegrityError: (psycopg2.errors.NotNullViolation) null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [SQL: UPDATE dataset_collection_element SET dataset_collection_id=%(dataset_collection_id)s WHERE dataset_collection_element.id = %(dataset_collection_element_id)s]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [parameters: {'dataset_collection_id': None, 'dataset_collection_element_id': 13330967}]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: (Background on this error at: http://sqlalche.me/e/gkpj)
|
sqlalchemy.exc.IntegrityError
|
def set_collection_elements(dataset_collection, type, dataset_instances):
element_index = 0
elements = []
for element in type.generate_elements(dataset_instances):
element.element_index = element_index
element.collection = dataset_collection
elements.append(element)
element_index += 1
dataset_collection.element_count = element_index
return dataset_collection
|
def set_collection_elements(dataset_collection, type, dataset_instances):
element_index = 0
elements = []
for element in type.generate_elements(dataset_instances):
element.element_index = element_index
element.collection = dataset_collection
elements.append(element)
element_index += 1
dataset_collection.elements = elements
dataset_collection.element_count = element_index
return dataset_collection
|
https://github.com/galaxyproject/galaxy/issues/10604
|
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1827028]: galaxy.jobs DEBUG 2020-11-01 14:58:58,991 (12059080) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059080
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1847375]: galaxy.jobs DEBUG 2020-11-01 14:58:58,998 (12059086) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059086
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1847375]: galaxy.objectstore DEBUG 2020-11-01 14:58:59,011 Using preferred backend 'files10' for creation of Dataset 27240484
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: galaxy.job_execution.output_collect ERROR 2020-11-01 14:58:59,036 Problem gathering output collection.
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: psycopg2.errors.NotNullViolation: null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: The above exception was the direct cause of the following exception:
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 156, in collect_dynamic_outputs
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: final_job_state=job_context.final_job_state,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/model/store/discover.py", line 286, in populate_collection_elements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 214, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.sa_session.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/scoping.py", line 163, in do
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return getattr(self.registry(), name)(*args, **kwargs)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2523, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self._flush(objects)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2664, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: transaction.rollback(_capture_exception=True)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: exc_value, with_traceback=exc_tb,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2624, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: flush_context.execute()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: rec.execute(self)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: uow,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: update,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 995, in _emit_update_statements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: statement, multiparams
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return meth(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return connection._execute_clauseelement(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_clauseelement
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: distilled_params,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1324, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: e, statement, parameters, cursor, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1518, in _handle_dbapi_exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy_exception, with_traceback=exc_info[2], from_=e
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy.exc.IntegrityError: (psycopg2.errors.NotNullViolation) null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [SQL: UPDATE dataset_collection_element SET dataset_collection_id=%(dataset_collection_id)s WHERE dataset_collection_element.id = %(dataset_collection_element_id)s]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [parameters: {'dataset_collection_id': None, 'dataset_collection_element_id': 13330967}]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: (Background on this error at: http://sqlalche.me/e/gkpj)
|
sqlalchemy.exc.IntegrityError
|
def prototype(self, plugin_type):
plugin_type_object = self.get(plugin_type)
if not hasattr(plugin_type_object, "prototype_elements"):
raise Exception(
"Cannot pre-determine structure for collection of type %s" % plugin_type
)
dataset_collection = model.DatasetCollection()
for e in plugin_type_object.prototype_elements():
e.collection = dataset_collection
return dataset_collection
|
def prototype(self, plugin_type):
plugin_type_object = self.get(plugin_type)
if not hasattr(plugin_type_object, "prototype_elements"):
raise Exception(
"Cannot pre-determine structure for collection of type %s" % plugin_type
)
dataset_collection = model.DatasetCollection()
elements = [e for e in plugin_type_object.prototype_elements()]
dataset_collection.elements = elements
return dataset_collection
|
https://github.com/galaxyproject/galaxy/issues/10604
|
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1827028]: galaxy.jobs DEBUG 2020-11-01 14:58:58,991 (12059080) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059080
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1847375]: galaxy.jobs DEBUG 2020-11-01 14:58:58,998 (12059086) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059086
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1847375]: galaxy.objectstore DEBUG 2020-11-01 14:58:59,011 Using preferred backend 'files10' for creation of Dataset 27240484
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: galaxy.job_execution.output_collect ERROR 2020-11-01 14:58:59,036 Problem gathering output collection.
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: psycopg2.errors.NotNullViolation: null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: The above exception was the direct cause of the following exception:
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 156, in collect_dynamic_outputs
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: final_job_state=job_context.final_job_state,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/model/store/discover.py", line 286, in populate_collection_elements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 214, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.sa_session.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/scoping.py", line 163, in do
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return getattr(self.registry(), name)(*args, **kwargs)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2523, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self._flush(objects)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2664, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: transaction.rollback(_capture_exception=True)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: exc_value, with_traceback=exc_tb,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2624, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: flush_context.execute()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: rec.execute(self)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: uow,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: update,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 995, in _emit_update_statements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: statement, multiparams
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return meth(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return connection._execute_clauseelement(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_clauseelement
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: distilled_params,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1324, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: e, statement, parameters, cursor, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1518, in _handle_dbapi_exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy_exception, with_traceback=exc_info[2], from_=e
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy.exc.IntegrityError: (psycopg2.errors.NotNullViolation) null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [SQL: UPDATE dataset_collection_element SET dataset_collection_id=%(dataset_collection_id)s WHERE dataset_collection_element.id = %(dataset_collection_element_id)s]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [parameters: {'dataset_collection_id': None, 'dataset_collection_element_id': 13330967}]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: (Background on this error at: http://sqlalche.me/e/gkpj)
|
sqlalchemy.exc.IntegrityError
|
def collect_dynamic_outputs(
job_context,
output_collections,
):
# unmapped outputs do not correspond to explicit outputs of the tool, they were inferred entirely
# from the tool provided metadata (e.g. galaxy.json).
for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs():
assert "destination" in unnamed_output_dict
assert "elements" in unnamed_output_dict
destination = unnamed_output_dict["destination"]
elements = unnamed_output_dict["elements"]
assert "type" in destination
destination_type = destination["type"]
assert destination_type in ["library_folder", "hdca", "hdas"]
# three destination types we need to handle here - "library_folder" (place discovered files in a library folder),
# "hdca" (place discovered files in a history dataset collection), and "hdas" (place discovered files in a history
# as stand-alone datasets).
if destination_type == "library_folder":
# populate a library folder (needs to be already have been created)
library_folder = job_context.get_library_folder(destination)
persist_elements_to_folder(job_context, elements, library_folder)
elif destination_type == "hdca":
# create or populate a dataset collection in the history
assert "collection_type" in unnamed_output_dict
object_id = destination.get("object_id")
if object_id:
hdca = job_context.get_hdca(object_id)
else:
name = unnamed_output_dict.get("name", "unnamed collection")
collection_type = unnamed_output_dict["collection_type"]
collection_type_description = (
COLLECTION_TYPE_DESCRIPTION_FACTORY.for_collection_type(
collection_type
)
)
structure = UninitializedTree(collection_type_description)
hdca = job_context.create_hdca(name, structure)
error_message = unnamed_output_dict.get("error_message")
if error_message:
hdca.collection.handle_population_failed(error_message)
else:
persist_elements_to_hdca(
job_context, elements, hdca, collector=DEFAULT_DATASET_COLLECTOR
)
elif destination_type == "hdas":
persist_hdas(
elements, job_context, final_job_state=job_context.final_job_state
)
for name, has_collection in output_collections.items():
output_collection_def = job_context.output_collection_def(name)
if not output_collection_def:
continue
if not output_collection_def.dynamic_structure:
continue
# Could be HDCA for normal jobs or a DC for mapping
# jobs.
if hasattr(has_collection, "collection"):
collection = has_collection.collection
else:
collection = has_collection
# We are adding dynamic collections, which may be precreated, but their actually state is still new!
collection.populated_state = collection.populated_states.NEW
try:
collection_builder = builder.BoundCollectionBuilder(collection)
dataset_collectors = [
dataset_collector(description)
for description in output_collection_def.dataset_collector_descriptions
]
output_name = output_collection_def.name
filenames = job_context.find_files(
output_name, collection, dataset_collectors
)
job_context.populate_collection_elements(
collection,
collection_builder,
filenames,
name=output_collection_def.name,
metadata_source_name=output_collection_def.metadata_source,
final_job_state=job_context.final_job_state,
)
except Exception:
log.exception("Problem gathering output collection.")
collection.handle_population_failed(
"Problem building datasets for collection."
)
job_context.add_dataset_collection(has_collection)
|
def collect_dynamic_outputs(
job_context,
output_collections,
):
# unmapped outputs do not correspond to explicit outputs of the tool, they were inferred entirely
# from the tool provided metadata (e.g. galaxy.json).
for unnamed_output_dict in job_context.tool_provided_metadata.get_unnamed_outputs():
assert "destination" in unnamed_output_dict
assert "elements" in unnamed_output_dict
destination = unnamed_output_dict["destination"]
elements = unnamed_output_dict["elements"]
assert "type" in destination
destination_type = destination["type"]
assert destination_type in ["library_folder", "hdca", "hdas"]
# three destination types we need to handle here - "library_folder" (place discovered files in a library folder),
# "hdca" (place discovered files in a history dataset collection), and "hdas" (place discovered files in a history
# as stand-alone datasets).
if destination_type == "library_folder":
# populate a library folder (needs to be already have been created)
library_folder = job_context.get_library_folder(destination)
persist_elements_to_folder(job_context, elements, library_folder)
elif destination_type == "hdca":
# create or populate a dataset collection in the history
assert "collection_type" in unnamed_output_dict
object_id = destination.get("object_id")
if object_id:
hdca = job_context.get_hdca(object_id)
else:
name = unnamed_output_dict.get("name", "unnamed collection")
collection_type = unnamed_output_dict["collection_type"]
collection_type_description = (
COLLECTION_TYPE_DESCRIPTION_FACTORY.for_collection_type(
collection_type
)
)
structure = UninitializedTree(collection_type_description)
hdca = job_context.create_hdca(name, structure)
error_message = unnamed_output_dict.get("error_message")
if error_message:
hdca.collection.handle_population_failed(error_message)
else:
persist_elements_to_hdca(
job_context, elements, hdca, collector=DEFAULT_DATASET_COLLECTOR
)
elif destination_type == "hdas":
persist_hdas(
elements, job_context, final_job_state=job_context.final_job_state
)
for name, has_collection in output_collections.items():
output_collection_def = job_context.output_collection_def(name)
if not output_collection_def:
continue
if not output_collection_def.dynamic_structure:
continue
# Could be HDCA for normal jobs or a DC for mapping
# jobs.
if hasattr(has_collection, "collection"):
collection = has_collection.collection
else:
collection = has_collection
# We are adding dynamic collections, which may be precreated, but their actually state is still new!
collection.populated_state = collection.populated_states.NEW
try:
collection_builder = builder.BoundCollectionBuilder(collection)
dataset_collectors = [
dataset_collector(description)
for description in output_collection_def.dataset_collector_descriptions
]
output_name = output_collection_def.name
filenames = job_context.find_files(
output_name, collection, dataset_collectors
)
job_context.populate_collection_elements(
collection,
collection_builder,
filenames,
name=output_collection_def.name,
metadata_source_name=output_collection_def.metadata_source,
final_job_state=job_context.final_job_state,
)
collection_builder.populate()
except Exception:
log.exception("Problem gathering output collection.")
collection.handle_population_failed(
"Problem building datasets for collection."
)
job_context.add_dataset_collection(has_collection)
|
https://github.com/galaxyproject/galaxy/issues/10604
|
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1827028]: galaxy.jobs DEBUG 2020-11-01 14:58:58,991 (12059080) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059080
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1847375]: galaxy.jobs DEBUG 2020-11-01 14:58:58,998 (12059086) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059086
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1847375]: galaxy.objectstore DEBUG 2020-11-01 14:58:59,011 Using preferred backend 'files10' for creation of Dataset 27240484
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: galaxy.job_execution.output_collect ERROR 2020-11-01 14:58:59,036 Problem gathering output collection.
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: psycopg2.errors.NotNullViolation: null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: The above exception was the direct cause of the following exception:
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 156, in collect_dynamic_outputs
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: final_job_state=job_context.final_job_state,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/model/store/discover.py", line 286, in populate_collection_elements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 214, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.sa_session.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/scoping.py", line 163, in do
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return getattr(self.registry(), name)(*args, **kwargs)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2523, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self._flush(objects)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2664, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: transaction.rollback(_capture_exception=True)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: exc_value, with_traceback=exc_tb,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2624, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: flush_context.execute()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: rec.execute(self)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: uow,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: update,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 995, in _emit_update_statements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: statement, multiparams
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return meth(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return connection._execute_clauseelement(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_clauseelement
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: distilled_params,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1324, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: e, statement, parameters, cursor, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1518, in _handle_dbapi_exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy_exception, with_traceback=exc_info[2], from_=e
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy.exc.IntegrityError: (psycopg2.errors.NotNullViolation) null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [SQL: UPDATE dataset_collection_element SET dataset_collection_id=%(dataset_collection_id)s WHERE dataset_collection_element.id = %(dataset_collection_element_id)s]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [parameters: {'dataset_collection_id': None, 'dataset_collection_element_id': 13330967}]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: (Background on this error at: http://sqlalche.me/e/gkpj)
|
sqlalchemy.exc.IntegrityError
|
def populate_collection_elements(
self,
collection,
root_collection_builder,
filenames,
name=None,
metadata_source_name=None,
final_job_state="ok",
):
# TODO: allow configurable sorting.
# <sort by="lexical" /> <!-- default -->
# <sort by="reverse_lexical" />
# <sort regex="example.(\d+).fastq" by="1:numerical" />
# <sort regex="part_(\d+)_sample_([^_]+).fastq" by="2:lexical,1:numerical" />
if name is None:
name = "unnamed output"
element_datasets = {
"element_identifiers": [],
"datasets": [],
"tag_lists": [],
"paths": [],
"extra_files": [],
}
for filename, discovered_file in filenames.items():
create_dataset_timer = ExecutionTimer()
fields_match = discovered_file.match
if not fields_match:
raise Exception("Problem parsing metadata fields for file %s" % filename)
element_identifiers = fields_match.element_identifiers
designation = fields_match.designation
visible = fields_match.visible
ext = fields_match.ext
dbkey = fields_match.dbkey
extra_files = fields_match.extra_files
# galaxy.tools.parser.output_collection_def.INPUT_DBKEY_TOKEN
if dbkey == "__input__":
dbkey = self.input_dbkey
# Create new primary dataset
dataset_name = fields_match.name or designation
link_data = discovered_file.match.link_data
sources = discovered_file.match.sources
hashes = discovered_file.match.hashes
created_from_basename = discovered_file.match.created_from_basename
dataset = self.create_dataset(
ext=ext,
designation=designation,
visible=visible,
dbkey=dbkey,
name=dataset_name,
metadata_source_name=metadata_source_name,
link_data=link_data,
sources=sources,
hashes=hashes,
created_from_basename=created_from_basename,
final_job_state=final_job_state,
)
log.debug(
"(%s) Created dynamic collection dataset for path [%s] with element identifier [%s] for output [%s] %s",
self.job_id(),
filename,
designation,
name,
create_dataset_timer,
)
element_datasets["element_identifiers"].append(element_identifiers)
element_datasets["extra_files"].append(extra_files)
element_datasets["datasets"].append(dataset)
element_datasets["tag_lists"].append(discovered_file.match.tag_list)
element_datasets["paths"].append(filename)
self.add_tags_to_datasets(
datasets=element_datasets["datasets"], tag_lists=element_datasets["tag_lists"]
)
for element_identifiers, dataset in zip(
element_datasets["element_identifiers"], element_datasets["datasets"]
):
current_builder = root_collection_builder
for element_identifier in element_identifiers[:-1]:
current_builder = current_builder.get_level(element_identifier)
current_builder.add_dataset(element_identifiers[-1], dataset)
# Associate new dataset with job
element_identifier_str = ":".join(element_identifiers)
association_name = "__new_primary_file_{}|{}__".format(
name, element_identifier_str
)
self.add_output_dataset_association(association_name, dataset)
root_collection_builder.populate()
self.flush()
self.update_object_store_with_datasets(
datasets=element_datasets["datasets"],
paths=element_datasets["paths"],
extra_files=element_datasets["extra_files"],
)
add_datasets_timer = ExecutionTimer()
self.add_datasets_to_history(element_datasets["datasets"])
log.debug(
"(%s) Add dynamic collection datasets to history for output [%s] %s",
self.job_id(),
name,
add_datasets_timer,
)
self.set_datasets_metadata(datasets=element_datasets["datasets"])
|
def populate_collection_elements(
self,
collection,
root_collection_builder,
filenames,
name=None,
metadata_source_name=None,
final_job_state="ok",
):
# TODO: allow configurable sorting.
# <sort by="lexical" /> <!-- default -->
# <sort by="reverse_lexical" />
# <sort regex="example.(\d+).fastq" by="1:numerical" />
# <sort regex="part_(\d+)_sample_([^_]+).fastq" by="2:lexical,1:numerical" />
if name is None:
name = "unnamed output"
element_datasets = {
"element_identifiers": [],
"datasets": [],
"tag_lists": [],
"paths": [],
"extra_files": [],
}
for filename, discovered_file in filenames.items():
create_dataset_timer = ExecutionTimer()
fields_match = discovered_file.match
if not fields_match:
raise Exception("Problem parsing metadata fields for file %s" % filename)
element_identifiers = fields_match.element_identifiers
designation = fields_match.designation
visible = fields_match.visible
ext = fields_match.ext
dbkey = fields_match.dbkey
extra_files = fields_match.extra_files
# galaxy.tools.parser.output_collection_def.INPUT_DBKEY_TOKEN
if dbkey == "__input__":
dbkey = self.input_dbkey
# Create new primary dataset
dataset_name = fields_match.name or designation
link_data = discovered_file.match.link_data
sources = discovered_file.match.sources
hashes = discovered_file.match.hashes
created_from_basename = discovered_file.match.created_from_basename
dataset = self.create_dataset(
ext=ext,
designation=designation,
visible=visible,
dbkey=dbkey,
name=dataset_name,
metadata_source_name=metadata_source_name,
link_data=link_data,
sources=sources,
hashes=hashes,
created_from_basename=created_from_basename,
final_job_state=final_job_state,
)
log.debug(
"(%s) Created dynamic collection dataset for path [%s] with element identifier [%s] for output [%s] %s",
self.job_id(),
filename,
designation,
name,
create_dataset_timer,
)
element_datasets["element_identifiers"].append(element_identifiers)
element_datasets["extra_files"].append(extra_files)
element_datasets["datasets"].append(dataset)
element_datasets["tag_lists"].append(discovered_file.match.tag_list)
element_datasets["paths"].append(filename)
self.add_tags_to_datasets(
datasets=element_datasets["datasets"], tag_lists=element_datasets["tag_lists"]
)
for element_identifiers, dataset in zip(
element_datasets["element_identifiers"], element_datasets["datasets"]
):
current_builder = root_collection_builder
for element_identifier in element_identifiers[:-1]:
current_builder = current_builder.get_level(element_identifier)
current_builder.add_dataset(element_identifiers[-1], dataset)
# Associate new dataset with job
element_identifier_str = ":".join(element_identifiers)
association_name = "__new_primary_file_{}|{}__".format(
name, element_identifier_str
)
self.add_output_dataset_association(association_name, dataset)
self.flush()
self.update_object_store_with_datasets(
datasets=element_datasets["datasets"],
paths=element_datasets["paths"],
extra_files=element_datasets["extra_files"],
)
add_datasets_timer = ExecutionTimer()
self.add_datasets_to_history(element_datasets["datasets"])
log.debug(
"(%s) Add dynamic collection datasets to history for output [%s] %s",
self.job_id(),
name,
add_datasets_timer,
)
self.set_datasets_metadata(datasets=element_datasets["datasets"])
|
https://github.com/galaxyproject/galaxy/issues/10604
|
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1827028]: galaxy.jobs DEBUG 2020-11-01 14:58:58,991 (12059080) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059080
Nov 01 14:58:58 sn04.bi.uni-freiburg.de python[1847375]: galaxy.jobs DEBUG 2020-11-01 14:58:58,998 (12059086) Working directory for job is: /data/dnb03/galaxy_db/job_working_directory/012/059/12059086
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1847375]: galaxy.objectstore DEBUG 2020-11-01 14:58:59,011 Using preferred backend 'files10' for creation of Dataset 27240484
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: galaxy.job_execution.output_collect ERROR 2020-11-01 14:58:59,036 Problem gathering output collection.
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: psycopg2.errors.NotNullViolation: null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: The above exception was the direct cause of the following exception:
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: Traceback (most recent call last):
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 156, in collect_dynamic_outputs
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: final_job_state=job_context.final_job_state,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/model/store/discover.py", line 286, in populate_collection_elements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/server/lib/galaxy/job_execution/output_collect.py", line 214, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self.sa_session.flush()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/scoping.py", line 163, in do
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return getattr(self.registry(), name)(*args, **kwargs)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2523, in flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: self._flush(objects)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2664, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: transaction.rollback(_capture_exception=True)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 69, in __exit__
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: exc_value, with_traceback=exc_tb,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/session.py", line 2624, in _flush
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: flush_context.execute()
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: rec.execute(self)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: uow,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: update,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/orm/persistence.py", line 995, in _emit_update_statements
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: statement, multiparams
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return meth(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: return connection._execute_clauseelement(self, multiparams, params)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_clauseelement
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: distilled_params,
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1324, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: e, statement, parameters, cursor, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1518, in _handle_dbapi_exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy_exception, with_traceback=exc_info[2], from_=e
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: raise exception
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/base.py", line 1284, in _execute_context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor, statement, parameters, context
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: File "/opt/galaxy/venv/lib64/python3.6/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: cursor.execute(statement, parameters)
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: sqlalchemy.exc.IntegrityError: (psycopg2.errors.NotNullViolation) null value in column "dataset_collection_id" violates not-null constraint
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: DETAIL: Failing row contains (13330967, null, 30637828, null, null, 0, ERR4597396__single).
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [SQL: UPDATE dataset_collection_element SET dataset_collection_id=%(dataset_collection_id)s WHERE dataset_collection_element.id = %(dataset_collection_element_id)s]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: [parameters: {'dataset_collection_id': None, 'dataset_collection_element_id': 13330967}]
Nov 01 14:58:59 sn04.bi.uni-freiburg.de python[1819184]: (Background on this error at: http://sqlalche.me/e/gkpj)
|
sqlalchemy.exc.IntegrityError
|
def export_archive(
self,
trans,
id=None,
gzip=True,
include_hidden=False,
include_deleted=False,
preview=False,
):
"""Export a history to an archive."""
#
# Get history to export.
#
if id:
history = self.history_manager.get_accessible(
self.decode_id(id), trans.user, current_history=trans.history
)
else:
# Use current history.
history = trans.history
id = trans.security.encode_id(history.id)
if not history:
return trans.show_error_message(
"This history does not exist or you cannot export this history."
)
# If history has already been exported and it has not changed since export, stream it.
jeha = history.latest_export
if jeha and jeha.up_to_date:
if jeha.ready:
if preview:
url = url_for(
controller="history", action="export_archive", id=id, qualified=True
)
return trans.show_message(
"History Ready: '%(n)s'. Use this link to download "
"the archive or import it to another Galaxy server: "
"<a href='%(u)s'>%(u)s</a>" % ({"n": history.name, "u": url})
)
else:
return self.serve_ready_history_export(trans, jeha)
elif jeha.preparing:
return trans.show_message(
"Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>"
% (
{
"n": history.name,
"s": url_for(
controller="history",
action="export_archive",
id=id,
qualified=True,
),
}
)
)
self.queue_history_export(
trans,
history,
gzip=gzip,
include_hidden=include_hidden,
include_deleted=include_deleted,
)
url = url_for(controller="history", action="export_archive", id=id, qualified=True)
return trans.show_message(
"Exporting History '%(n)s'. You will need to <a href='%(share)s' target='_top'>make this history 'accessible'</a> in order to import this to another galaxy sever. <br/>"
"Use this link to download the archive or import it to another Galaxy server: "
"<a href='%(u)s'>%(u)s</a>"
% ({"share": url_for("/histories/sharing", id=id), "n": history.name, "u": url})
)
|
def export_archive(
self,
trans,
id=None,
gzip=True,
include_hidden=False,
include_deleted=False,
preview=False,
):
"""Export a history to an archive."""
#
# Get history to export.
#
if id:
history = self.history_manager.get_accessible(
self.decode_id(id), trans.user, current_history=trans.history
)
else:
# Use current history.
history = trans.history
id = trans.security.encode_id(history.id)
if not history:
return trans.show_error_message(
"This history does not exist or you cannot export this history."
)
# If history has already been exported and it has not changed since export, stream it.
jeha = history.latest_export
if jeha and jeha.up_to_date:
if jeha.ready:
if preview:
url = url_for(
controller="history", action="export_archive", id=id, qualified=True
)
return trans.show_message(
"History Ready: '%(n)s'. Use this link to download "
"the archive or import it to another Galaxy server: "
"<a href='%(u)s'>%(u)s</a>" % ({"n": history.name, "u": url})
)
else:
return self.serve_ready_history_export(trans, jeha)
elif jeha.preparing:
return trans.show_message(
"Still exporting history %(n)s; please check back soon. Link: <a href='%(s)s'>%(s)s</a>"
% (
{
"n": history.name,
"s": url_for(
controller="history",
action="export_archive",
id=id,
qualified=True,
),
}
)
)
self.queue_history_export(
trans,
history,
gzip=gzip,
include_hidden=include_hidden,
include_deleted=include_deleted,
)
url = url_for(controller="history", action="export_archive", id=id, qualified=True)
return trans.show_message(
"Exporting History '%(n)s'. You will need to <a href='%(share)s'>make this history 'accessible'</a> in order to import this to another galaxy sever. <br/>"
"Use this link to download the archive or import it to another Galaxy server: "
"<a href='%(u)s'>%(u)s</a>"
% (
{
"share": url_for(controller="history", action="sharing"),
"n": history.name,
"u": url,
}
)
)
|
https://github.com/galaxyproject/galaxy/issues/10040
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 170, in decorator
rval = func(self, trans, *args, **kwargs)
TypeError: sharing() missing 1 required positional argument: 'id'
|
TypeError
|
def execute(
self,
tool,
trans,
incoming=None,
return_job=False,
set_output_hid=True,
history=None,
job_params=None,
rerun_remap_job_id=None,
execution_cache=None,
dataset_collection_elements=None,
completed_job=None,
collection_info=None,
):
"""
Executes a tool, creating job and tool outputs, associating them, and
submitting the job to the job queue. If history is not specified, use
trans.history as destination for tool's output datasets.
"""
trans.check_user_activation()
incoming = incoming or {}
self._check_access(tool, trans)
app = trans.app
if execution_cache is None:
execution_cache = ToolExecutionCache(trans)
current_user_roles = execution_cache.current_user_roles
history, inp_data, inp_dataset_collections, preserved_tags, all_permissions = (
self._collect_inputs(
tool, trans, incoming, history, current_user_roles, collection_info
)
)
# Build name for output datasets based on tool name and input names
on_text = self._get_on_text(inp_data)
# format='input" previously would give you a random extension from
# the input extensions, now it should just give "input" as the output
# format.
input_ext = "data" if tool.profile < 16.04 else "input"
input_dbkey = incoming.get("dbkey", "?")
for name, data in reversed(list(inp_data.items())):
if not data:
data = NoneDataset(datatypes_registry=app.datatypes_registry)
continue
# Convert LDDA to an HDA.
if isinstance(data, LibraryDatasetDatasetAssociation) and not completed_job:
data = data.to_history_dataset_association(None)
inp_data[name] = data
if tool.profile < 16.04:
input_ext = data.ext
if data.dbkey not in [None, "?"]:
input_dbkey = data.dbkey
identifier = getattr(data, "element_identifier", None)
if identifier is not None:
incoming["%s|__identifier__" % name] = identifier
# Collect chromInfo dataset and add as parameters to incoming
(chrom_info, db_dataset) = execution_cache.get_chrom_info(tool.id, input_dbkey)
if db_dataset:
inp_data.update({"chromInfo": db_dataset})
incoming["chromInfo"] = chrom_info
if not completed_job:
# Determine output dataset permission/roles list
existing_datasets = [inp for inp in inp_data.values() if inp]
if existing_datasets:
output_permissions = app.security_agent.guess_derived_permissions(
all_permissions
)
else:
# No valid inputs, we will use history defaults
output_permissions = app.security_agent.history_get_default_permissions(
history
)
# Add the dbkey to the incoming parameters
incoming["dbkey"] = input_dbkey
# wrapped params are used by change_format action and by output.label; only perform this wrapping once, as needed
wrapped_params = self._wrapped_params(trans, tool, incoming, inp_data)
out_data = OrderedDict()
input_collections = dict((k, v[0][0]) for k, v in inp_dataset_collections.items())
output_collections = OutputCollections(
trans,
history,
tool=tool,
tool_action=self,
input_collections=input_collections,
dataset_collection_elements=dataset_collection_elements,
on_text=on_text,
incoming=incoming,
params=wrapped_params.params,
job_params=job_params,
tags=preserved_tags,
)
# Keep track of parent / child relationships, we'll create all the
# datasets first, then create the associations
parent_to_child_pairs = []
child_dataset_names = set()
object_store_populator = ObjectStorePopulator(app)
async_tool = tool.tool_type == "data_source_async"
def handle_output(name, output, hidden=None):
if output.parent:
parent_to_child_pairs.append((output.parent, name))
child_dataset_names.add(name)
if async_tool and name in incoming:
# HACK: output data has already been created as a result of the async controller
dataid = incoming[name]
data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(
dataid
)
assert data is not None
out_data[name] = data
else:
ext = determine_output_format(
output,
wrapped_params.params,
inp_data,
inp_dataset_collections,
input_ext,
python_template_version=tool.python_template_version,
)
create_datasets = True
dataset = None
if completed_job:
for output_dataset in completed_job.output_datasets:
if output_dataset.name == name:
create_datasets = False
completed_data = output_dataset.dataset
dataset = output_dataset.dataset.dataset
break
data = app.model.HistoryDatasetAssociation(
extension=ext,
dataset=dataset,
create_dataset=create_datasets,
flush=False,
)
if create_datasets:
from_work_dir = output.from_work_dir
if from_work_dir is not None:
data.dataset.created_from_basename = os.path.basename(from_work_dir)
if hidden is None:
hidden = output.hidden
if (
not hidden and dataset_collection_elements is not None
): # Mapping over a collection - hide datasets
hidden = True
if hidden:
data.visible = False
if (
dataset_collection_elements is not None
and name in dataset_collection_elements
):
dataset_collection_elements[name].hda = data
trans.sa_session.add(data)
if not completed_job:
trans.app.security_agent.set_all_dataset_permissions(
data.dataset, output_permissions, new=True
)
data.copy_tags_to(preserved_tags)
if (
not completed_job
and trans.app.config.legacy_eager_objectstore_initialization
):
# Must flush before setting object store id currently.
trans.sa_session.flush()
object_store_populator.set_object_store_id(data)
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
# metadata source can be either a string referencing an input
# or an actual object to copy.
metadata_source = output.metadata_source
if metadata_source:
if isinstance(metadata_source, string_types):
metadata_source = inp_data.get(metadata_source)
if metadata_source is not None:
data.init_meta(copy_from=metadata_source)
else:
data.init_meta()
# Take dbkey from LAST input
data.dbkey = str(input_dbkey)
# Set state
if completed_job:
data.blurb = completed_data.blurb
data.peek = completed_data.peek
data._metadata = completed_data._metadata
else:
data.blurb = "queued"
# Set output label
data.name = self.get_output_name(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Store output
out_data[name] = data
if output.actions:
# Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format
output_action_params = dict(out_data)
output_action_params.update(incoming)
output.actions.apply_action(data, output_action_params)
# Also set the default values of actions of type metadata
self.set_metadata_defaults(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Flush all datasets at once.
return data
for name, output in tool.outputs.items():
if not filter_output(output, incoming):
handle_output_timer = ExecutionTimer()
if output.collection:
collections_manager = app.dataset_collections_service
element_identifiers = []
known_outputs = output.known_outputs(
input_collections, collections_manager.type_registry
)
created_element_datasets = []
# Just to echo TODO elsewhere - this should be restructured to allow
# nested collections.
for output_part_def in known_outputs:
# Add elements to top-level collection, unless nested...
current_element_identifiers = element_identifiers
current_collection_type = output.structure.collection_type
for parent_id in output_part_def.parent_ids or []:
# TODO: replace following line with formal abstractions for doing this.
current_collection_type = ":".join(
current_collection_type.split(":")[1:]
)
name_to_index = dict(
(value["name"], index)
for (index, value) in enumerate(current_element_identifiers)
)
if parent_id not in name_to_index:
if parent_id not in current_element_identifiers:
index = len(current_element_identifiers)
current_element_identifiers.append(
dict(
name=parent_id,
collection_type=current_collection_type,
src="new_collection",
element_identifiers=[],
)
)
else:
index = name_to_index[parent_id]
current_element_identifiers = current_element_identifiers[
index
]["element_identifiers"]
effective_output_name = output_part_def.effective_output_name
element = handle_output(
effective_output_name, output_part_def.output_def, hidden=True
)
created_element_datasets.append(element)
# TODO: this shouldn't exist in the top-level of the history at all
# but for now we are still working around that by hiding the contents
# there.
# Following hack causes dataset to no be added to history...
child_dataset_names.add(effective_output_name)
trans.sa_session.add(element)
current_element_identifiers.append(
{
"__object__": element,
"name": output_part_def.element_identifier,
}
)
history.add_datasets(
trans.sa_session,
created_element_datasets,
set_hid=set_output_hid,
quota=False,
flush=True,
)
if output.dynamic_structure:
assert not element_identifiers # known_outputs must have been empty
element_kwds = dict(
elements=collections_manager.ELEMENTS_UNINITIALIZED
)
else:
element_kwds = dict(element_identifiers=element_identifiers)
output_collections.create_collection(
output=output, name=name, **element_kwds
)
log.info(
"Handled collection output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
else:
handle_output(name, output)
log.info(
"Handled output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
add_datasets_timer = ExecutionTimer()
# Add all the top-level (non-child) datasets to the history unless otherwise specified
datasets_to_persist = []
for name, data in out_data.items():
if (
name not in child_dataset_names and name not in incoming
): # don't add children; or already existing datasets, i.e. async created
datasets_to_persist.append(data)
# Set HID and add to history.
# This is brand new and certainly empty so don't worry about quota.
history.add_datasets(
trans.sa_session,
datasets_to_persist,
set_hid=set_output_hid,
quota=False,
flush=False,
)
# Add all the children to their parents
for parent_name, child_name in parent_to_child_pairs:
parent_dataset = out_data[parent_name]
child_dataset = out_data[child_name]
parent_dataset.children.append(child_dataset)
log.info("Added output datasets to history %s" % add_datasets_timer)
job_setup_timer = ExecutionTimer()
# Create the job object
job, galaxy_session = self._new_job_for_session(trans, tool, history)
self._record_inputs(trans, tool, job, incoming, inp_data, inp_dataset_collections)
self._record_outputs(job, out_data, output_collections)
job.object_store_id = object_store_populator.object_store_id
if job_params:
job.params = dumps(job_params)
if completed_job:
job.set_copied_from_job_id(completed_job.id)
trans.sa_session.add(job)
# Now that we have a job id, we can remap any outputs if this is a rerun and the user chose to continue dependent jobs
# This functionality requires tracking jobs in the database.
if app.config.track_jobs_in_database and rerun_remap_job_id is not None:
self._remap_job_on_rerun(
trans=trans,
galaxy_session=galaxy_session,
rerun_remap_job_id=rerun_remap_job_id,
current_job=job,
out_data=out_data,
)
log.info(
"Setup for job %s complete, ready to be enqueued %s"
% (job.log_str(), job_setup_timer)
)
# Some tools are not really executable, but jobs are still created for them ( for record keeping ).
# Examples include tools that redirect to other applications ( epigraph ). These special tools must
# include something that can be retrieved from the params ( e.g., REDIRECT_URL ) to keep the job
# from being queued.
if "REDIRECT_URL" in incoming:
# Get the dataset - there should only be 1
for name in inp_data.keys():
dataset = inp_data[name]
redirect_url = tool.parse_redirect_url(dataset, incoming)
# GALAXY_URL should be include in the tool params to enable the external application
# to send back to the current Galaxy instance
GALAXY_URL = incoming.get("GALAXY_URL", None)
assert GALAXY_URL is not None, "GALAXY_URL parameter missing in tool config."
redirect_url += "&GALAXY_URL=%s" % GALAXY_URL
# Job should not be queued, so set state to ok
job.set_state(app.model.Job.states.OK)
job.info = "Redirected to: %s" % redirect_url
trans.sa_session.add(job)
trans.sa_session.flush()
trans.response.send_redirect(
url_for(
controller="tool_runner", action="redirect", redirect_url=redirect_url
)
)
else:
# Dispatch to a job handler. enqueue() is responsible for flushing the job
app.job_manager.enqueue(job, tool=tool)
trans.log_event(
"Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id
)
return job, out_data
|
def execute(
self,
tool,
trans,
incoming=None,
return_job=False,
set_output_hid=True,
history=None,
job_params=None,
rerun_remap_job_id=None,
execution_cache=None,
dataset_collection_elements=None,
completed_job=None,
collection_info=None,
):
"""
Executes a tool, creating job and tool outputs, associating them, and
submitting the job to the job queue. If history is not specified, use
trans.history as destination for tool's output datasets.
"""
trans.check_user_activation()
incoming = incoming or {}
self._check_access(tool, trans)
app = trans.app
if execution_cache is None:
execution_cache = ToolExecutionCache(trans)
current_user_roles = execution_cache.current_user_roles
history, inp_data, inp_dataset_collections, preserved_tags, all_permissions = (
self._collect_inputs(
tool, trans, incoming, history, current_user_roles, collection_info
)
)
# Build name for output datasets based on tool name and input names
on_text = self._get_on_text(inp_data)
# format='input" previously would give you a random extension from
# the input extensions, now it should just give "input" as the output
# format.
input_ext = "data" if tool.profile < 16.04 else "input"
input_dbkey = incoming.get("dbkey", "?")
for name, data in reversed(list(inp_data.items())):
if not data:
data = NoneDataset(datatypes_registry=app.datatypes_registry)
continue
# Convert LDDA to an HDA.
if isinstance(data, LibraryDatasetDatasetAssociation) and not completed_job:
data = data.to_history_dataset_association(None)
inp_data[name] = data
if tool.profile < 16.04:
input_ext = data.ext
if data.dbkey not in [None, "?"]:
input_dbkey = data.dbkey
identifier = getattr(data, "element_identifier", None)
if identifier is not None:
incoming["%s|__identifier__" % name] = identifier
# Collect chromInfo dataset and add as parameters to incoming
(chrom_info, db_dataset) = execution_cache.get_chrom_info(tool.id, input_dbkey)
if db_dataset:
inp_data.update({"chromInfo": db_dataset})
incoming["chromInfo"] = chrom_info
if not completed_job:
# Determine output dataset permission/roles list
existing_datasets = [inp for inp in inp_data.values() if inp]
if existing_datasets:
output_permissions = app.security_agent.guess_derived_permissions(
all_permissions
)
else:
# No valid inputs, we will use history defaults
output_permissions = app.security_agent.history_get_default_permissions(
history
)
# Add the dbkey to the incoming parameters
incoming["dbkey"] = input_dbkey
# wrapped params are used by change_format action and by output.label; only perform this wrapping once, as needed
wrapped_params = self._wrapped_params(trans, tool, incoming, inp_data)
out_data = OrderedDict()
input_collections = dict((k, v[0][0]) for k, v in inp_dataset_collections.items())
output_collections = OutputCollections(
trans,
history,
tool=tool,
tool_action=self,
input_collections=input_collections,
dataset_collection_elements=dataset_collection_elements,
on_text=on_text,
incoming=incoming,
params=wrapped_params.params,
job_params=job_params,
tags=preserved_tags,
)
# Keep track of parent / child relationships, we'll create all the
# datasets first, then create the associations
parent_to_child_pairs = []
child_dataset_names = set()
object_store_populator = ObjectStorePopulator(app)
def handle_output(name, output, hidden=None):
if output.parent:
parent_to_child_pairs.append((output.parent, name))
child_dataset_names.add(name)
# What is the following hack for? Need to document under what
# conditions can the following occur? (james@bx.psu.edu)
# HACK: the output data has already been created
# this happens i.e. as a result of the async controller
if name in incoming:
dataid = incoming[name]
data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(
dataid
)
assert data is not None
out_data[name] = data
else:
ext = determine_output_format(
output,
wrapped_params.params,
inp_data,
inp_dataset_collections,
input_ext,
python_template_version=tool.python_template_version,
)
create_datasets = True
dataset = None
if completed_job:
for output_dataset in completed_job.output_datasets:
if output_dataset.name == name:
create_datasets = False
completed_data = output_dataset.dataset
dataset = output_dataset.dataset.dataset
break
data = app.model.HistoryDatasetAssociation(
extension=ext,
dataset=dataset,
create_dataset=create_datasets,
flush=False,
)
if create_datasets:
from_work_dir = output.from_work_dir
if from_work_dir is not None:
data.dataset.created_from_basename = os.path.basename(from_work_dir)
if hidden is None:
hidden = output.hidden
if (
not hidden and dataset_collection_elements is not None
): # Mapping over a collection - hide datasets
hidden = True
if hidden:
data.visible = False
if (
dataset_collection_elements is not None
and name in dataset_collection_elements
):
dataset_collection_elements[name].hda = data
trans.sa_session.add(data)
if not completed_job:
trans.app.security_agent.set_all_dataset_permissions(
data.dataset, output_permissions, new=True
)
data.copy_tags_to(preserved_tags)
if (
not completed_job
and trans.app.config.legacy_eager_objectstore_initialization
):
# Must flush before setting object store id currently.
trans.sa_session.flush()
object_store_populator.set_object_store_id(data)
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
# metadata source can be either a string referencing an input
# or an actual object to copy.
metadata_source = output.metadata_source
if metadata_source:
if isinstance(metadata_source, string_types):
metadata_source = inp_data.get(metadata_source)
if metadata_source is not None:
data.init_meta(copy_from=metadata_source)
else:
data.init_meta()
# Take dbkey from LAST input
data.dbkey = str(input_dbkey)
# Set state
if completed_job:
data.blurb = completed_data.blurb
data.peek = completed_data.peek
data._metadata = completed_data._metadata
else:
data.blurb = "queued"
# Set output label
data.name = self.get_output_name(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Store output
out_data[name] = data
if output.actions:
# Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format
output_action_params = dict(out_data)
output_action_params.update(incoming)
output.actions.apply_action(data, output_action_params)
# Also set the default values of actions of type metadata
self.set_metadata_defaults(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Flush all datasets at once.
return data
for name, output in tool.outputs.items():
if not filter_output(output, incoming):
handle_output_timer = ExecutionTimer()
if output.collection:
collections_manager = app.dataset_collections_service
element_identifiers = []
known_outputs = output.known_outputs(
input_collections, collections_manager.type_registry
)
created_element_datasets = []
# Just to echo TODO elsewhere - this should be restructured to allow
# nested collections.
for output_part_def in known_outputs:
# Add elements to top-level collection, unless nested...
current_element_identifiers = element_identifiers
current_collection_type = output.structure.collection_type
for parent_id in output_part_def.parent_ids or []:
# TODO: replace following line with formal abstractions for doing this.
current_collection_type = ":".join(
current_collection_type.split(":")[1:]
)
name_to_index = dict(
(value["name"], index)
for (index, value) in enumerate(current_element_identifiers)
)
if parent_id not in name_to_index:
if parent_id not in current_element_identifiers:
index = len(current_element_identifiers)
current_element_identifiers.append(
dict(
name=parent_id,
collection_type=current_collection_type,
src="new_collection",
element_identifiers=[],
)
)
else:
index = name_to_index[parent_id]
current_element_identifiers = current_element_identifiers[
index
]["element_identifiers"]
effective_output_name = output_part_def.effective_output_name
element = handle_output(
effective_output_name, output_part_def.output_def, hidden=True
)
created_element_datasets.append(element)
# TODO: this shouldn't exist in the top-level of the history at all
# but for now we are still working around that by hiding the contents
# there.
# Following hack causes dataset to no be added to history...
child_dataset_names.add(effective_output_name)
trans.sa_session.add(element)
current_element_identifiers.append(
{
"__object__": element,
"name": output_part_def.element_identifier,
}
)
history.add_datasets(
trans.sa_session,
created_element_datasets,
set_hid=set_output_hid,
quota=False,
flush=True,
)
if output.dynamic_structure:
assert not element_identifiers # known_outputs must have been empty
element_kwds = dict(
elements=collections_manager.ELEMENTS_UNINITIALIZED
)
else:
element_kwds = dict(element_identifiers=element_identifiers)
output_collections.create_collection(
output=output, name=name, **element_kwds
)
log.info(
"Handled collection output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
else:
handle_output(name, output)
log.info(
"Handled output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
add_datasets_timer = ExecutionTimer()
# Add all the top-level (non-child) datasets to the history unless otherwise specified
datasets_to_persist = []
for name, data in out_data.items():
if (
name not in child_dataset_names and name not in incoming
): # don't add children; or already existing datasets, i.e. async created
datasets_to_persist.append(data)
# Set HID and add to history.
# This is brand new and certainly empty so don't worry about quota.
history.add_datasets(
trans.sa_session,
datasets_to_persist,
set_hid=set_output_hid,
quota=False,
flush=False,
)
# Add all the children to their parents
for parent_name, child_name in parent_to_child_pairs:
parent_dataset = out_data[parent_name]
child_dataset = out_data[child_name]
parent_dataset.children.append(child_dataset)
log.info("Added output datasets to history %s" % add_datasets_timer)
job_setup_timer = ExecutionTimer()
# Create the job object
job, galaxy_session = self._new_job_for_session(trans, tool, history)
self._record_inputs(trans, tool, job, incoming, inp_data, inp_dataset_collections)
self._record_outputs(job, out_data, output_collections)
job.object_store_id = object_store_populator.object_store_id
if job_params:
job.params = dumps(job_params)
if completed_job:
job.set_copied_from_job_id(completed_job.id)
trans.sa_session.add(job)
# Now that we have a job id, we can remap any outputs if this is a rerun and the user chose to continue dependent jobs
# This functionality requires tracking jobs in the database.
if app.config.track_jobs_in_database and rerun_remap_job_id is not None:
self._remap_job_on_rerun(
trans=trans,
galaxy_session=galaxy_session,
rerun_remap_job_id=rerun_remap_job_id,
current_job=job,
out_data=out_data,
)
log.info(
"Setup for job %s complete, ready to be enqueued %s"
% (job.log_str(), job_setup_timer)
)
# Some tools are not really executable, but jobs are still created for them ( for record keeping ).
# Examples include tools that redirect to other applications ( epigraph ). These special tools must
# include something that can be retrieved from the params ( e.g., REDIRECT_URL ) to keep the job
# from being queued.
if "REDIRECT_URL" in incoming:
# Get the dataset - there should only be 1
for name in inp_data.keys():
dataset = inp_data[name]
redirect_url = tool.parse_redirect_url(dataset, incoming)
# GALAXY_URL should be include in the tool params to enable the external application
# to send back to the current Galaxy instance
GALAXY_URL = incoming.get("GALAXY_URL", None)
assert GALAXY_URL is not None, "GALAXY_URL parameter missing in tool config."
redirect_url += "&GALAXY_URL=%s" % GALAXY_URL
# Job should not be queued, so set state to ok
job.set_state(app.model.Job.states.OK)
job.info = "Redirected to: %s" % redirect_url
trans.sa_session.add(job)
trans.sa_session.flush()
trans.response.send_redirect(
url_for(
controller="tool_runner", action="redirect", redirect_url=redirect_url
)
)
else:
# Dispatch to a job handler. enqueue() is responsible for flushing the job
app.job_manager.enqueue(job, tool=tool)
trans.log_event(
"Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id
)
return job, out_data
|
https://github.com/galaxyproject/galaxy/issues/9241
|
2020-01-20T10:34:14.9786332Z 2020-01-20 10:34:14,974 ERROR [galaxy.tools] Exception caught while attempting tool execution:
2020-01-20T10:34:14.9787128Z Traceback (most recent call last):
2020-01-20T10:34:14.9789375Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1246, in _execute_context
2020-01-20T10:34:14.9790073Z cursor, statement, parameters, context
2020-01-20T10:34:14.9790998Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 581, in do_execute
2020-01-20T10:34:14.9791567Z cursor.execute(statement, parameters)
2020-01-20T10:34:14.9792168Z psycopg2.errors.UndefinedFunction: operator does not exist: integer = boolean
2020-01-20T10:34:14.9792461Z LINE 3: WHERE history_dataset_association.id = true
2020-01-20T10:34:14.9792893Z ^
2020-01-20T10:34:14.9793157Z HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
2020-01-20T10:34:14.9793379Z
2020-01-20T10:34:14.9793559Z
2020-01-20T10:34:14.9793800Z The above exception was the direct cause of the following exception:
2020-01-20T10:34:14.9793992Z
2020-01-20T10:34:14.9794231Z Traceback (most recent call last):
2020-01-20T10:34:14.9794979Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/__init__.py", line 1517, in handle_single_execution
2020-01-20T10:34:14.9803112Z collection_info=collection_info,
2020-01-20T10:34:14.9804371Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/__init__.py", line 1599, in execute
2020-01-20T10:34:14.9845469Z return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
2020-01-20T10:34:14.9846453Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/actions/__init__.py", line 517, in execute
2020-01-20T10:34:14.9846652Z handle_output(name, output)
2020-01-20T10:34:14.9847218Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/actions/__init__.py", line 374, in handle_output
2020-01-20T10:34:14.9901226Z data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(dataid)
2020-01-20T10:34:14.9902192Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 982, in get
2020-01-20T10:34:14.9902563Z return self._get_impl(ident, loading.load_on_pk_identity)
2020-01-20T10:34:14.9903210Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 1094, in _get_impl
2020-01-20T10:34:14.9903870Z return db_load_fn(self, primary_key_identity)
2020-01-20T10:34:14.9904659Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 284, in load_on_pk_identity
2020-01-20T10:34:14.9905130Z return q.one()
2020-01-20T10:34:14.9905712Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3325, in one
2020-01-20T10:34:14.9906010Z ret = self.one_or_none()
2020-01-20T10:34:14.9906608Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3294, in one_or_none
2020-01-20T10:34:14.9906934Z ret = list(self)
2020-01-20T10:34:14.9907533Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3367, in __iter__
2020-01-20T10:34:14.9908680Z return self._execute_and_instances(context)
2020-01-20T10:34:14.9909655Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3392, in _execute_and_instances
2020-01-20T10:34:14.9910213Z result = conn.execute(querycontext.statement, self._params)
2020-01-20T10:34:14.9910856Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 982, in execute
2020-01-20T10:34:14.9911149Z return meth(self, multiparams, params)
2020-01-20T10:34:14.9911929Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
2020-01-20T10:34:14.9912190Z return connection._execute_clauseelement(self, multiparams, params)
2020-01-20T10:34:14.9913022Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1101, in _execute_clauseelement
2020-01-20T10:34:14.9913329Z distilled_params,
2020-01-20T10:34:14.9914298Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1250, in _execute_context
2020-01-20T10:34:14.9914575Z e, statement, parameters, cursor, context
2020-01-20T10:34:14.9915355Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1476, in _handle_dbapi_exception
2020-01-20T10:34:14.9915624Z util.raise_from_cause(sqlalchemy_exception, exc_info)
2020-01-20T10:34:14.9916183Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
2020-01-20T10:34:14.9916453Z reraise(type(exception), exception, tb=exc_tb, cause=cause)
2020-01-20T10:34:14.9917093Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
2020-01-20T10:34:14.9918037Z raise value.with_traceback(tb)
2020-01-20T10:34:14.9918720Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1246, in _execute_context
2020-01-20T10:34:14.9919011Z cursor, statement, parameters, context
2020-01-20T10:34:14.9919626Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 581, in do_execute
2020-01-20T10:34:14.9919938Z cursor.execute(statement, parameters)
2020-01-20T10:34:14.9920164Z sqlalchemy.exc.ProgrammingError: (psycopg2.errors.UndefinedFunction) operator does not exist: integer = boolean
2020-01-20T10:34:14.9920430Z LINE 3: WHERE history_dataset_association.id = true
2020-01-20T10:34:14.9920667Z ^
2020-01-20T10:34:14.9920906Z HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
2020-01-20T10:34:14.9921594Z
2020-01-20T10:34:14.9923082Z [SQL: SELECT history_dataset_association.id AS history_dataset_association_id, history_dataset_association.history_id AS history_dataset_association_history_id, history_dataset_association.dataset_id AS history_dataset_association_dataset_id, history_dataset_association.create_time AS history_dataset_association_create_time, history_dataset_association.update_time AS history_dataset_association_update_time, history_dataset_association.state AS history_dataset_association_state, history_dataset_association.copied_from_history_dataset_association_id AS history_dataset_association_copied_from_history_dataset_a_1, history_dataset_association.copied_from_library_dataset_dataset_association_id AS history_dataset_association_copied_from_library_dataset_d_2, history_dataset_association.name AS history_dataset_association_name, history_dataset_association.info AS history_dataset_association_info, history_dataset_association.blurb AS history_dataset_association_blurb, history_dataset_association.peek AS history_dataset_association_peek, history_dataset_association.tool_version AS history_dataset_association_tool_version, history_dataset_association.extension AS history_dataset_association_extension, history_dataset_association.parent_id AS history_dataset_association_parent_id, history_dataset_association.designation AS history_dataset_association_designation, history_dataset_association.deleted AS history_dataset_association_deleted, history_dataset_association.visible AS history_dataset_association_visible, history_dataset_association.extended_metadata_id AS history_dataset_association_extended_metadata_id, history_dataset_association.version AS history_dataset_association_version, history_dataset_association.hid AS history_dataset_association_hid, history_dataset_association.purged AS history_dataset_association_purged, history_dataset_association.validated_state AS history_dataset_association_validated_state, history_dataset_association.validated_state_message AS history_dataset_association_validated_state_message, history_dataset_association.hidden_beneath_collection_instance_id AS history_dataset_association_hidden_beneath_collection_ins_3, dataset_1.id AS dataset_1_id, dataset_1.create_time AS dataset_1_create_time, dataset_1.update_time AS dataset_1_update_time, dataset_1.state AS dataset_1_state, dataset_1.deleted AS dataset_1_deleted, dataset_1.purged AS dataset_1_purged, dataset_1.purgable AS dataset_1_purgable, dataset_1.object_store_id AS dataset_1_object_store_id, dataset_1.external_filename AS dataset_1_external_filename, dataset_1._extra_files_path AS dataset_1__extra_files_path, dataset_1.created_from_basename AS dataset_1_created_from_basename, dataset_1.file_size AS dataset_1_file_size, dataset_1.total_size AS dataset_1_total_size, dataset_1.uuid AS dataset_1_uuid
2020-01-20T10:34:14.9924421Z FROM history_dataset_association LEFT OUTER JOIN dataset AS dataset_1 ON dataset_1.id = history_dataset_association.dataset_id
2020-01-20T10:34:14.9925146Z WHERE history_dataset_association.id = %(param_1)s]
2020-01-20T10:34:14.9925764Z [parameters: {'param_1': True}]
2020-01-20T10:34:14.9926632Z (Background on this error at: http://sqlalche.me/e/f405)
2020-01-20T10:34:14.9927166Z 2020-01-20 10:34:14,978 WARNI [galaxy.tools.execute] There was a failure executing a job for tool [disco] - Error executing tool: (psycopg2.errors.UndefinedFunction) operator does not exist: integer = boolean
2020-01-20T10:34:14.9927664Z LINE 3: WHERE history_dataset_association.id = true
2020-01-20T10:34:14.9927807Z ^
2020-01-20T10:34:14.9928361Z HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
2020-01-20T10:34:14.9928441Z
2020-01-20T10:34:14.9929588Z [SQL: SELECT history_dataset_association.id AS history_dataset_association_id, history_dataset_association.history_id AS history_dataset_association_history_id, history_dataset_association.dataset_id AS history_dataset_association_dataset_id, history_dataset_association.create_time AS history_dataset_association_create_time, history_dataset_association.update_time AS history_dataset_association_update_time, history_dataset_association.state AS history_dataset_association_state, history_dataset_association.copied_from_history_dataset_association_id AS history_dataset_association_copied_from_history_dataset_a_1, history_dataset_association.copied_from_library_dataset_dataset_association_id AS history_dataset_association_copied_from_library_dataset_d_2, history_dataset_association.name AS history_dataset_association_name, history_dataset_association.info AS history_dataset_association_info, history_dataset_association.blurb AS history_dataset_association_blurb, history_dataset_association.peek AS history_dataset_association_peek, history_dataset_association.tool_version AS history_dataset_association_tool_version, history_dataset_association.extension AS history_dataset_association_extension, history_dataset_association.parent_id AS history_dataset_association_parent_id, history_dataset_association.designation AS history_dataset_association_designation, history_dataset_association.deleted AS history_dataset_association_deleted, history_dataset_association.visible AS history_dataset_association_visible, history_dataset_association.extended_metadata_id AS history_dataset_association_extended_metadata_id, history_dataset_association.version AS history_dataset_association_version, history_dataset_association.hid AS history_dataset_association_hid, history_dataset_association.purged AS history_dataset_association_purged, history_dataset_association.validated_state AS history_dataset_association_validated_state, history_dataset_association.validated_state_message AS history_dataset_association_validated_state_message, history_dataset_association.hidden_beneath_collection_instance_id AS history_dataset_association_hidden_beneath_collection_ins_3, dataset_1.id AS dataset_1_id, dataset_1.create_time AS dataset_1_create_time, dataset_1.update_time AS dataset_1_update_time, dataset_1.state AS dataset_1_state, dataset_1.deleted AS dataset_1_deleted, dataset_1.purged AS dataset_1_purged, dataset_1.purgable AS dataset_1_purgable, dataset_1.object_store_id AS dataset_1_object_store_id, dataset_1.external_filename AS dataset_1_external_filename, dataset_1._extra_files_path AS dataset_1__extra_files_path, dataset_1.created_from_basename AS dataset_1_created_from_basename, dataset_1.file_size AS dataset_1_file_size, dataset_1.total_size AS dataset_1_total_size, dataset_1.uuid AS dataset_1_uuid
2020-01-20T10:34:14.9930717Z FROM history_dataset_association LEFT OUTER JOIN dataset AS dataset_1 ON dataset_1.id = history_dataset_association.dataset_id
2020-01-20T10:34:14.9930866Z WHERE history_dataset_association.id = %(param_1)s]
2020-01-20T10:34:14.9931266Z [parameters: {'param_1': True}]
2020-01-20T10:34:14.9931395Z (Background on this error at: http://sqlalche.me/e/f405)
2020-01-20T10:34:14.9931685Z ERROR
|
sqlalchemy.exc.ProgrammingError
|
def handle_output(name, output, hidden=None):
if output.parent:
parent_to_child_pairs.append((output.parent, name))
child_dataset_names.add(name)
if async_tool and name in incoming:
# HACK: output data has already been created as a result of the async controller
dataid = incoming[name]
data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(dataid)
assert data is not None
out_data[name] = data
else:
ext = determine_output_format(
output,
wrapped_params.params,
inp_data,
inp_dataset_collections,
input_ext,
python_template_version=tool.python_template_version,
)
create_datasets = True
dataset = None
if completed_job:
for output_dataset in completed_job.output_datasets:
if output_dataset.name == name:
create_datasets = False
completed_data = output_dataset.dataset
dataset = output_dataset.dataset.dataset
break
data = app.model.HistoryDatasetAssociation(
extension=ext, dataset=dataset, create_dataset=create_datasets, flush=False
)
if create_datasets:
from_work_dir = output.from_work_dir
if from_work_dir is not None:
data.dataset.created_from_basename = os.path.basename(from_work_dir)
if hidden is None:
hidden = output.hidden
if (
not hidden and dataset_collection_elements is not None
): # Mapping over a collection - hide datasets
hidden = True
if hidden:
data.visible = False
if (
dataset_collection_elements is not None
and name in dataset_collection_elements
):
dataset_collection_elements[name].hda = data
trans.sa_session.add(data)
if not completed_job:
trans.app.security_agent.set_all_dataset_permissions(
data.dataset, output_permissions, new=True
)
data.copy_tags_to(preserved_tags)
if not completed_job and trans.app.config.legacy_eager_objectstore_initialization:
# Must flush before setting object store id currently.
trans.sa_session.flush()
object_store_populator.set_object_store_id(data)
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
# metadata source can be either a string referencing an input
# or an actual object to copy.
metadata_source = output.metadata_source
if metadata_source:
if isinstance(metadata_source, string_types):
metadata_source = inp_data.get(metadata_source)
if metadata_source is not None:
data.init_meta(copy_from=metadata_source)
else:
data.init_meta()
# Take dbkey from LAST input
data.dbkey = str(input_dbkey)
# Set state
if completed_job:
data.blurb = completed_data.blurb
data.peek = completed_data.peek
data._metadata = completed_data._metadata
else:
data.blurb = "queued"
# Set output label
data.name = self.get_output_name(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Store output
out_data[name] = data
if output.actions:
# Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format
output_action_params = dict(out_data)
output_action_params.update(incoming)
output.actions.apply_action(data, output_action_params)
# Also set the default values of actions of type metadata
self.set_metadata_defaults(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Flush all datasets at once.
return data
|
def handle_output(name, output, hidden=None):
if output.parent:
parent_to_child_pairs.append((output.parent, name))
child_dataset_names.add(name)
# What is the following hack for? Need to document under what
# conditions can the following occur? (james@bx.psu.edu)
# HACK: the output data has already been created
# this happens i.e. as a result of the async controller
if name in incoming:
dataid = incoming[name]
data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(dataid)
assert data is not None
out_data[name] = data
else:
ext = determine_output_format(
output,
wrapped_params.params,
inp_data,
inp_dataset_collections,
input_ext,
python_template_version=tool.python_template_version,
)
create_datasets = True
dataset = None
if completed_job:
for output_dataset in completed_job.output_datasets:
if output_dataset.name == name:
create_datasets = False
completed_data = output_dataset.dataset
dataset = output_dataset.dataset.dataset
break
data = app.model.HistoryDatasetAssociation(
extension=ext, dataset=dataset, create_dataset=create_datasets, flush=False
)
if create_datasets:
from_work_dir = output.from_work_dir
if from_work_dir is not None:
data.dataset.created_from_basename = os.path.basename(from_work_dir)
if hidden is None:
hidden = output.hidden
if (
not hidden and dataset_collection_elements is not None
): # Mapping over a collection - hide datasets
hidden = True
if hidden:
data.visible = False
if (
dataset_collection_elements is not None
and name in dataset_collection_elements
):
dataset_collection_elements[name].hda = data
trans.sa_session.add(data)
if not completed_job:
trans.app.security_agent.set_all_dataset_permissions(
data.dataset, output_permissions, new=True
)
data.copy_tags_to(preserved_tags)
if not completed_job and trans.app.config.legacy_eager_objectstore_initialization:
# Must flush before setting object store id currently.
trans.sa_session.flush()
object_store_populator.set_object_store_id(data)
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
# metadata source can be either a string referencing an input
# or an actual object to copy.
metadata_source = output.metadata_source
if metadata_source:
if isinstance(metadata_source, string_types):
metadata_source = inp_data.get(metadata_source)
if metadata_source is not None:
data.init_meta(copy_from=metadata_source)
else:
data.init_meta()
# Take dbkey from LAST input
data.dbkey = str(input_dbkey)
# Set state
if completed_job:
data.blurb = completed_data.blurb
data.peek = completed_data.peek
data._metadata = completed_data._metadata
else:
data.blurb = "queued"
# Set output label
data.name = self.get_output_name(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Store output
out_data[name] = data
if output.actions:
# Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format
output_action_params = dict(out_data)
output_action_params.update(incoming)
output.actions.apply_action(data, output_action_params)
# Also set the default values of actions of type metadata
self.set_metadata_defaults(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Flush all datasets at once.
return data
|
https://github.com/galaxyproject/galaxy/issues/9241
|
2020-01-20T10:34:14.9786332Z 2020-01-20 10:34:14,974 ERROR [galaxy.tools] Exception caught while attempting tool execution:
2020-01-20T10:34:14.9787128Z Traceback (most recent call last):
2020-01-20T10:34:14.9789375Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1246, in _execute_context
2020-01-20T10:34:14.9790073Z cursor, statement, parameters, context
2020-01-20T10:34:14.9790998Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 581, in do_execute
2020-01-20T10:34:14.9791567Z cursor.execute(statement, parameters)
2020-01-20T10:34:14.9792168Z psycopg2.errors.UndefinedFunction: operator does not exist: integer = boolean
2020-01-20T10:34:14.9792461Z LINE 3: WHERE history_dataset_association.id = true
2020-01-20T10:34:14.9792893Z ^
2020-01-20T10:34:14.9793157Z HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
2020-01-20T10:34:14.9793379Z
2020-01-20T10:34:14.9793559Z
2020-01-20T10:34:14.9793800Z The above exception was the direct cause of the following exception:
2020-01-20T10:34:14.9793992Z
2020-01-20T10:34:14.9794231Z Traceback (most recent call last):
2020-01-20T10:34:14.9794979Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/__init__.py", line 1517, in handle_single_execution
2020-01-20T10:34:14.9803112Z collection_info=collection_info,
2020-01-20T10:34:14.9804371Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/__init__.py", line 1599, in execute
2020-01-20T10:34:14.9845469Z return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
2020-01-20T10:34:14.9846453Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/actions/__init__.py", line 517, in execute
2020-01-20T10:34:14.9846652Z handle_output(name, output)
2020-01-20T10:34:14.9847218Z File "/tmp/tmpvahl86j6/galaxy-dev/lib/galaxy/tools/actions/__init__.py", line 374, in handle_output
2020-01-20T10:34:14.9901226Z data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(dataid)
2020-01-20T10:34:14.9902192Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 982, in get
2020-01-20T10:34:14.9902563Z return self._get_impl(ident, loading.load_on_pk_identity)
2020-01-20T10:34:14.9903210Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 1094, in _get_impl
2020-01-20T10:34:14.9903870Z return db_load_fn(self, primary_key_identity)
2020-01-20T10:34:14.9904659Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 284, in load_on_pk_identity
2020-01-20T10:34:14.9905130Z return q.one()
2020-01-20T10:34:14.9905712Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3325, in one
2020-01-20T10:34:14.9906010Z ret = self.one_or_none()
2020-01-20T10:34:14.9906608Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3294, in one_or_none
2020-01-20T10:34:14.9906934Z ret = list(self)
2020-01-20T10:34:14.9907533Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3367, in __iter__
2020-01-20T10:34:14.9908680Z return self._execute_and_instances(context)
2020-01-20T10:34:14.9909655Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3392, in _execute_and_instances
2020-01-20T10:34:14.9910213Z result = conn.execute(querycontext.statement, self._params)
2020-01-20T10:34:14.9910856Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 982, in execute
2020-01-20T10:34:14.9911149Z return meth(self, multiparams, params)
2020-01-20T10:34:14.9911929Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
2020-01-20T10:34:14.9912190Z return connection._execute_clauseelement(self, multiparams, params)
2020-01-20T10:34:14.9913022Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1101, in _execute_clauseelement
2020-01-20T10:34:14.9913329Z distilled_params,
2020-01-20T10:34:14.9914298Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1250, in _execute_context
2020-01-20T10:34:14.9914575Z e, statement, parameters, cursor, context
2020-01-20T10:34:14.9915355Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1476, in _handle_dbapi_exception
2020-01-20T10:34:14.9915624Z util.raise_from_cause(sqlalchemy_exception, exc_info)
2020-01-20T10:34:14.9916183Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
2020-01-20T10:34:14.9916453Z reraise(type(exception), exception, tb=exc_tb, cause=cause)
2020-01-20T10:34:14.9917093Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 152, in reraise
2020-01-20T10:34:14.9918037Z raise value.with_traceback(tb)
2020-01-20T10:34:14.9918720Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1246, in _execute_context
2020-01-20T10:34:14.9919011Z cursor, statement, parameters, context
2020-01-20T10:34:14.9919626Z File "/home/runner/.planemo/gx_venv_3.7_release_20.01/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 581, in do_execute
2020-01-20T10:34:14.9919938Z cursor.execute(statement, parameters)
2020-01-20T10:34:14.9920164Z sqlalchemy.exc.ProgrammingError: (psycopg2.errors.UndefinedFunction) operator does not exist: integer = boolean
2020-01-20T10:34:14.9920430Z LINE 3: WHERE history_dataset_association.id = true
2020-01-20T10:34:14.9920667Z ^
2020-01-20T10:34:14.9920906Z HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
2020-01-20T10:34:14.9921594Z
2020-01-20T10:34:14.9923082Z [SQL: SELECT history_dataset_association.id AS history_dataset_association_id, history_dataset_association.history_id AS history_dataset_association_history_id, history_dataset_association.dataset_id AS history_dataset_association_dataset_id, history_dataset_association.create_time AS history_dataset_association_create_time, history_dataset_association.update_time AS history_dataset_association_update_time, history_dataset_association.state AS history_dataset_association_state, history_dataset_association.copied_from_history_dataset_association_id AS history_dataset_association_copied_from_history_dataset_a_1, history_dataset_association.copied_from_library_dataset_dataset_association_id AS history_dataset_association_copied_from_library_dataset_d_2, history_dataset_association.name AS history_dataset_association_name, history_dataset_association.info AS history_dataset_association_info, history_dataset_association.blurb AS history_dataset_association_blurb, history_dataset_association.peek AS history_dataset_association_peek, history_dataset_association.tool_version AS history_dataset_association_tool_version, history_dataset_association.extension AS history_dataset_association_extension, history_dataset_association.parent_id AS history_dataset_association_parent_id, history_dataset_association.designation AS history_dataset_association_designation, history_dataset_association.deleted AS history_dataset_association_deleted, history_dataset_association.visible AS history_dataset_association_visible, history_dataset_association.extended_metadata_id AS history_dataset_association_extended_metadata_id, history_dataset_association.version AS history_dataset_association_version, history_dataset_association.hid AS history_dataset_association_hid, history_dataset_association.purged AS history_dataset_association_purged, history_dataset_association.validated_state AS history_dataset_association_validated_state, history_dataset_association.validated_state_message AS history_dataset_association_validated_state_message, history_dataset_association.hidden_beneath_collection_instance_id AS history_dataset_association_hidden_beneath_collection_ins_3, dataset_1.id AS dataset_1_id, dataset_1.create_time AS dataset_1_create_time, dataset_1.update_time AS dataset_1_update_time, dataset_1.state AS dataset_1_state, dataset_1.deleted AS dataset_1_deleted, dataset_1.purged AS dataset_1_purged, dataset_1.purgable AS dataset_1_purgable, dataset_1.object_store_id AS dataset_1_object_store_id, dataset_1.external_filename AS dataset_1_external_filename, dataset_1._extra_files_path AS dataset_1__extra_files_path, dataset_1.created_from_basename AS dataset_1_created_from_basename, dataset_1.file_size AS dataset_1_file_size, dataset_1.total_size AS dataset_1_total_size, dataset_1.uuid AS dataset_1_uuid
2020-01-20T10:34:14.9924421Z FROM history_dataset_association LEFT OUTER JOIN dataset AS dataset_1 ON dataset_1.id = history_dataset_association.dataset_id
2020-01-20T10:34:14.9925146Z WHERE history_dataset_association.id = %(param_1)s]
2020-01-20T10:34:14.9925764Z [parameters: {'param_1': True}]
2020-01-20T10:34:14.9926632Z (Background on this error at: http://sqlalche.me/e/f405)
2020-01-20T10:34:14.9927166Z 2020-01-20 10:34:14,978 WARNI [galaxy.tools.execute] There was a failure executing a job for tool [disco] - Error executing tool: (psycopg2.errors.UndefinedFunction) operator does not exist: integer = boolean
2020-01-20T10:34:14.9927664Z LINE 3: WHERE history_dataset_association.id = true
2020-01-20T10:34:14.9927807Z ^
2020-01-20T10:34:14.9928361Z HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
2020-01-20T10:34:14.9928441Z
2020-01-20T10:34:14.9929588Z [SQL: SELECT history_dataset_association.id AS history_dataset_association_id, history_dataset_association.history_id AS history_dataset_association_history_id, history_dataset_association.dataset_id AS history_dataset_association_dataset_id, history_dataset_association.create_time AS history_dataset_association_create_time, history_dataset_association.update_time AS history_dataset_association_update_time, history_dataset_association.state AS history_dataset_association_state, history_dataset_association.copied_from_history_dataset_association_id AS history_dataset_association_copied_from_history_dataset_a_1, history_dataset_association.copied_from_library_dataset_dataset_association_id AS history_dataset_association_copied_from_library_dataset_d_2, history_dataset_association.name AS history_dataset_association_name, history_dataset_association.info AS history_dataset_association_info, history_dataset_association.blurb AS history_dataset_association_blurb, history_dataset_association.peek AS history_dataset_association_peek, history_dataset_association.tool_version AS history_dataset_association_tool_version, history_dataset_association.extension AS history_dataset_association_extension, history_dataset_association.parent_id AS history_dataset_association_parent_id, history_dataset_association.designation AS history_dataset_association_designation, history_dataset_association.deleted AS history_dataset_association_deleted, history_dataset_association.visible AS history_dataset_association_visible, history_dataset_association.extended_metadata_id AS history_dataset_association_extended_metadata_id, history_dataset_association.version AS history_dataset_association_version, history_dataset_association.hid AS history_dataset_association_hid, history_dataset_association.purged AS history_dataset_association_purged, history_dataset_association.validated_state AS history_dataset_association_validated_state, history_dataset_association.validated_state_message AS history_dataset_association_validated_state_message, history_dataset_association.hidden_beneath_collection_instance_id AS history_dataset_association_hidden_beneath_collection_ins_3, dataset_1.id AS dataset_1_id, dataset_1.create_time AS dataset_1_create_time, dataset_1.update_time AS dataset_1_update_time, dataset_1.state AS dataset_1_state, dataset_1.deleted AS dataset_1_deleted, dataset_1.purged AS dataset_1_purged, dataset_1.purgable AS dataset_1_purgable, dataset_1.object_store_id AS dataset_1_object_store_id, dataset_1.external_filename AS dataset_1_external_filename, dataset_1._extra_files_path AS dataset_1__extra_files_path, dataset_1.created_from_basename AS dataset_1_created_from_basename, dataset_1.file_size AS dataset_1_file_size, dataset_1.total_size AS dataset_1_total_size, dataset_1.uuid AS dataset_1_uuid
2020-01-20T10:34:14.9930717Z FROM history_dataset_association LEFT OUTER JOIN dataset AS dataset_1 ON dataset_1.id = history_dataset_association.dataset_id
2020-01-20T10:34:14.9930866Z WHERE history_dataset_association.id = %(param_1)s]
2020-01-20T10:34:14.9931266Z [parameters: {'param_1': True}]
2020-01-20T10:34:14.9931395Z (Background on this error at: http://sqlalche.me/e/f405)
2020-01-20T10:34:14.9931685Z ERROR
|
sqlalchemy.exc.ProgrammingError
|
def get_shed_config_dict(self, app):
"""
Return the in-memory version of the shed_tool_conf file, which is stored in the config_elems entry
in the shed_tool_conf_dict.
"""
def _is_valid_shed_config_filename(filename):
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
if filename == shed_tool_conf_dict["config_filename"]:
return True
return False
if not self.shed_config_filename or not _is_valid_shed_config_filename(
self.shed_config_filename
):
return self.guess_shed_config(app)
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
if self.shed_config_filename == shed_tool_conf_dict["config_filename"]:
return shed_tool_conf_dict
return {}
|
def get_shed_config_dict(self, app, default=None):
"""
Return the in-memory version of the shed_tool_conf file, which is stored in the config_elems entry
in the shed_tool_conf_dict.
"""
def _is_valid_shed_config_filename(filename):
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
if filename == shed_tool_conf_dict["config_filename"]:
return True
return False
if not self.shed_config_filename or not _is_valid_shed_config_filename(
self.shed_config_filename
):
self.guess_shed_config(app, default=default)
if self.shed_config_filename:
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
if self.shed_config_filename == shed_tool_conf_dict["config_filename"]:
return shed_tool_conf_dict
return default
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def guess_shed_config(self, app):
tool_ids = []
for tool in self.metadata.get("tools", []):
tool_ids.append(tool.get("guid"))
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
name = shed_tool_conf_dict["config_filename"]
for elem in shed_tool_conf_dict["config_elems"]:
if elem.tag == "tool":
for sub_elem in elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
elif elem.tag == "section":
for tool_elem in elem.findall("tool"):
for sub_elem in tool_elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
# We need to search by file paths here, which is less desirable.
tool_shed = common_util.remove_protocol_and_port_from_tool_shed_url(self.tool_shed)
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
tool_path = shed_tool_conf_dict["tool_path"]
relative_path = os.path.join(
tool_path, tool_shed, "repos", self.owner, self.name
)
if os.path.exists(relative_path):
self.shed_config_filename = shed_tool_conf_dict["config_filename"]
return shed_tool_conf_dict
return {}
|
def guess_shed_config(self, app, default=None):
tool_ids = []
for tool in self.metadata.get("tools", []):
tool_ids.append(tool.get("guid"))
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
name = shed_tool_conf_dict["config_filename"]
for elem in shed_tool_conf_dict["config_elems"]:
if elem.tag == "tool":
for sub_elem in elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
elif elem.tag == "section":
for tool_elem in elem.findall("tool"):
for sub_elem in tool_elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
if self.includes_datatypes or self.includes_data_managers:
# We need to search by file paths here, which is less desirable.
tool_shed = common_util.remove_protocol_and_port_from_tool_shed_url(
self.tool_shed
)
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
tool_path = shed_tool_conf_dict["tool_path"]
relative_path = os.path.join(
tool_path,
tool_shed,
"repos",
self.owner,
self.name,
self.installed_changeset_revision,
)
if os.path.exists(relative_path):
self.shed_config_filename = shed_tool_conf_dict["config_filename"]
return shed_tool_conf_dict
return default
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def install(self, tool_shed_url, name, owner, changeset_revision, install_options):
# Get all of the information necessary for installing the repository from the specified tool shed.
repository_revision_dict, repo_info_dicts = self.__get_install_info_from_tool_shed(
tool_shed_url, name, owner, changeset_revision
)
if changeset_revision != repository_revision_dict["changeset_revision"]:
# Demanded installation of a non-installable revision. Stop here if repository already installed.
repo = repository_util.get_installed_repository(
app=self.app,
tool_shed=tool_shed_url,
name=name,
owner=owner,
changeset_revision=repository_revision_dict["changeset_revision"],
)
if repo and repo.is_installed:
# Repo installed. Returning empty list indicated repo already installed.
return []
installed_tool_shed_repositories = self.__initiate_and_install_repositories(
tool_shed_url, repository_revision_dict, repo_info_dicts, install_options
)
return installed_tool_shed_repositories
|
def install(self, tool_shed_url, name, owner, changeset_revision, install_options):
# Get all of the information necessary for installing the repository from the specified tool shed.
repository_revision_dict, repo_info_dicts = self.__get_install_info_from_tool_shed(
tool_shed_url, name, owner, changeset_revision
)
installed_tool_shed_repositories = self.__initiate_and_install_repositories(
tool_shed_url, repository_revision_dict, repo_info_dicts, install_options
)
return installed_tool_shed_repositories
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def __initiate_and_install_repositories(
self, tool_shed_url, repository_revision_dict, repo_info_dicts, install_options
):
try:
has_repository_dependencies = repository_revision_dict[
"has_repository_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'has_repository_dependencies'."
)
try:
includes_tools = repository_revision_dict["includes_tools"]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools'."
)
try:
includes_tool_dependencies = repository_revision_dict[
"includes_tool_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tool_dependencies'."
)
try:
includes_tools_for_display_in_tool_panel = repository_revision_dict[
"includes_tools_for_display_in_tool_panel"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools_for_display_in_tool_panel'."
)
# Get the information about the Galaxy components (e.g., tool panel section, tool config file, etc) that will contain the repository information.
install_repository_dependencies = install_options.get(
"install_repository_dependencies", False
)
install_resolver_dependencies = install_options.get(
"install_resolver_dependencies", False
)
install_tool_dependencies = install_options.get("install_tool_dependencies", False)
if install_tool_dependencies:
self.__assert_can_install_dependencies()
new_tool_panel_section_label = install_options.get(
"new_tool_panel_section_label", ""
)
tool_panel_section_mapping = install_options.get("tool_panel_section_mapping", {})
shed_tool_conf = install_options.get("shed_tool_conf", None)
if shed_tool_conf:
# Get the tool_path setting.
shed_conf_dict = self.tpm.get_shed_tool_conf_dict(shed_tool_conf)
tool_path = shed_conf_dict["tool_path"]
else:
# Don't use migrated_tools_conf.xml and prefer shed_tool_config_file.
try:
for shed_config_dict in self.app.toolbox.dynamic_confs(
include_migrated_tool_conf=False
):
if (
shed_config_dict.get("config_filename")
== self.app.config.shed_tool_config_file
):
break
else:
shed_config_dict = self.app.toolbox.dynamic_confs(
include_migrated_tool_conf=False
)[0]
except IndexError:
raise exceptions.RequestParameterMissingException(
"Missing required parameter 'shed_tool_conf'."
)
shed_tool_conf = shed_config_dict["config_filename"]
tool_path = shed_config_dict["tool_path"]
tool_panel_section_id = self.app.toolbox.find_section_id(
install_options.get("tool_panel_section_id", "")
)
# Build the dictionary of information necessary for creating tool_shed_repository database records
# for each repository being installed.
installation_dict = dict(
install_repository_dependencies=install_repository_dependencies,
new_tool_panel_section_label=new_tool_panel_section_label,
tool_panel_section_mapping=tool_panel_section_mapping,
no_changes_checked=False,
repo_info_dicts=repo_info_dicts,
tool_panel_section_id=tool_panel_section_id,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Create the tool_shed_repository database records and gather additional information for repository installation.
(
created_or_updated_tool_shed_repositories,
tool_panel_section_keys,
repo_info_dicts,
filtered_repo_info_dicts,
) = self.handle_tool_shed_repositories(installation_dict)
if created_or_updated_tool_shed_repositories:
# Build the dictionary of information necessary for installing the repositories.
installation_dict = dict(
created_or_updated_tool_shed_repositories=created_or_updated_tool_shed_repositories,
filtered_repo_info_dicts=filtered_repo_info_dicts,
has_repository_dependencies=has_repository_dependencies,
includes_tool_dependencies=includes_tool_dependencies,
includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
install_repository_dependencies=install_repository_dependencies,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
message="",
new_tool_panel_section_label=new_tool_panel_section_label,
shed_tool_conf=shed_tool_conf,
status="done",
tool_panel_section_id=tool_panel_section_id,
tool_panel_section_keys=tool_panel_section_keys,
tool_panel_section_mapping=tool_panel_section_mapping,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Prepare the repositories for installation. Even though this
# method receives a single combination of tool_shed_url, name,
# owner and changeset_revision, there may be multiple repositories
# for installation at this point because repository dependencies
# may have added additional repositories for installation along
# with the single specified repository.
encoded_kwd, query, tool_shed_repositories, encoded_repository_ids = (
self.initiate_repository_installation(installation_dict)
)
# Some repositories may have repository dependencies that are
# required to be installed before the dependent repository, so
# we'll order the list of tsr_ids to ensure all repositories
# install in the required order.
tsr_ids = [
self.app.security.encode_id(tool_shed_repository.id)
for tool_shed_repository in tool_shed_repositories
]
decoded_kwd = dict(
shed_tool_conf=shed_tool_conf,
tool_path=tool_path,
tool_panel_section_keys=tool_panel_section_keys,
repo_info_dicts=filtered_repo_info_dicts,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
tool_panel_section_mapping=tool_panel_section_mapping,
)
return self.install_repositories(
tsr_ids, decoded_kwd, reinstalling=False, install_options=install_options
)
|
def __initiate_and_install_repositories(
self, tool_shed_url, repository_revision_dict, repo_info_dicts, install_options
):
try:
has_repository_dependencies = repository_revision_dict[
"has_repository_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'has_repository_dependencies'."
)
try:
includes_tools = repository_revision_dict["includes_tools"]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools'."
)
try:
includes_tool_dependencies = repository_revision_dict[
"includes_tool_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tool_dependencies'."
)
try:
includes_tools_for_display_in_tool_panel = repository_revision_dict[
"includes_tools_for_display_in_tool_panel"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools_for_display_in_tool_panel'."
)
# Get the information about the Galaxy components (e.g., tool pane section, tool config file, etc) that will contain the repository information.
install_repository_dependencies = install_options.get(
"install_repository_dependencies", False
)
install_resolver_dependencies = install_options.get(
"install_resolver_dependencies", False
)
install_tool_dependencies = install_options.get("install_tool_dependencies", False)
if install_tool_dependencies:
self.__assert_can_install_dependencies()
new_tool_panel_section_label = install_options.get(
"new_tool_panel_section_label", ""
)
tool_panel_section_mapping = install_options.get("tool_panel_section_mapping", {})
shed_tool_conf = install_options.get("shed_tool_conf", None)
if shed_tool_conf:
# Get the tool_path setting.
shed_conf_dict = self.tpm.get_shed_tool_conf_dict(shed_tool_conf)
tool_path = shed_conf_dict["tool_path"]
else:
# Don't use migrated_tools_conf.xml.
try:
shed_config_dict = self.app.toolbox.dynamic_confs(
include_migrated_tool_conf=False
)[0]
except IndexError:
raise exceptions.RequestParameterMissingException(
"Missing required parameter 'shed_tool_conf'."
)
shed_tool_conf = shed_config_dict["config_filename"]
tool_path = shed_config_dict["tool_path"]
tool_panel_section_id = self.app.toolbox.find_section_id(
install_options.get("tool_panel_section_id", "")
)
# Build the dictionary of information necessary for creating tool_shed_repository database records
# for each repository being installed.
installation_dict = dict(
install_repository_dependencies=install_repository_dependencies,
new_tool_panel_section_label=new_tool_panel_section_label,
tool_panel_section_mapping=tool_panel_section_mapping,
no_changes_checked=False,
repo_info_dicts=repo_info_dicts,
tool_panel_section_id=tool_panel_section_id,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Create the tool_shed_repository database records and gather additional information for repository installation.
(
created_or_updated_tool_shed_repositories,
tool_panel_section_keys,
repo_info_dicts,
filtered_repo_info_dicts,
) = self.handle_tool_shed_repositories(installation_dict)
if created_or_updated_tool_shed_repositories:
# Build the dictionary of information necessary for installing the repositories.
installation_dict = dict(
created_or_updated_tool_shed_repositories=created_or_updated_tool_shed_repositories,
filtered_repo_info_dicts=filtered_repo_info_dicts,
has_repository_dependencies=has_repository_dependencies,
includes_tool_dependencies=includes_tool_dependencies,
includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
install_repository_dependencies=install_repository_dependencies,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
message="",
new_tool_panel_section_label=new_tool_panel_section_label,
shed_tool_conf=shed_tool_conf,
status="done",
tool_panel_section_id=tool_panel_section_id,
tool_panel_section_keys=tool_panel_section_keys,
tool_panel_section_mapping=tool_panel_section_mapping,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Prepare the repositories for installation. Even though this
# method receives a single combination of tool_shed_url, name,
# owner and changeset_revision, there may be multiple repositories
# for installation at this point because repository dependencies
# may have added additional repositories for installation along
# with the single specified repository.
encoded_kwd, query, tool_shed_repositories, encoded_repository_ids = (
self.initiate_repository_installation(installation_dict)
)
# Some repositories may have repository dependencies that are
# required to be installed before the dependent repository, so
# we'll order the list of tsr_ids to ensure all repositories
# install in the required order.
tsr_ids = [
self.app.security.encode_id(tool_shed_repository.id)
for tool_shed_repository in tool_shed_repositories
]
decoded_kwd = dict(
shed_tool_conf=shed_tool_conf,
tool_path=tool_path,
tool_panel_section_keys=tool_panel_section_keys,
repo_info_dicts=filtered_repo_info_dicts,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
tool_panel_section_mapping=tool_panel_section_mapping,
)
return self.install_repositories(
tsr_ids, decoded_kwd, reinstalling=False, install_options=install_options
)
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def install_tool_shed_repository(
self,
tool_shed_repository,
repo_info_dict,
tool_panel_section_key,
shed_tool_conf,
tool_path,
install_resolver_dependencies,
install_tool_dependencies,
reinstalling=False,
tool_panel_section_mapping={},
install_options=None,
):
self.app.install_model.context.flush()
if tool_panel_section_key:
_, tool_section = self.app.toolbox.get_section(tool_panel_section_key)
if tool_section is None:
log.debug(
'Invalid tool_panel_section_key "%s" specified. Tools will be loaded outside of sections in the tool panel.',
str(tool_panel_section_key),
)
else:
tool_section = None
if isinstance(repo_info_dict, string_types):
repo_info_dict = encoding_util.tool_shed_decode(repo_info_dict)
repo_info_tuple = repo_info_dict[tool_shed_repository.name]
(
description,
repository_clone_url,
changeset_revision,
ctx_rev,
repository_owner,
repository_dependencies,
tool_dependencies,
) = repo_info_tuple
if changeset_revision != tool_shed_repository.changeset_revision:
# This is an update
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(
self.app, tool_shed_repository.tool_shed
)
return self.update_tool_shed_repository(
tool_shed_repository,
tool_shed_url,
ctx_rev,
changeset_revision,
install_options=install_options,
)
# Clone the repository to the configured location.
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.CLONING,
)
relative_clone_dir = repository_util.generate_tool_shed_repository_install_dir(
repository_clone_url, tool_shed_repository.installed_changeset_revision
)
relative_install_dir = os.path.join(relative_clone_dir, tool_shed_repository.name)
install_dir = os.path.abspath(os.path.join(tool_path, relative_install_dir))
log.info(
"Cloning repository '%s' at %s:%s",
repository_clone_url,
ctx_rev,
tool_shed_repository.changeset_revision,
)
if os.path.exists(install_dir):
# May exist from a previous failed install attempt, just try updating instead of cloning.
hg_util.pull_repository(install_dir, repository_clone_url, ctx_rev)
hg_util.update_repository(install_dir, ctx_rev)
cloned_ok = True
else:
cloned_ok, error_message = hg_util.clone_repository(
repository_clone_url, install_dir, ctx_rev
)
if cloned_ok:
if reinstalling:
# Since we're reinstalling the repository we need to find the latest changeset revision to
# which it can be updated.
changeset_revision_dict = self.app.update_repository_manager.get_update_to_changeset_revision_and_ctx_rev(
tool_shed_repository
)
current_changeset_revision = changeset_revision_dict.get(
"changeset_revision", None
)
current_ctx_rev = changeset_revision_dict.get("ctx_rev", None)
if current_ctx_rev != ctx_rev:
repo_path = os.path.abspath(install_dir)
hg_util.pull_repository(
repo_path, repository_clone_url, current_changeset_revision
)
hg_util.update_repository(repo_path, ctx_rev=current_ctx_rev)
self.__handle_repository_contents(
tool_shed_repository=tool_shed_repository,
tool_path=tool_path,
repository_clone_url=repository_clone_url,
relative_install_dir=relative_install_dir,
tool_shed=tool_shed_repository.tool_shed,
tool_section=tool_section,
shed_tool_conf=shed_tool_conf,
reinstalling=reinstalling,
tool_panel_section_mapping=tool_panel_section_mapping,
)
metadata = tool_shed_repository.metadata
if "tools" in metadata and install_resolver_dependencies:
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES,
)
new_tools = [
self.app.toolbox._tools_by_id.get(tool_d["guid"], None)
for tool_d in metadata["tools"]
]
new_requirements = set(
[tool.requirements.packages for tool in new_tools if tool]
)
[self._view.install_dependencies(r) for r in new_requirements]
dependency_manager = self.app.toolbox.dependency_manager
if dependency_manager.cached:
[dependency_manager.build_cache(r) for r in new_requirements]
if (
install_tool_dependencies
and tool_shed_repository.tool_dependencies
and "tool_dependencies" in metadata
):
work_dir = tempfile.mkdtemp(prefix="tmp-toolshed-itsr")
# Install tool dependencies.
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES,
)
# Get the tool_dependencies.xml file from the repository.
tool_dependencies_config = hg_util.get_config_from_disk(
"tool_dependencies.xml", install_dir
)
itdm = InstallToolDependencyManager(self.app)
itdm.install_specified_tool_dependencies(
tool_shed_repository=tool_shed_repository,
tool_dependencies_config=tool_dependencies_config,
tool_dependencies=tool_shed_repository.tool_dependencies,
from_tool_migration_manager=False,
)
basic_util.remove_dir(work_dir)
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.INSTALLED,
)
if self.app.config.manage_dependency_relationships:
# Add the installed repository and any tool dependencies to the in-memory dictionaries
# in the installed_repository_manager.
self.app.installed_repository_manager.handle_repository_install(
tool_shed_repository
)
else:
# An error occurred while cloning the repository, so reset everything necessary to enable another attempt.
repository_util.set_repository_attributes(
self.app,
tool_shed_repository,
status=self.install_model.ToolShedRepository.installation_status.ERROR,
error_message=error_message,
deleted=False,
uninstalled=False,
remove_from_disk=True,
)
|
def install_tool_shed_repository(
self,
tool_shed_repository,
repo_info_dict,
tool_panel_section_key,
shed_tool_conf,
tool_path,
install_resolver_dependencies,
install_tool_dependencies,
reinstalling=False,
tool_panel_section_mapping={},
install_options=None,
):
self.app.install_model.context.flush()
if tool_panel_section_key:
_, tool_section = self.app.toolbox.get_section(tool_panel_section_key)
if tool_section is None:
log.debug(
'Invalid tool_panel_section_key "%s" specified. Tools will be loaded outside of sections in the tool panel.',
str(tool_panel_section_key),
)
else:
tool_section = None
if isinstance(repo_info_dict, string_types):
repo_info_dict = encoding_util.tool_shed_decode(repo_info_dict)
repo_info_tuple = repo_info_dict[tool_shed_repository.name]
(
description,
repository_clone_url,
changeset_revision,
ctx_rev,
repository_owner,
repository_dependencies,
tool_dependencies,
) = repo_info_tuple
if changeset_revision != tool_shed_repository.changeset_revision:
# This is an update
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(
self.app, tool_shed_repository.tool_shed
)
return self.update_tool_shed_repository(
tool_shed_repository,
tool_shed_url,
ctx_rev,
changeset_revision,
install_options=install_options,
)
# Clone the repository to the configured location.
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.CLONING,
)
relative_clone_dir = repository_util.generate_tool_shed_repository_install_dir(
repository_clone_url, tool_shed_repository.installed_changeset_revision
)
relative_install_dir = os.path.join(relative_clone_dir, tool_shed_repository.name)
install_dir = os.path.join(tool_path, relative_install_dir)
log.info(
"Cloning repository '%s' at %s:%s",
repository_clone_url,
ctx_rev,
tool_shed_repository.changeset_revision,
)
cloned_ok, error_message = hg_util.clone_repository(
repository_clone_url, os.path.abspath(install_dir), ctx_rev
)
if cloned_ok:
if reinstalling:
# Since we're reinstalling the repository we need to find the latest changeset revision to
# which it can be updated.
changeset_revision_dict = self.app.update_repository_manager.get_update_to_changeset_revision_and_ctx_rev(
tool_shed_repository
)
current_changeset_revision = changeset_revision_dict.get(
"changeset_revision", None
)
current_ctx_rev = changeset_revision_dict.get("ctx_rev", None)
if current_ctx_rev != ctx_rev:
repo_path = os.path.abspath(install_dir)
hg_util.pull_repository(
repo_path, repository_clone_url, current_changeset_revision
)
hg_util.update_repository(repo_path, ctx_rev=current_ctx_rev)
self.__handle_repository_contents(
tool_shed_repository=tool_shed_repository,
tool_path=tool_path,
repository_clone_url=repository_clone_url,
relative_install_dir=relative_install_dir,
tool_shed=tool_shed_repository.tool_shed,
tool_section=tool_section,
shed_tool_conf=shed_tool_conf,
reinstalling=reinstalling,
tool_panel_section_mapping=tool_panel_section_mapping,
)
metadata = tool_shed_repository.metadata
if "tools" in metadata and install_resolver_dependencies:
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES,
)
new_tools = [
self.app.toolbox._tools_by_id.get(tool_d["guid"], None)
for tool_d in metadata["tools"]
]
new_requirements = set(
[tool.requirements.packages for tool in new_tools if tool]
)
[self._view.install_dependencies(r) for r in new_requirements]
dependency_manager = self.app.toolbox.dependency_manager
if dependency_manager.cached:
[dependency_manager.build_cache(r) for r in new_requirements]
if (
install_tool_dependencies
and tool_shed_repository.tool_dependencies
and "tool_dependencies" in metadata
):
work_dir = tempfile.mkdtemp(prefix="tmp-toolshed-itsr")
# Install tool dependencies.
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES,
)
# Get the tool_dependencies.xml file from the repository.
tool_dependencies_config = hg_util.get_config_from_disk(
"tool_dependencies.xml", install_dir
)
itdm = InstallToolDependencyManager(self.app)
itdm.install_specified_tool_dependencies(
tool_shed_repository=tool_shed_repository,
tool_dependencies_config=tool_dependencies_config,
tool_dependencies=tool_shed_repository.tool_dependencies,
from_tool_migration_manager=False,
)
basic_util.remove_dir(work_dir)
self.update_tool_shed_repository_status(
tool_shed_repository,
self.install_model.ToolShedRepository.installation_status.INSTALLED,
)
if self.app.config.manage_dependency_relationships:
# Add the installed repository and any tool dependencies to the in-memory dictionaries
# in the installed_repository_manager.
self.app.installed_repository_manager.handle_repository_install(
tool_shed_repository
)
else:
# An error occurred while cloning the repository, so reset everything necessary to enable another attempt.
repository_util.set_repository_attributes(
self.app,
tool_shed_repository,
status=self.install_model.ToolShedRepository.installation_status.ERROR,
error_message=error_message,
deleted=False,
uninstalled=False,
remove_from_disk=True,
)
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def update_tool_shed_repository(
self,
repository,
tool_shed_url,
latest_ctx_rev,
latest_changeset_revision,
install_new_dependencies=True,
install_options=None,
):
install_options = install_options or {}
original_metadata_dict = repository.metadata
original_repository_dependencies_dict = original_metadata_dict.get(
"repository_dependencies", {}
)
original_repository_dependencies = original_repository_dependencies_dict.get(
"repository_dependencies", []
)
original_tool_dependencies_dict = original_metadata_dict.get(
"tool_dependencies", {}
)
shed_tool_conf, tool_path, relative_install_dir = (
suc.get_tool_panel_config_tool_path_install_dir(self.app, repository)
)
if tool_path:
repo_files_dir = os.path.abspath(
os.path.join(tool_path, relative_install_dir, repository.name)
)
else:
repo_files_dir = os.path.abspath(
os.path.join(relative_install_dir, repository.name)
)
repository_clone_url = os.path.join(
tool_shed_url, "repos", repository.owner, repository.name
)
# Set a status, even though we're probably not cloning.
self.update_tool_shed_repository_status(
repository, status=repository.installation_status.CLONING
)
log.info(
"Updating repository '%s' to %s:%s",
repository.name,
latest_ctx_rev,
latest_changeset_revision,
)
if not os.path.exists(repo_files_dir):
log.debug(
"Repository directory '%s' does not exist, cloning repository instead of updating repository",
repo_files_dir,
)
hg_util.clone_repository(
repository_clone_url=repository_clone_url,
repository_file_dir=repo_files_dir,
ctx_rev=latest_ctx_rev,
)
hg_util.pull_repository(repo_files_dir, repository_clone_url, latest_ctx_rev)
hg_util.update_repository(repo_files_dir, latest_ctx_rev)
# Remove old Data Manager entries
if repository.includes_data_managers:
dmh = data_manager.DataManagerHandler(self.app)
dmh.remove_from_data_manager(repository)
# Update the repository metadata.
tpm = tool_panel_manager.ToolPanelManager(self.app)
irmm = InstalledRepositoryMetadataManager(
app=self.app,
tpm=tpm,
repository=repository,
changeset_revision=latest_changeset_revision,
repository_clone_url=repository_clone_url,
shed_config_dict=repository.get_shed_config_dict(self.app),
relative_install_dir=relative_install_dir,
repository_files_dir=None,
resetting_all_metadata_on_repository=False,
updating_installed_repository=True,
persist=True,
)
irmm.generate_metadata_for_changeset_revision()
irmm_metadata_dict = irmm.get_metadata_dict()
self.update_tool_shed_repository_status(
repository, status=repository.installation_status.INSTALLED
)
if "tools" in irmm_metadata_dict:
tool_panel_dict = irmm_metadata_dict.get("tool_panel_section", None)
if tool_panel_dict is None:
tool_panel_dict = tpm.generate_tool_panel_dict_from_shed_tool_conf_entries(
repository
)
repository_tools_tups = irmm.get_repository_tools_tups()
tpm.add_to_tool_panel(
repository_name=str(repository.name),
repository_clone_url=repository_clone_url,
changeset_revision=str(repository.installed_changeset_revision),
repository_tools_tups=repository_tools_tups,
owner=str(repository.owner),
shed_tool_conf=shed_tool_conf,
tool_panel_dict=tool_panel_dict,
new_install=False,
)
# Add new Data Manager entries
if "data_manager" in irmm_metadata_dict:
dmh = data_manager.DataManagerHandler(self.app)
dmh.install_data_managers(
self.app.config.shed_data_manager_config_file,
irmm_metadata_dict,
repository.get_shed_config_dict(self.app),
os.path.join(relative_install_dir, repository.name),
repository,
repository_tools_tups,
)
if (
"repository_dependencies" in irmm_metadata_dict
or "tool_dependencies" in irmm_metadata_dict
):
new_repository_dependencies_dict = irmm_metadata_dict.get(
"repository_dependencies", {}
)
new_repository_dependencies = new_repository_dependencies_dict.get(
"repository_dependencies", []
)
new_tool_dependencies_dict = irmm_metadata_dict.get("tool_dependencies", {})
if new_repository_dependencies:
# [[http://localhost:9009', package_picard_1_56_0', devteam', 910b0b056666', False', False']]
if new_repository_dependencies == original_repository_dependencies:
for new_repository_tup in new_repository_dependencies:
# Make sure all dependencies are installed.
# TODO: Repository dependencies that are not installed should be displayed to the user,
# giving them the option to install them or not. This is the same behavior as when initially
# installing and when re-installing.
(
new_tool_shed,
new_name,
new_owner,
new_changeset_revision,
new_pir,
new_oicct,
) = common_util.parse_repository_dependency_tuple(
new_repository_tup
)
# Mock up a repo_info_tupe that has the information needed to see if the repository dependency
# was previously installed.
repo_info_tuple = (
"",
new_tool_shed,
new_changeset_revision,
"",
new_owner,
[],
[],
)
# Since the value of new_changeset_revision came from a repository dependency
# definition, it may occur earlier in the Tool Shed's repository changelog than
# the Galaxy tool_shed_repository.installed_changeset_revision record value, so
# we set from_tip to True to make sure we get the entire set of changeset revisions
# from the Tool Shed.
new_repository_db_record, installed_changeset_revision = (
repository_util.repository_was_previously_installed(
self.app,
tool_shed_url,
new_name,
repo_info_tuple,
from_tip=True,
)
)
if (
new_repository_db_record
and new_repository_db_record.status
in [
self.install_model.ToolShedRepository.installation_status.ERROR,
self.install_model.ToolShedRepository.installation_status.NEW,
self.install_model.ToolShedRepository.installation_status.UNINSTALLED,
]
) or not new_repository_db_record:
log.debug(
"Update to %s contains new repository dependency %s/%s",
repository.name,
new_owner,
new_name,
)
if not install_new_dependencies:
return ("repository", irmm_metadata_dict)
else:
self.install(
tool_shed_url,
new_name,
new_owner,
new_changeset_revision,
install_options,
)
# Updates received did not include any newly defined repository dependencies but did include
# newly defined tool dependencies. If the newly defined tool dependencies are not the same
# as the originally defined tool dependencies, we need to install them.
if not install_new_dependencies:
for new_key, new_val in new_tool_dependencies_dict.items():
if new_key not in original_tool_dependencies_dict:
return ("tool", irmm_metadata_dict)
original_val = original_tool_dependencies_dict[new_key]
if new_val != original_val:
return ("tool", irmm_metadata_dict)
# Updates received did not include any newly defined repository dependencies or newly defined
# tool dependencies that need to be installed.
repository = self.app.update_repository_manager.update_repository_record(
repository=repository,
updated_metadata_dict=irmm_metadata_dict,
updated_changeset_revision=latest_changeset_revision,
updated_ctx_rev=latest_ctx_rev,
)
return (None, None)
|
def update_tool_shed_repository(
self,
repository,
tool_shed_url,
latest_ctx_rev,
latest_changeset_revision,
install_new_dependencies=True,
install_options=None,
):
install_options = install_options or {}
original_metadata_dict = repository.metadata
original_repository_dependencies_dict = original_metadata_dict.get(
"repository_dependencies", {}
)
original_repository_dependencies = original_repository_dependencies_dict.get(
"repository_dependencies", []
)
original_tool_dependencies_dict = original_metadata_dict.get(
"tool_dependencies", {}
)
shed_tool_conf, tool_path, relative_install_dir = (
suc.get_tool_panel_config_tool_path_install_dir(self.app, repository)
)
if tool_path:
repo_files_dir = os.path.abspath(
os.path.join(tool_path, relative_install_dir, repository.name)
)
else:
repo_files_dir = os.path.abspath(
os.path.join(relative_install_dir, repository.name)
)
repository_clone_url = os.path.join(
tool_shed_url, "repos", repository.owner, repository.name
)
log.info(
"Updating repository '%s' to %s:%s",
repository.name,
latest_ctx_rev,
latest_changeset_revision,
)
hg_util.pull_repository(repo_files_dir, repository_clone_url, latest_ctx_rev)
hg_util.update_repository(repo_files_dir, latest_ctx_rev)
# Remove old Data Manager entries
if repository.includes_data_managers:
dmh = data_manager.DataManagerHandler(self.app)
dmh.remove_from_data_manager(repository)
# Update the repository metadata.
tpm = tool_panel_manager.ToolPanelManager(self.app)
irmm = InstalledRepositoryMetadataManager(
app=self.app,
tpm=tpm,
repository=repository,
changeset_revision=latest_changeset_revision,
repository_clone_url=repository_clone_url,
shed_config_dict=repository.get_shed_config_dict(self.app),
relative_install_dir=relative_install_dir,
repository_files_dir=None,
resetting_all_metadata_on_repository=False,
updating_installed_repository=True,
persist=True,
)
irmm.generate_metadata_for_changeset_revision()
irmm_metadata_dict = irmm.get_metadata_dict()
if "tools" in irmm_metadata_dict:
tool_panel_dict = irmm_metadata_dict.get("tool_panel_section", None)
if tool_panel_dict is None:
tool_panel_dict = tpm.generate_tool_panel_dict_from_shed_tool_conf_entries(
repository
)
repository_tools_tups = irmm.get_repository_tools_tups()
tpm.add_to_tool_panel(
repository_name=str(repository.name),
repository_clone_url=repository_clone_url,
changeset_revision=str(repository.installed_changeset_revision),
repository_tools_tups=repository_tools_tups,
owner=str(repository.owner),
shed_tool_conf=shed_tool_conf,
tool_panel_dict=tool_panel_dict,
new_install=False,
)
# Add new Data Manager entries
if "data_manager" in irmm_metadata_dict:
dmh = data_manager.DataManagerHandler(self.app)
dmh.install_data_managers(
self.app.config.shed_data_manager_config_file,
irmm_metadata_dict,
repository.get_shed_config_dict(self.app),
os.path.join(relative_install_dir, repository.name),
repository,
repository_tools_tups,
)
if (
"repository_dependencies" in irmm_metadata_dict
or "tool_dependencies" in irmm_metadata_dict
):
new_repository_dependencies_dict = irmm_metadata_dict.get(
"repository_dependencies", {}
)
new_repository_dependencies = new_repository_dependencies_dict.get(
"repository_dependencies", []
)
new_tool_dependencies_dict = irmm_metadata_dict.get("tool_dependencies", {})
if new_repository_dependencies:
# [[http://localhost:9009', package_picard_1_56_0', devteam', 910b0b056666', False', False']]
if new_repository_dependencies == original_repository_dependencies:
for new_repository_tup in new_repository_dependencies:
# Make sure all dependencies are installed.
# TODO: Repository dependencies that are not installed should be displayed to the user,
# giving them the option to install them or not. This is the same behavior as when initially
# installing and when re-installing.
(
new_tool_shed,
new_name,
new_owner,
new_changeset_revision,
new_pir,
new_oicct,
) = common_util.parse_repository_dependency_tuple(
new_repository_tup
)
# Mock up a repo_info_tupe that has the information needed to see if the repository dependency
# was previously installed.
repo_info_tuple = (
"",
new_tool_shed,
new_changeset_revision,
"",
new_owner,
[],
[],
)
# Since the value of new_changeset_revision came from a repository dependency
# definition, it may occur earlier in the Tool Shed's repository changelog than
# the Galaxy tool_shed_repository.installed_changeset_revision record value, so
# we set from_tip to True to make sure we get the entire set of changeset revisions
# from the Tool Shed.
new_repository_db_record, installed_changeset_revision = (
repository_util.repository_was_previously_installed(
self.app,
tool_shed_url,
new_name,
repo_info_tuple,
from_tip=True,
)
)
if (
new_repository_db_record
and new_repository_db_record.status
in [
self.install_model.ToolShedRepository.installation_status.ERROR,
self.install_model.ToolShedRepository.installation_status.NEW,
self.install_model.ToolShedRepository.installation_status.UNINSTALLED,
]
) or not new_repository_db_record:
log.debug(
"Update to %s contains new repository dependency %s/%s",
repository.name,
new_owner,
new_name,
)
if not install_new_dependencies:
return ("repository", irmm_metadata_dict)
else:
self.install(
tool_shed_url,
new_name,
new_owner,
new_changeset_revision,
install_options,
)
# Updates received did not include any newly defined repository dependencies but did include
# newly defined tool dependencies. If the newly defined tool dependencies are not the same
# as the originally defined tool dependencies, we need to install them.
if not install_new_dependencies:
for new_key, new_val in new_tool_dependencies_dict.items():
if new_key not in original_tool_dependencies_dict:
return ("tool", irmm_metadata_dict)
original_val = original_tool_dependencies_dict[new_key]
if new_val != original_val:
return ("tool", irmm_metadata_dict)
# Updates received did not include any newly defined repository dependencies or newly defined
# tool dependencies that need to be installed.
repository = self.app.update_repository_manager.update_repository_record(
repository=repository,
updated_metadata_dict=irmm_metadata_dict,
updated_changeset_revision=latest_changeset_revision,
updated_ctx_rev=latest_ctx_rev,
)
return (None, None)
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def get_repository_tools_tups(self):
"""
Return a list of tuples of the form (relative_path, guid, tool) for each tool defined
in the received tool shed repository metadata.
"""
repository_tools_tups = []
shed_conf_dict = self.tpm.get_shed_tool_conf_dict(
self.metadata_dict.get("shed_config_filename")
)
if "tools" in self.metadata_dict:
for tool_dict in self.metadata_dict["tools"]:
load_relative_path = relative_path = tool_dict.get("tool_config", None)
if shed_conf_dict.get("tool_path"):
load_relative_path = os.path.join(
shed_conf_dict.get("tool_path"), relative_path
)
guid = tool_dict.get("guid", None)
if relative_path and guid:
try:
tool = self.app.toolbox.load_tool(
os.path.abspath(load_relative_path), guid=guid, use_cached=False
)
except Exception:
log.exception(
"Error while loading tool at path '%s'", load_relative_path
)
tool = None
else:
tool = None
if tool:
repository_tools_tups.append((relative_path, guid, tool))
return repository_tools_tups
|
def get_repository_tools_tups(self):
"""
Return a list of tuples of the form (relative_path, guid, tool) for each tool defined
in the received tool shed repository metadata.
"""
repository_tools_tups = []
shed_conf_dict = self.tpm.get_shed_tool_conf_dict(
self.metadata_dict.get("shed_config_filename")
)
if "tools" in self.metadata_dict:
for tool_dict in self.metadata_dict["tools"]:
load_relative_path = relative_path = tool_dict.get("tool_config", None)
if shed_conf_dict.get("tool_path"):
load_relative_path = os.path.join(
shed_conf_dict.get("tool_path"), relative_path
)
guid = tool_dict.get("guid", None)
if relative_path and guid:
tool = self.app.toolbox.load_tool(
os.path.abspath(load_relative_path), guid=guid, use_cached=False
)
else:
tool = None
if tool:
repository_tools_tups.append((relative_path, guid, tool))
return repository_tools_tups
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def set_repository(
self, repository, relative_install_dir=None, changeset_revision=None
):
self.repository = repository
# Shed related tool panel configs are only relevant to Galaxy.
if self.app.name == "galaxy":
if relative_install_dir is None and self.repository is not None:
tool_path, relative_install_dir = self.repository.get_tool_relative_path(
self.app
)
if changeset_revision is None and self.repository is not None:
self.set_changeset_revision(self.repository.changeset_revision)
else:
self.set_changeset_revision(changeset_revision)
self.shed_config_dict = repository.get_shed_config_dict(self.app)
self.metadata_dict = {
"shed_config_filename": self.shed_config_dict.get("config_filename", None)
}
else:
if relative_install_dir is None and self.repository is not None:
relative_install_dir = repository.repo_path(self.app)
if changeset_revision is None and self.repository is not None:
self.set_changeset_revision(self.repository.tip(self.app))
else:
self.set_changeset_revision(changeset_revision)
self.shed_config_dict = {}
self.metadata_dict = {}
self.set_relative_install_dir(relative_install_dir)
self.set_repository_files_dir()
self.resetting_all_metadata_on_repository = False
self.updating_installed_repository = False
self.persist = False
self.invalid_file_tups = []
|
def set_repository(
self, repository, relative_install_dir=None, changeset_revision=None
):
self.repository = repository
# Shed related tool panel configs are only relevant to Galaxy.
if self.app.name == "galaxy":
if relative_install_dir is None and self.repository is not None:
tool_path, relative_install_dir = self.repository.get_tool_relative_path(
self.app
)
if changeset_revision is None and self.repository is not None:
self.set_changeset_revision(self.repository.changeset_revision)
else:
self.set_changeset_revision(changeset_revision)
self.shed_config_dict = repository.get_shed_config_dict(self.app, {})
self.metadata_dict = {
"shed_config_filename": self.shed_config_dict.get("config_filename", None)
}
else:
if relative_install_dir is None and self.repository is not None:
relative_install_dir = repository.repo_path(self.app)
if changeset_revision is None and self.repository is not None:
self.set_changeset_revision(self.repository.tip(self.app))
else:
self.set_changeset_revision(changeset_revision)
self.shed_config_dict = {}
self.metadata_dict = {}
self.set_relative_install_dir(relative_install_dir)
self.set_repository_files_dir()
self.resetting_all_metadata_on_repository = False
self.updating_installed_repository = False
self.persist = False
self.invalid_file_tups = []
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def __initiate_and_install_repositories(
self, tool_shed_url, repository_revision_dict, repo_info_dicts, install_options
):
try:
has_repository_dependencies = repository_revision_dict[
"has_repository_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'has_repository_dependencies'."
)
try:
includes_tools = repository_revision_dict["includes_tools"]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools'."
)
try:
includes_tool_dependencies = repository_revision_dict[
"includes_tool_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tool_dependencies'."
)
try:
includes_tools_for_display_in_tool_panel = repository_revision_dict[
"includes_tools_for_display_in_tool_panel"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools_for_display_in_tool_panel'."
)
# Get the information about the Galaxy components (e.g., tool panel section, tool config file, etc) that will contain the repository information.
install_repository_dependencies = install_options.get(
"install_repository_dependencies", False
)
install_resolver_dependencies = install_options.get(
"install_resolver_dependencies", False
)
install_tool_dependencies = install_options.get("install_tool_dependencies", False)
if install_tool_dependencies:
self.__assert_can_install_dependencies()
new_tool_panel_section_label = install_options.get(
"new_tool_panel_section_label", ""
)
tool_panel_section_mapping = install_options.get("tool_panel_section_mapping", {})
shed_tool_conf = install_options.get("shed_tool_conf", None)
if shed_tool_conf:
# Get the tool_path setting.
shed_config_dict = self.tpm.get_shed_tool_conf_dict(shed_tool_conf)
else:
try:
dynamic_confs = self.app.toolbox.dynamic_confs(
include_migrated_tool_conf=False
)
# Pick the first tool config that doesn't set `is_shed_conf="false"` and that is not a migrated_tool_conf
shed_config_dict = dynamic_confs[0]
if self.app.config.shed_tool_config_file_set:
# Use shed_tool_config_file if explicitly set
for shed_config_dict in dynamic_confs:
if (
shed_config_dict.get("config_filename")
== self.app.config.shed_tool_config_file
):
break
except IndexError:
raise exceptions.RequestParameterMissingException(
"Missing required parameter 'shed_tool_conf'."
)
shed_tool_conf = shed_config_dict["config_filename"]
tool_path = shed_config_dict["tool_path"]
tool_panel_section_id = self.app.toolbox.find_section_id(
install_options.get("tool_panel_section_id", "")
)
# Build the dictionary of information necessary for creating tool_shed_repository database records
# for each repository being installed.
installation_dict = dict(
install_repository_dependencies=install_repository_dependencies,
new_tool_panel_section_label=new_tool_panel_section_label,
tool_panel_section_mapping=tool_panel_section_mapping,
no_changes_checked=False,
repo_info_dicts=repo_info_dicts,
tool_panel_section_id=tool_panel_section_id,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Create the tool_shed_repository database records and gather additional information for repository installation.
(
created_or_updated_tool_shed_repositories,
tool_panel_section_keys,
repo_info_dicts,
filtered_repo_info_dicts,
) = self.handle_tool_shed_repositories(installation_dict)
if created_or_updated_tool_shed_repositories:
# Build the dictionary of information necessary for installing the repositories.
installation_dict = dict(
created_or_updated_tool_shed_repositories=created_or_updated_tool_shed_repositories,
filtered_repo_info_dicts=filtered_repo_info_dicts,
has_repository_dependencies=has_repository_dependencies,
includes_tool_dependencies=includes_tool_dependencies,
includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
install_repository_dependencies=install_repository_dependencies,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
message="",
new_tool_panel_section_label=new_tool_panel_section_label,
shed_tool_conf=shed_tool_conf,
status="done",
tool_panel_section_id=tool_panel_section_id,
tool_panel_section_keys=tool_panel_section_keys,
tool_panel_section_mapping=tool_panel_section_mapping,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Prepare the repositories for installation. Even though this
# method receives a single combination of tool_shed_url, name,
# owner and changeset_revision, there may be multiple repositories
# for installation at this point because repository dependencies
# may have added additional repositories for installation along
# with the single specified repository.
encoded_kwd, query, tool_shed_repositories, encoded_repository_ids = (
self.initiate_repository_installation(installation_dict)
)
# Some repositories may have repository dependencies that are
# required to be installed before the dependent repository, so
# we'll order the list of tsr_ids to ensure all repositories
# install in the required order.
tsr_ids = [
self.app.security.encode_id(tool_shed_repository.id)
for tool_shed_repository in tool_shed_repositories
]
decoded_kwd = dict(
shed_tool_conf=shed_tool_conf,
tool_path=tool_path,
tool_panel_section_keys=tool_panel_section_keys,
repo_info_dicts=filtered_repo_info_dicts,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
tool_panel_section_mapping=tool_panel_section_mapping,
)
return self.install_repositories(
tsr_ids, decoded_kwd, reinstalling=False, install_options=install_options
)
|
def __initiate_and_install_repositories(
self, tool_shed_url, repository_revision_dict, repo_info_dicts, install_options
):
try:
has_repository_dependencies = repository_revision_dict[
"has_repository_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'has_repository_dependencies'."
)
try:
includes_tools = repository_revision_dict["includes_tools"]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools'."
)
try:
includes_tool_dependencies = repository_revision_dict[
"includes_tool_dependencies"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tool_dependencies'."
)
try:
includes_tools_for_display_in_tool_panel = repository_revision_dict[
"includes_tools_for_display_in_tool_panel"
]
except KeyError:
raise exceptions.InternalServerError(
"Tool shed response missing required parameter 'includes_tools_for_display_in_tool_panel'."
)
# Get the information about the Galaxy components (e.g., tool panel section, tool config file, etc) that will contain the repository information.
install_repository_dependencies = install_options.get(
"install_repository_dependencies", False
)
install_resolver_dependencies = install_options.get(
"install_resolver_dependencies", False
)
install_tool_dependencies = install_options.get("install_tool_dependencies", False)
if install_tool_dependencies:
self.__assert_can_install_dependencies()
new_tool_panel_section_label = install_options.get(
"new_tool_panel_section_label", ""
)
tool_panel_section_mapping = install_options.get("tool_panel_section_mapping", {})
shed_tool_conf = install_options.get("shed_tool_conf", None)
if shed_tool_conf:
# Get the tool_path setting.
shed_config_dict = self.tpm.get_shed_tool_conf_dict(shed_tool_conf)
else:
# Don't use migrated_tools_conf.xml and prefer shed_tool_config_file.
try:
for shed_config_dict in self.app.toolbox.dynamic_confs(
include_migrated_tool_conf=False
):
if (
shed_config_dict.get("config_filename")
== self.app.config.shed_tool_config_file
):
break
else:
shed_config_dict = self.app.toolbox.dynamic_confs(
include_migrated_tool_conf=False
)[0]
except IndexError:
raise exceptions.RequestParameterMissingException(
"Missing required parameter 'shed_tool_conf'."
)
shed_tool_conf = shed_config_dict["config_filename"]
tool_path = shed_config_dict["tool_path"]
tool_panel_section_id = self.app.toolbox.find_section_id(
install_options.get("tool_panel_section_id", "")
)
# Build the dictionary of information necessary for creating tool_shed_repository database records
# for each repository being installed.
installation_dict = dict(
install_repository_dependencies=install_repository_dependencies,
new_tool_panel_section_label=new_tool_panel_section_label,
tool_panel_section_mapping=tool_panel_section_mapping,
no_changes_checked=False,
repo_info_dicts=repo_info_dicts,
tool_panel_section_id=tool_panel_section_id,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Create the tool_shed_repository database records and gather additional information for repository installation.
(
created_or_updated_tool_shed_repositories,
tool_panel_section_keys,
repo_info_dicts,
filtered_repo_info_dicts,
) = self.handle_tool_shed_repositories(installation_dict)
if created_or_updated_tool_shed_repositories:
# Build the dictionary of information necessary for installing the repositories.
installation_dict = dict(
created_or_updated_tool_shed_repositories=created_or_updated_tool_shed_repositories,
filtered_repo_info_dicts=filtered_repo_info_dicts,
has_repository_dependencies=has_repository_dependencies,
includes_tool_dependencies=includes_tool_dependencies,
includes_tools=includes_tools,
includes_tools_for_display_in_tool_panel=includes_tools_for_display_in_tool_panel,
install_repository_dependencies=install_repository_dependencies,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
message="",
new_tool_panel_section_label=new_tool_panel_section_label,
shed_tool_conf=shed_tool_conf,
status="done",
tool_panel_section_id=tool_panel_section_id,
tool_panel_section_keys=tool_panel_section_keys,
tool_panel_section_mapping=tool_panel_section_mapping,
tool_path=tool_path,
tool_shed_url=tool_shed_url,
)
# Prepare the repositories for installation. Even though this
# method receives a single combination of tool_shed_url, name,
# owner and changeset_revision, there may be multiple repositories
# for installation at this point because repository dependencies
# may have added additional repositories for installation along
# with the single specified repository.
encoded_kwd, query, tool_shed_repositories, encoded_repository_ids = (
self.initiate_repository_installation(installation_dict)
)
# Some repositories may have repository dependencies that are
# required to be installed before the dependent repository, so
# we'll order the list of tsr_ids to ensure all repositories
# install in the required order.
tsr_ids = [
self.app.security.encode_id(tool_shed_repository.id)
for tool_shed_repository in tool_shed_repositories
]
decoded_kwd = dict(
shed_tool_conf=shed_tool_conf,
tool_path=tool_path,
tool_panel_section_keys=tool_panel_section_keys,
repo_info_dicts=filtered_repo_info_dicts,
install_resolver_dependencies=install_resolver_dependencies,
install_tool_dependencies=install_tool_dependencies,
tool_panel_section_mapping=tool_panel_section_mapping,
)
return self.install_repositories(
tsr_ids, decoded_kwd, reinstalling=False, install_options=install_options
)
|
https://github.com/galaxyproject/galaxy/issues/8952
|
Nov 09 13:22:10 sn04.bi.uni-freiburg.de uwsgi[4107748]: [pid: 4124485|app: 0|req: 9911/42155] 88.66.136.83 () {84 vars in 1575 bytes} [Sat Nov 9 13:22:10 2019] POST /admin_toolshed/repository_installation_status_updates => generated 2 bytes in 23 msecs (HTTP/1.1 200) 3 headers in 96 bytes (1 switches on core 0)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.datatypes.registry DEBUG 2019-11-09 13:22:11,317 [p:119216,w:4,m:0] [Thread-1] Loaded external metadata tool: __SET_METADATA__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,323 [p:119216,w:4,m:0] [Thread-1] Loaded history import tool: __IMPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,327 [p:119216,w:4,m:0] [Thread-1] Loaded data fetch tool: __DATA_FETCH__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.tools.special_tools DEBUG 2019-11-09 13:22:11,330 [p:119216,w:4,m:0] [Thread-1] Loaded history export tool: __EXPORT_HISTORY__
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,332 [p:119216,w:4,m:0] [Thread-1] Queuing async task rebuild_toolbox_search_index for main.web.4.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,336 [p:119216,w:4,m:0] [Thread-1] Toolbox reload (29743.348 ms)
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker INFO 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Instance 'main.web.4' received 'reload_toolbox' task, executing now.
Nov 09 13:22:11 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker DEBUG 2019-11-09 13:22:11,338 [p:119216,w:4,m:0] [Thread-1] Executing toolbox reload on 'main.web.4'
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: 115.146.92.55 - - [09/Nov/2019:13:22:11 +0200] "GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 HTTP/1.1" 200 - "-" "python-requests/2.22.0"
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: [pid: 3882852|app: 0|req: 11155/55634] 115.146.92.55 () {70 vars in 992 bytes} [Sat Nov 9 13:22:11 2019] GET /api/jobs/4d16a9b9d521c9b3?key=59001665ead9d0bfd82dad0c0253dda0 => generated 1118 bytes in 88 msecs (HTTP/1.1 200) 2 headers in 110 bytes (1 switches on core 0)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3843198]: galaxy.tools.search DEBUG 2019-11-09 13:22:12,036 [p:228435,w:4,m:0] [Thread-1] Toolbox index finished (29660.792 ms)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.util ERROR 2019-11-09 13:22:12,537 [p:119216,w:4,m:0] [Thread-1] Error parsing file /opt/galaxy/mutable-config/integrated_tool_panel.xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: galaxy.queue_worker ERROR 2019-11-09 13:22:12,538 [p:119216,w:4,m:0] [Thread-1] Error running control task type: reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: Traceback (most recent call last):
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 372, in process_task
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: result = f(self.app, **body['kwargs'])
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 171, in reload_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: _get_new_toolbox(app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/queue_worker.py", line 188, in _get_new_toolbox
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/__init__.py", line 253, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: app=app,
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 1163, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 100, in __init__
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._init_integrated_tool_panel(app.config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/integrated_panel.py", line 39, in _init_integrated_tool_panel
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._load_integrated_tool_panel_keys()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/tools/toolbox/base.py", line 445, in _load_integrated_tool_panel_keys
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: tree = parse_xml(self._integrated_tool_panel_config)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "lib/galaxy/util/__init__.py", line 236, in parse_xml
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 657, in parse
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._root = parser.close()
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1654, in close
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: self._raiseerror(v)
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: File "/usr/lib64/python2.7/xml/etree/ElementTree.py", line 1506, in _raiseerror
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: raise err
Nov 09 13:22:12 sn04.bi.uni-freiburg.de uwsgi[3191341]: ParseError: unclosed token: line 10952, column 8
|
ParseError
|
def process_bind_param(self, value, dialect):
"""Automatically truncate string values"""
if self.impl.length and value is not None:
value = unicodify(value)[0 : self.impl.length]
return value
|
def process_bind_param(self, value, dialect):
"""Automatically truncate string values"""
if self.impl.length and value is not None:
value = value[0 : self.impl.length]
return value
|
https://github.com/galaxyproject/galaxy/issues/8680
|
galaxy.web.framework.decorators ERROR 2019-09-19 17:20:36,279 [p:47682,w:1,m:0] [uWSGIWorker1Core0] Uncaught exception in exposed API method:
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 157, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/dataset.py", line 426, in set_edit
trans.sa_session.flush()
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/scoping.py", line 162, in do
return getattr(self.registry(), name)(*args, **kwargs)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 2459, in flush
self._flush(objects)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 2597, in _flush
transaction.rollback(_capture_exception=True)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/util/langhelpers.py", line 68, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 2557, in _flush
flush_context.execute()
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/unitofwork.py", line 422, in execute
rec.execute(self)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/unitofwork.py", line 589, in execute
uow,
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/persistence.py", line 236, in save_obj
update,
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/orm/persistence.py", line 996, in _emit_update_statements
statement, multiparams
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
distilled_params,
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
e, util.text_type(statement), parameters, None, None
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 398, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1179, in _execute_context
context = constructor(dialect, self, conn, *args)
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 719, in _init_compiled
param.append(processors[key](compiled_params[key]))
File "/path/to/galaxy/.venv/lib/python2.7/site-packages/sqlalchemy/sql/type_api.py", line 1210, in process
return process_param(value, dialect)
File "lib/galaxy/model/custom_types.py", line 375, in process_bind_param
value = value[0:self.impl.length]
StatementError: (exceptions.TypeError) 'int' object has no attribute '__getitem__'
[SQL: UPDATE history_dataset_association SET update_time=?, name=?, metadata=?, version=? WHERE history_dataset_association.id = ?]
[parameters: [{'_metadata': {u'data_lines': 1, u'dbkey': [u'?']}, 'version': 4, 'name': 1, 'history_dataset_association_id': 1234}]]
|
StatementError
|
def _upload_dataset(self, trans, library_id, folder_id, replace_dataset=None, **kwd):
# Set up the traditional tool state/params
cntrller = "api"
tool_id = "upload1"
message = None
file_type = kwd.get("file_type")
try:
upload_common.validate_datatype_extension(
datatypes_registry=trans.app.datatypes_registry, ext=file_type
)
except RequestParameterInvalidException as e:
return (400, util.unicodify(e))
tool = trans.app.toolbox.get_tool(tool_id)
state = tool.new_state(trans)
populate_state(trans, tool.inputs, kwd, state.inputs)
tool_params = state.inputs
dataset_upload_inputs = []
for input_name, input in tool.inputs.items():
if input.type == "upload_dataset":
dataset_upload_inputs.append(input)
# Library-specific params
server_dir = kwd.get("server_dir", "")
upload_option = kwd.get("upload_option", "upload_file")
response_code = 200
if upload_option == "upload_directory":
full_dir, import_dir_desc = validate_server_directory_upload(trans, server_dir)
message = "Select a directory"
elif upload_option == "upload_paths":
# Library API already checked this - following check isn't actually needed.
validate_path_upload(trans)
# Some error handling should be added to this method.
try:
# FIXME: instead of passing params here ( which have been processed by util.Params(), the original kwd
# should be passed so that complex objects that may have been included in the initial request remain.
library_bunch = upload_common.handle_library_params(
trans, kwd, folder_id, replace_dataset
)
except Exception:
response_code = 500
message = "Unable to parse upload parameters, please report this error."
# Proceed with (mostly) regular upload processing if we're still errorless
if response_code == 200:
if upload_option == "upload_file":
tool_params = upload_common.persist_uploads(tool_params, trans)
uploaded_datasets = upload_common.get_uploaded_datasets(
trans,
cntrller,
tool_params,
dataset_upload_inputs,
library_bunch=library_bunch,
)
elif upload_option == "upload_directory":
uploaded_datasets, response_code, message = (
self._get_server_dir_uploaded_datasets(
trans,
kwd,
full_dir,
import_dir_desc,
library_bunch,
response_code,
message,
)
)
elif upload_option == "upload_paths":
uploaded_datasets, response_code, message = (
self._get_path_paste_uploaded_datasets(
trans, kwd, library_bunch, response_code, message
)
)
if upload_option == "upload_file" and not uploaded_datasets:
response_code = 400
message = "Select a file, enter a URL or enter text"
if response_code != 200:
return (response_code, message)
json_file_path = upload_common.create_paramfile(trans, uploaded_datasets)
data_list = [ud.data for ud in uploaded_datasets]
job_params = {}
job_params["link_data_only"] = json.dumps(kwd.get("link_data_only", "copy_files"))
job_params["uuid"] = json.dumps(kwd.get("uuid", None))
job, output = upload_common.create_job(
trans,
tool_params,
tool,
json_file_path,
data_list,
folder=library_bunch.folder,
job_params=job_params,
)
trans.sa_session.add(job)
trans.sa_session.flush()
return output
|
def _upload_dataset(self, trans, library_id, folder_id, replace_dataset=None, **kwd):
# Set up the traditional tool state/params
cntrller = "api"
tool_id = "upload1"
message = None
tool = trans.app.toolbox.get_tool(tool_id)
state = tool.new_state(trans)
populate_state(trans, tool.inputs, kwd, state.inputs)
tool_params = state.inputs
dataset_upload_inputs = []
for input_name, input in tool.inputs.items():
if input.type == "upload_dataset":
dataset_upload_inputs.append(input)
# Library-specific params
server_dir = kwd.get("server_dir", "")
upload_option = kwd.get("upload_option", "upload_file")
response_code = 200
if upload_option == "upload_directory":
full_dir, import_dir_desc = validate_server_directory_upload(trans, server_dir)
message = "Select a directory"
elif upload_option == "upload_paths":
# Library API already checked this - following check isn't actually needed.
validate_path_upload(trans)
# Some error handling should be added to this method.
try:
# FIXME: instead of passing params here ( which have been processed by util.Params(), the original kwd
# should be passed so that complex objects that may have been included in the initial request remain.
library_bunch = upload_common.handle_library_params(
trans, kwd, folder_id, replace_dataset
)
except Exception:
response_code = 500
message = "Unable to parse upload parameters, please report this error."
# Proceed with (mostly) regular upload processing if we're still errorless
if response_code == 200:
if upload_option == "upload_file":
tool_params = upload_common.persist_uploads(tool_params, trans)
uploaded_datasets = upload_common.get_uploaded_datasets(
trans,
cntrller,
tool_params,
dataset_upload_inputs,
library_bunch=library_bunch,
)
elif upload_option == "upload_directory":
uploaded_datasets, response_code, message = (
self._get_server_dir_uploaded_datasets(
trans,
kwd,
full_dir,
import_dir_desc,
library_bunch,
response_code,
message,
)
)
elif upload_option == "upload_paths":
uploaded_datasets, response_code, message = (
self._get_path_paste_uploaded_datasets(
trans, kwd, library_bunch, response_code, message
)
)
if upload_option == "upload_file" and not uploaded_datasets:
response_code = 400
message = "Select a file, enter a URL or enter text"
if response_code != 200:
return (response_code, message)
json_file_path = upload_common.create_paramfile(trans, uploaded_datasets)
data_list = [ud.data for ud in uploaded_datasets]
job_params = {}
job_params["link_data_only"] = json.dumps(kwd.get("link_data_only", "copy_files"))
job_params["uuid"] = json.dumps(kwd.get("uuid", None))
job, output = upload_common.create_job(
trans,
tool_params,
tool,
json_file_path,
data_list,
folder=library_bunch.folder,
job_params=job_params,
)
trans.sa_session.add(job)
trans.sa_session.flush()
return output
|
https://github.com/galaxyproject/galaxy/issues/8820
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 282, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/api/library_contents.py", line 250, in create
status, output = self._upload_library_dataset(trans, library_id, real_folder_id, **payload)
File "lib/galaxy/webapps/galaxy/api/library_contents.py", line 337, in _upload_library_dataset
**kwd)
File "lib/galaxy/actions/library.py", line 108, in _upload_dataset
uploaded_datasets = upload_common.get_uploaded_datasets(trans, cntrller, tool_params, dataset_upload_inputs, library_bunch=library_bunch)
File "lib/galaxy/tools/actions/upload_common.py", line 289, in get_uploaded_datasets
uploaded_datasets.extend(dataset_upload_input.get_uploaded_datasets(trans, params))
File "lib/galaxy/tools/parameters/grouping.py", line 548, in get_uploaded_datasets
writable_files = d_type.writable_files
AttributeError: 'NoneType' object has no attribute 'writable_files'
|
AttributeError
|
def validate_and_normalize_targets(trans, payload):
"""Validate and normalize all src references in fetch targets.
- Normalize ftp_import and server_dir src entries into simple path entries
with the relevant paths resolved and permissions / configuration checked.
- Check for file:// URLs in items src of "url" and convert them into path
src items - after verifying path pastes are allowed and user is admin.
- Check for valid URLs to be fetched for http and https entries.
- Based on Galaxy configuration and upload types set purge_source and in_place
as needed for each upload.
"""
targets = payload.get("targets", [])
for target in targets:
destination = get_required_item(
target, "destination", "Each target must specify a 'destination'"
)
destination_type = get_required_item(
destination, "type", "Each target destination must specify a 'type'"
)
if "object_id" in destination:
raise RequestParameterInvalidException(
"object_id not allowed to appear in the request."
)
if destination_type not in VALID_DESTINATION_TYPES:
template = (
"Invalid target destination type [%s] encountered, must be one of %s"
)
msg = template % (destination_type, VALID_DESTINATION_TYPES)
raise RequestParameterInvalidException(msg)
if destination_type == "library":
library_name = get_required_item(
destination, "name", "Must specify a library name"
)
description = destination.get("description", "")
synopsis = destination.get("synopsis", "")
library = trans.app.library_manager.create(
trans, library_name, description=description, synopsis=synopsis
)
destination["type"] = "library_folder"
for key in ["name", "description", "synopsis"]:
if key in destination:
del destination[key]
destination["library_folder_id"] = trans.app.security.encode_id(
library.root_folder.id
)
# Unlike upload.py we don't transmit or use run_as_real_user in the job - we just make sure
# in_place and purge_source are set on the individual upload fetch sources as needed based
# on this.
run_as_real_user = (
trans.app.config.external_chown_script is not None
) # See comment in upload.py
purge_ftp_source = (
getattr(trans.app.config, "ftp_upload_purge", True) and not run_as_real_user
)
payload["check_content"] = trans.app.config.check_upload_content
def check_src(item):
if "object_id" in item:
raise RequestParameterInvalidException(
"object_id not allowed to appear in the request."
)
validate_datatype_extension(
datatypes_registry=trans.app.datatypes_registry, ext=item.get("ext")
)
# Normalize file:// URLs into paths.
if item["src"] == "url" and item["url"].startswith("file://"):
item["src"] = "path"
item["path"] = item["url"][len("file://") :]
del item["path"]
if "in_place" in item:
raise RequestParameterInvalidException(
"in_place cannot be set in the upload request"
)
src = item["src"]
# Check link_data_only can only be set for certain src types and certain elements_from types.
_handle_invalid_link_data_only_elements_type(item)
if src not in ["path", "server_dir"]:
_handle_invalid_link_data_only_type(item)
elements_from = item.get("elements_from", None)
if elements_from and elements_from not in ELEMENTS_FROM_TYPE:
raise RequestParameterInvalidException(
"Invalid elements_from/items_from found in request"
)
if src == "path" or (src == "url" and item["url"].startswith("file:")):
# Validate is admin, leave alone.
validate_path_upload(trans)
elif src == "server_dir":
# Validate and replace with path definition.
server_dir = item["server_dir"]
full_path, _ = validate_server_directory_upload(trans, server_dir)
item["src"] = "path"
item["path"] = full_path
elif src == "ftp_import":
ftp_path = item["ftp_path"]
full_path = None
# It'd be nice if this can be de-duplicated with what is in parameters/grouping.py.
user_ftp_dir = trans.user_ftp_dir
is_directory = False
assert not os.path.islink(user_ftp_dir), (
"User FTP directory cannot be a symbolic link"
)
for dirpath, dirnames, filenames in os.walk(user_ftp_dir):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = os.path.abspath(
os.path.join(user_ftp_dir, path)
)
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, dirname), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, dirname)):
full_path = os.path.abspath(
os.path.join(user_ftp_dir, path)
)
is_directory = True
break
if is_directory:
# If the target is a directory - make sure no files under it are symbolic links
for dirpath, dirnames, filenames in os.walk(full_path):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
if not full_path:
raise RequestParameterInvalidException(
"Failed to find referenced ftp_path or symbolic link was enountered"
)
item["src"] = "path"
item["path"] = full_path
item["purge_source"] = purge_ftp_source
elif src == "url":
url = item["url"]
looks_like_url = False
for url_prefix in ["http://", "https://", "ftp://", "ftps://"]:
if url.startswith(url_prefix):
looks_like_url = True
break
if not looks_like_url:
raise RequestParameterInvalidException(
"Invalid URL [%s] found in src definition." % url
)
validate_url(url, trans.app.config.fetch_url_whitelist_ips)
item["in_place"] = run_as_real_user
elif src == "files":
item["in_place"] = run_as_real_user
# Small disagreement with traditional uploads - we purge less by default since whether purging
# happens varies based on upload options in non-obvious ways.
# https://github.com/galaxyproject/galaxy/issues/5361
if "purge_source" not in item:
item["purge_source"] = False
replace_request_syntax_sugar(targets)
_for_each_src(check_src, targets)
|
def validate_and_normalize_targets(trans, payload):
"""Validate and normalize all src references in fetch targets.
- Normalize ftp_import and server_dir src entries into simple path entries
with the relevant paths resolved and permissions / configuration checked.
- Check for file:// URLs in items src of "url" and convert them into path
src items - after verifying path pastes are allowed and user is admin.
- Check for valid URLs to be fetched for http and https entries.
- Based on Galaxy configuration and upload types set purge_source and in_place
as needed for each upload.
"""
targets = payload.get("targets", [])
for target in targets:
destination = get_required_item(
target, "destination", "Each target must specify a 'destination'"
)
destination_type = get_required_item(
destination, "type", "Each target destination must specify a 'type'"
)
if "object_id" in destination:
raise RequestParameterInvalidException(
"object_id not allowed to appear in the request."
)
if destination_type not in VALID_DESTINATION_TYPES:
template = (
"Invalid target destination type [%s] encountered, must be one of %s"
)
msg = template % (destination_type, VALID_DESTINATION_TYPES)
raise RequestParameterInvalidException(msg)
if destination_type == "library":
library_name = get_required_item(
destination, "name", "Must specify a library name"
)
description = destination.get("description", "")
synopsis = destination.get("synopsis", "")
library = trans.app.library_manager.create(
trans, library_name, description=description, synopsis=synopsis
)
destination["type"] = "library_folder"
for key in ["name", "description", "synopsis"]:
if key in destination:
del destination[key]
destination["library_folder_id"] = trans.app.security.encode_id(
library.root_folder.id
)
# Unlike upload.py we don't transmit or use run_as_real_user in the job - we just make sure
# in_place and purge_source are set on the individual upload fetch sources as needed based
# on this.
run_as_real_user = (
trans.app.config.external_chown_script is not None
) # See comment in upload.py
purge_ftp_source = (
getattr(trans.app.config, "ftp_upload_purge", True) and not run_as_real_user
)
payload["check_content"] = trans.app.config.check_upload_content
def check_src(item):
if "object_id" in item:
raise RequestParameterInvalidException(
"object_id not allowed to appear in the request."
)
# Normalize file:// URLs into paths.
if item["src"] == "url" and item["url"].startswith("file://"):
item["src"] = "path"
item["path"] = item["url"][len("file://") :]
del item["path"]
if "in_place" in item:
raise RequestParameterInvalidException(
"in_place cannot be set in the upload request"
)
src = item["src"]
# Check link_data_only can only be set for certain src types and certain elements_from types.
_handle_invalid_link_data_only_elements_type(item)
if src not in ["path", "server_dir"]:
_handle_invalid_link_data_only_type(item)
elements_from = item.get("elements_from", None)
if elements_from and elements_from not in ELEMENTS_FROM_TYPE:
raise RequestParameterInvalidException(
"Invalid elements_from/items_from found in request"
)
if src == "path" or (src == "url" and item["url"].startswith("file:")):
# Validate is admin, leave alone.
validate_path_upload(trans)
elif src == "server_dir":
# Validate and replace with path definition.
server_dir = item["server_dir"]
full_path, _ = validate_server_directory_upload(trans, server_dir)
item["src"] = "path"
item["path"] = full_path
elif src == "ftp_import":
ftp_path = item["ftp_path"]
full_path = None
# It'd be nice if this can be de-duplicated with what is in parameters/grouping.py.
user_ftp_dir = trans.user_ftp_dir
is_directory = False
assert not os.path.islink(user_ftp_dir), (
"User FTP directory cannot be a symbolic link"
)
for dirpath, dirnames, filenames in os.walk(user_ftp_dir):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = os.path.abspath(
os.path.join(user_ftp_dir, path)
)
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, dirname), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, dirname)):
full_path = os.path.abspath(
os.path.join(user_ftp_dir, path)
)
is_directory = True
break
if is_directory:
# If the target is a directory - make sure no files under it are symbolic links
for dirpath, dirnames, filenames in os.walk(full_path):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
if not full_path:
raise RequestParameterInvalidException(
"Failed to find referenced ftp_path or symbolic link was enountered"
)
item["src"] = "path"
item["path"] = full_path
item["purge_source"] = purge_ftp_source
elif src == "url":
url = item["url"]
looks_like_url = False
for url_prefix in ["http://", "https://", "ftp://", "ftps://"]:
if url.startswith(url_prefix):
looks_like_url = True
break
if not looks_like_url:
raise RequestParameterInvalidException(
"Invalid URL [%s] found in src definition." % url
)
validate_url(url, trans.app.config.fetch_url_whitelist_ips)
item["in_place"] = run_as_real_user
elif src == "files":
item["in_place"] = run_as_real_user
# Small disagreement with traditional uploads - we purge less by default since whether purging
# happens varies based on upload options in non-obvious ways.
# https://github.com/galaxyproject/galaxy/issues/5361
if "purge_source" not in item:
item["purge_source"] = False
replace_request_syntax_sugar(targets)
_for_each_src(check_src, targets)
|
https://github.com/galaxyproject/galaxy/issues/8820
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 282, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/api/library_contents.py", line 250, in create
status, output = self._upload_library_dataset(trans, library_id, real_folder_id, **payload)
File "lib/galaxy/webapps/galaxy/api/library_contents.py", line 337, in _upload_library_dataset
**kwd)
File "lib/galaxy/actions/library.py", line 108, in _upload_dataset
uploaded_datasets = upload_common.get_uploaded_datasets(trans, cntrller, tool_params, dataset_upload_inputs, library_bunch=library_bunch)
File "lib/galaxy/tools/actions/upload_common.py", line 289, in get_uploaded_datasets
uploaded_datasets.extend(dataset_upload_input.get_uploaded_datasets(trans, params))
File "lib/galaxy/tools/parameters/grouping.py", line 548, in get_uploaded_datasets
writable_files = d_type.writable_files
AttributeError: 'NoneType' object has no attribute 'writable_files'
|
AttributeError
|
def check_src(item):
if "object_id" in item:
raise RequestParameterInvalidException(
"object_id not allowed to appear in the request."
)
validate_datatype_extension(
datatypes_registry=trans.app.datatypes_registry, ext=item.get("ext")
)
# Normalize file:// URLs into paths.
if item["src"] == "url" and item["url"].startswith("file://"):
item["src"] = "path"
item["path"] = item["url"][len("file://") :]
del item["path"]
if "in_place" in item:
raise RequestParameterInvalidException(
"in_place cannot be set in the upload request"
)
src = item["src"]
# Check link_data_only can only be set for certain src types and certain elements_from types.
_handle_invalid_link_data_only_elements_type(item)
if src not in ["path", "server_dir"]:
_handle_invalid_link_data_only_type(item)
elements_from = item.get("elements_from", None)
if elements_from and elements_from not in ELEMENTS_FROM_TYPE:
raise RequestParameterInvalidException(
"Invalid elements_from/items_from found in request"
)
if src == "path" or (src == "url" and item["url"].startswith("file:")):
# Validate is admin, leave alone.
validate_path_upload(trans)
elif src == "server_dir":
# Validate and replace with path definition.
server_dir = item["server_dir"]
full_path, _ = validate_server_directory_upload(trans, server_dir)
item["src"] = "path"
item["path"] = full_path
elif src == "ftp_import":
ftp_path = item["ftp_path"]
full_path = None
# It'd be nice if this can be de-duplicated with what is in parameters/grouping.py.
user_ftp_dir = trans.user_ftp_dir
is_directory = False
assert not os.path.islink(user_ftp_dir), (
"User FTP directory cannot be a symbolic link"
)
for dirpath, dirnames, filenames in os.walk(user_ftp_dir):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = os.path.abspath(os.path.join(user_ftp_dir, path))
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, dirname), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, dirname)):
full_path = os.path.abspath(os.path.join(user_ftp_dir, path))
is_directory = True
break
if is_directory:
# If the target is a directory - make sure no files under it are symbolic links
for dirpath, dirnames, filenames in os.walk(full_path):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
if not full_path:
raise RequestParameterInvalidException(
"Failed to find referenced ftp_path or symbolic link was enountered"
)
item["src"] = "path"
item["path"] = full_path
item["purge_source"] = purge_ftp_source
elif src == "url":
url = item["url"]
looks_like_url = False
for url_prefix in ["http://", "https://", "ftp://", "ftps://"]:
if url.startswith(url_prefix):
looks_like_url = True
break
if not looks_like_url:
raise RequestParameterInvalidException(
"Invalid URL [%s] found in src definition." % url
)
validate_url(url, trans.app.config.fetch_url_whitelist_ips)
item["in_place"] = run_as_real_user
elif src == "files":
item["in_place"] = run_as_real_user
# Small disagreement with traditional uploads - we purge less by default since whether purging
# happens varies based on upload options in non-obvious ways.
# https://github.com/galaxyproject/galaxy/issues/5361
if "purge_source" not in item:
item["purge_source"] = False
|
def check_src(item):
if "object_id" in item:
raise RequestParameterInvalidException(
"object_id not allowed to appear in the request."
)
# Normalize file:// URLs into paths.
if item["src"] == "url" and item["url"].startswith("file://"):
item["src"] = "path"
item["path"] = item["url"][len("file://") :]
del item["path"]
if "in_place" in item:
raise RequestParameterInvalidException(
"in_place cannot be set in the upload request"
)
src = item["src"]
# Check link_data_only can only be set for certain src types and certain elements_from types.
_handle_invalid_link_data_only_elements_type(item)
if src not in ["path", "server_dir"]:
_handle_invalid_link_data_only_type(item)
elements_from = item.get("elements_from", None)
if elements_from and elements_from not in ELEMENTS_FROM_TYPE:
raise RequestParameterInvalidException(
"Invalid elements_from/items_from found in request"
)
if src == "path" or (src == "url" and item["url"].startswith("file:")):
# Validate is admin, leave alone.
validate_path_upload(trans)
elif src == "server_dir":
# Validate and replace with path definition.
server_dir = item["server_dir"]
full_path, _ = validate_server_directory_upload(trans, server_dir)
item["src"] = "path"
item["path"] = full_path
elif src == "ftp_import":
ftp_path = item["ftp_path"]
full_path = None
# It'd be nice if this can be de-duplicated with what is in parameters/grouping.py.
user_ftp_dir = trans.user_ftp_dir
is_directory = False
assert not os.path.islink(user_ftp_dir), (
"User FTP directory cannot be a symbolic link"
)
for dirpath, dirnames, filenames in os.walk(user_ftp_dir):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = os.path.abspath(os.path.join(user_ftp_dir, path))
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, dirname), user_ftp_dir)
if not os.path.islink(os.path.join(dirpath, dirname)):
full_path = os.path.abspath(os.path.join(user_ftp_dir, path))
is_directory = True
break
if is_directory:
# If the target is a directory - make sure no files under it are symbolic links
for dirpath, dirnames, filenames in os.walk(full_path):
for filename in filenames:
if ftp_path == filename:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
for dirname in dirnames:
if ftp_path == dirname:
path = relpath(os.path.join(dirpath, filename), full_path)
if not os.path.islink(os.path.join(dirpath, filename)):
full_path = False
break
if not full_path:
raise RequestParameterInvalidException(
"Failed to find referenced ftp_path or symbolic link was enountered"
)
item["src"] = "path"
item["path"] = full_path
item["purge_source"] = purge_ftp_source
elif src == "url":
url = item["url"]
looks_like_url = False
for url_prefix in ["http://", "https://", "ftp://", "ftps://"]:
if url.startswith(url_prefix):
looks_like_url = True
break
if not looks_like_url:
raise RequestParameterInvalidException(
"Invalid URL [%s] found in src definition." % url
)
validate_url(url, trans.app.config.fetch_url_whitelist_ips)
item["in_place"] = run_as_real_user
elif src == "files":
item["in_place"] = run_as_real_user
# Small disagreement with traditional uploads - we purge less by default since whether purging
# happens varies based on upload options in non-obvious ways.
# https://github.com/galaxyproject/galaxy/issues/5361
if "purge_source" not in item:
item["purge_source"] = False
|
https://github.com/galaxyproject/galaxy/issues/8820
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 282, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/api/library_contents.py", line 250, in create
status, output = self._upload_library_dataset(trans, library_id, real_folder_id, **payload)
File "lib/galaxy/webapps/galaxy/api/library_contents.py", line 337, in _upload_library_dataset
**kwd)
File "lib/galaxy/actions/library.py", line 108, in _upload_dataset
uploaded_datasets = upload_common.get_uploaded_datasets(trans, cntrller, tool_params, dataset_upload_inputs, library_bunch=library_bunch)
File "lib/galaxy/tools/actions/upload_common.py", line 289, in get_uploaded_datasets
uploaded_datasets.extend(dataset_upload_input.get_uploaded_datasets(trans, params))
File "lib/galaxy/tools/parameters/grouping.py", line 548, in get_uploaded_datasets
writable_files = d_type.writable_files
AttributeError: 'NoneType' object has no attribute 'writable_files'
|
AttributeError
|
def parse_xml(file_name, check_exists=True):
"""Returns a parsed xml tree with comments intact."""
error_message = ""
if check_exists and not os.path.exists(file_name):
return None, "File does not exist %s" % str(file_name)
with open(file_name, "r") as fobj:
try:
tree = XmlET.parse(
fobj, parser=XmlET.XMLParser(target=CommentedTreeBuilder())
)
except Exception as e:
error_message = "Exception attempting to parse %s: %s" % (
str(file_name),
str(e),
)
log.exception(error_message)
return None, error_message
return tree, error_message
|
def parse_xml(file_name, check_exists=True):
"""Returns a parsed xml tree with comments intact."""
error_message = ""
if check_exists and not os.path.exists(file_name):
return None, "File does not exist %s" % str(file_name)
with open(file_name, "r") as fobj:
try:
tree = XmlET.parse(
fobj, parser=XmlET.XMLParser(target=Py27CommentedTreeBuilder())
)
except Exception as e:
error_message = "Exception attempting to parse %s: %s" % (
str(file_name),
str(e),
)
log.exception(error_message)
return None, error_message
return tree, error_message
|
https://github.com/galaxyproject/galaxy/issues/8641
|
tool_shed.util.xml_util ERROR 2019-09-12 19:47:44,334 [p:54106,w:1,m:0] [uWSGIWorker1Core1] Exception attempting to parse GLXFOLDER/tool-data/TOOLSHED/repos/USER/REPO/REVISION/tool_data_table_conf.xml: multiple elements on top level
Traceback (most recent call last):
File "lib/tool_shed/util/xml_util.py", line 87, in parse_xml
tree = XmlET.parse(fobj, parser=XmlET.XMLParser(target=Py27CommentedTreeBuilder()))
File "PYTHON3.7/lib/python3.7/xml/etree/ElementTree.py", line 1197, in parse
tree.parse(source, parser)
File "PYTHON3.7/lib/python3.7/xml/etree/ElementTree.py", line 604, in parse
parser.feed(data)
File "<string>", line None
xml.etree.ElementTree.ParseError: multiple elements on top level
|
xml.etree.ElementTree.ParseError
|
def comment(self, data):
if self.non_comment_seen:
# Cannot start XML file with comment
self.start(XmlET.Comment, {})
self.data(data)
self.end(XmlET.Comment)
|
def comment(self, data):
self.start(XmlET.Comment, {})
self.data(data)
self.end(XmlET.Comment)
|
https://github.com/galaxyproject/galaxy/issues/8641
|
tool_shed.util.xml_util ERROR 2019-09-12 19:47:44,334 [p:54106,w:1,m:0] [uWSGIWorker1Core1] Exception attempting to parse GLXFOLDER/tool-data/TOOLSHED/repos/USER/REPO/REVISION/tool_data_table_conf.xml: multiple elements on top level
Traceback (most recent call last):
File "lib/tool_shed/util/xml_util.py", line 87, in parse_xml
tree = XmlET.parse(fobj, parser=XmlET.XMLParser(target=Py27CommentedTreeBuilder()))
File "PYTHON3.7/lib/python3.7/xml/etree/ElementTree.py", line 1197, in parse
tree.parse(source, parser)
File "PYTHON3.7/lib/python3.7/xml/etree/ElementTree.py", line 604, in parse
parser.feed(data)
File "<string>", line None
xml.etree.ElementTree.ParseError: multiple elements on top level
|
xml.etree.ElementTree.ParseError
|
def install_data_managers(
self,
shed_data_manager_conf_filename,
metadata_dict,
shed_config_dict,
relative_install_dir,
repository,
repository_tools_tups,
):
rval = []
if "data_manager" in metadata_dict:
tpm = tool_panel_manager.ToolPanelManager(self.app)
repository_tools_by_guid = {}
for tool_tup in repository_tools_tups:
repository_tools_by_guid[tool_tup[1]] = dict(
tool_config_filename=tool_tup[0], tool=tool_tup[2]
)
# Load existing data managers.
try:
tree, error_message = xml_util.parse_xml(
shed_data_manager_conf_filename, check_exists=False
)
except (OSError, IOError) as exc:
if exc.errno == errno.ENOENT:
with open(shed_data_manager_conf_filename, "w") as fh:
fh.write(SHED_DATA_MANAGER_CONF_XML)
tree, error_message = xml_util.parse_xml(
shed_data_manager_conf_filename
)
else:
raise
if tree is None:
return rval
config_elems = [elem for elem in tree.getroot()]
repo_data_manager_conf_filename = metadata_dict["data_manager"].get(
"config_filename", None
)
if repo_data_manager_conf_filename is None:
log.debug("No data_manager_conf.xml file has been defined.")
return rval
data_manager_config_has_changes = False
relative_repo_data_manager_dir = os.path.join(
shed_config_dict.get("tool_path", ""), relative_install_dir
)
repo_data_manager_conf_filename = os.path.join(
relative_repo_data_manager_dir, repo_data_manager_conf_filename
)
tree, error_message = xml_util.parse_xml(repo_data_manager_conf_filename)
if tree is None:
return rval
root = tree.getroot()
for elem in root:
if elem.tag == "data_manager":
data_manager_id = elem.get("id", None)
if data_manager_id is None:
log.error(
"A data manager was defined that does not have an id and will not be installed:\n%s"
% xml_to_string(elem)
)
continue
data_manager_dict = (
metadata_dict["data_manager"]
.get("data_managers", {})
.get(data_manager_id, None)
)
if data_manager_dict is None:
log.error(
"Data manager metadata is not defined properly for '%s'."
% (data_manager_id)
)
continue
guid = data_manager_dict.get("guid", None)
if guid is None:
log.error(
"Data manager guid '%s' is not set in metadata for '%s'."
% (guid, data_manager_id)
)
continue
elem.set("guid", guid)
tool_guid = data_manager_dict.get("tool_guid", None)
if tool_guid is None:
log.error(
"Data manager tool guid '%s' is not set in metadata for '%s'."
% (tool_guid, data_manager_id)
)
continue
tool_dict = repository_tools_by_guid.get(tool_guid, None)
if tool_dict is None:
log.error(
"Data manager tool guid '%s' could not be found for '%s'. Perhaps the tool is invalid?"
% (tool_guid, data_manager_id)
)
continue
tool = tool_dict.get("tool", None)
if tool is None:
log.error(
"Data manager tool with guid '%s' could not be found for '%s'. Perhaps the tool is invalid?"
% (tool_guid, data_manager_id)
)
continue
tool_config_filename = tool_dict.get("tool_config_filename", None)
if tool_config_filename is None:
log.error(
"Data manager metadata is missing 'tool_config_file' for '%s'."
% (data_manager_id)
)
continue
elem.set("shed_conf_file", shed_config_dict["config_filename"])
if elem.get("tool_file", None) is not None:
del elem.attrib["tool_file"] # remove old tool_file info
tool_elem = tpm.generate_tool_elem(
repository.tool_shed,
repository.name,
repository.installed_changeset_revision,
repository.owner,
tool_config_filename,
tool,
None,
)
elem.insert(0, tool_elem)
data_manager = self.app.data_managers.load_manager_from_elem(
elem, tool_path=shed_config_dict.get("tool_path", "")
)
if data_manager:
rval.append(data_manager)
elif elem.tag is ElementTree.Comment:
pass
else:
log.warning(
"Encountered unexpected element '%s':\n%s"
% (elem.tag, xml_to_string(elem))
)
config_elems.append(elem)
data_manager_config_has_changes = True
# Persist the altered shed_data_manager_config file.
if data_manager_config_has_changes:
reload_count = self.app.data_managers._reload_count
self.data_manager_config_elems_to_xml_file(
config_elems, shed_data_manager_conf_filename
)
while self.app.data_managers._reload_count <= reload_count:
time.sleep(
0.1
) # Wait for shed_data_manager watcher thread to pick up changes
return rval
|
def install_data_managers(
self,
shed_data_manager_conf_filename,
metadata_dict,
shed_config_dict,
relative_install_dir,
repository,
repository_tools_tups,
):
rval = []
if "data_manager" in metadata_dict:
tpm = tool_panel_manager.ToolPanelManager(self.app)
repository_tools_by_guid = {}
for tool_tup in repository_tools_tups:
repository_tools_by_guid[tool_tup[1]] = dict(
tool_config_filename=tool_tup[0], tool=tool_tup[2]
)
# Load existing data managers.
try:
tree, error_message = xml_util.parse_xml(
shed_data_manager_conf_filename, check_exists=False
)
except (OSError, IOError) as exc:
if exc.errno == errno.ENOENT:
with open(shed_data_manager_conf_filename, "w") as fh:
fh.write(SHED_DATA_MANAGER_CONF_XML)
tree, error_message = xml_util.parse_xml(
shed_data_manager_conf_filename
)
else:
raise
if tree is None:
return rval
config_elems = [elem for elem in tree.getroot()]
repo_data_manager_conf_filename = metadata_dict["data_manager"].get(
"config_filename", None
)
if repo_data_manager_conf_filename is None:
log.debug("No data_manager_conf.xml file has been defined.")
return rval
data_manager_config_has_changes = False
relative_repo_data_manager_dir = os.path.join(
shed_config_dict.get("tool_path", ""), relative_install_dir
)
repo_data_manager_conf_filename = os.path.join(
relative_repo_data_manager_dir, repo_data_manager_conf_filename
)
tree, error_message = xml_util.parse_xml(repo_data_manager_conf_filename)
if tree is None:
return rval
root = tree.getroot()
for elem in root:
if elem.tag == "data_manager":
data_manager_id = elem.get("id", None)
if data_manager_id is None:
log.error(
"A data manager was defined that does not have an id and will not be installed:\n%s"
% xml_to_string(elem)
)
continue
data_manager_dict = (
metadata_dict["data_manager"]
.get("data_managers", {})
.get(data_manager_id, None)
)
if data_manager_dict is None:
log.error(
"Data manager metadata is not defined properly for '%s'."
% (data_manager_id)
)
continue
guid = data_manager_dict.get("guid", None)
if guid is None:
log.error(
"Data manager guid '%s' is not set in metadata for '%s'."
% (guid, data_manager_id)
)
continue
elem.set("guid", guid)
tool_guid = data_manager_dict.get("tool_guid", None)
if tool_guid is None:
log.error(
"Data manager tool guid '%s' is not set in metadata for '%s'."
% (tool_guid, data_manager_id)
)
continue
tool_dict = repository_tools_by_guid.get(tool_guid, None)
if tool_dict is None:
log.error(
"Data manager tool guid '%s' could not be found for '%s'. Perhaps the tool is invalid?"
% (tool_guid, data_manager_id)
)
continue
tool = tool_dict.get("tool", None)
if tool is None:
log.error(
"Data manager tool with guid '%s' could not be found for '%s'. Perhaps the tool is invalid?"
% (tool_guid, data_manager_id)
)
continue
tool_config_filename = tool_dict.get("tool_config_filename", None)
if tool_config_filename is None:
log.error(
"Data manager metadata is missing 'tool_config_file' for '%s'."
% (data_manager_id)
)
continue
elem.set("shed_conf_file", shed_config_dict["config_filename"])
if elem.get("tool_file", None) is not None:
del elem.attrib["tool_file"] # remove old tool_file info
tool_elem = tpm.generate_tool_elem(
repository.tool_shed,
repository.name,
repository.installed_changeset_revision,
repository.owner,
tool_config_filename,
tool,
None,
)
elem.insert(0, tool_elem)
data_manager = self.app.data_managers.load_manager_from_elem(
elem, tool_path=shed_config_dict.get("tool_path", "")
)
if data_manager:
rval.append(data_manager)
else:
log.warning(
"Encountered unexpected element '%s':\n%s"
% (elem.tag, xml_to_string(elem))
)
config_elems.append(elem)
data_manager_config_has_changes = True
# Persist the altered shed_data_manager_config file.
if data_manager_config_has_changes:
reload_count = self.app.data_managers._reload_count
self.data_manager_config_elems_to_xml_file(
config_elems, shed_data_manager_conf_filename
)
while self.app.data_managers._reload_count <= reload_count:
time.sleep(
0.1
) # Wait for shed_data_manager watcher thread to pick up changes
return rval
|
https://github.com/galaxyproject/galaxy/issues/8641
|
tool_shed.util.xml_util ERROR 2019-09-12 19:47:44,334 [p:54106,w:1,m:0] [uWSGIWorker1Core1] Exception attempting to parse GLXFOLDER/tool-data/TOOLSHED/repos/USER/REPO/REVISION/tool_data_table_conf.xml: multiple elements on top level
Traceback (most recent call last):
File "lib/tool_shed/util/xml_util.py", line 87, in parse_xml
tree = XmlET.parse(fobj, parser=XmlET.XMLParser(target=Py27CommentedTreeBuilder()))
File "PYTHON3.7/lib/python3.7/xml/etree/ElementTree.py", line 1197, in parse
tree.parse(source, parser)
File "PYTHON3.7/lib/python3.7/xml/etree/ElementTree.py", line 604, in parse
parser.feed(data)
File "<string>", line None
xml.etree.ElementTree.ParseError: multiple elements on top level
|
xml.etree.ElementTree.ParseError
|
def _xml_replace(query, targets, parent_map):
# parent_el = query.find('..') ## Something like this would be better with newer xml library
parent_el = parent_map[query]
matching_index = -1
# for index, el in enumerate(parent_el.iter('.')): ## Something like this for newer implementation
for index, el in enumerate(list(parent_el)):
if el == query:
matching_index = index
break
assert matching_index >= 0
current_index = matching_index
for target in targets:
current_index += 1
parent_el.insert(current_index, deepcopy(target))
parent_el.remove(query)
|
def _xml_replace(query, targets, parent_map):
# parent_el = query.find('..') ## Something like this would be better with newer xml library
parent_el = parent_map[query]
matching_index = -1
# for index, el in enumerate(parent_el.iter('.')): ## Something like this for newer implementation
for index, el in enumerate(list(parent_el)):
if el == query:
matching_index = index
break
assert matching_index >= 0
current_index = matching_index
for target in targets:
current_index += 1
parent_el.insert(current_index, target)
parent_el.remove(query)
|
https://github.com/galaxyproject/galaxy/issues/8805
|
galaxy.tools.toolbox.base ERROR 2019-10-15 10:55:57,876 Failed to load potential tool /Users/scholtalbers/workspace/je-suite/src/galaxy/je-clip.xml.
Traceback (most recent call last):
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 772, in quick_load
tool = self.load_tool(tool_file)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 808, in load_tool
tool = self.create_tool(config_file=config_file, tool_shed_repository=tool_shed_repository, guid=guid, **kwds)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/__init__.py", line 274, in create_tool
raise e
AssertionError
galaxy.tools.toolbox.base ERROR 2019-10-15 10:55:57,887 Failed to load potential tool /Users/scholtalbers/workspace/je-suite/src/galaxy/je-demultiplex.xml.
Traceback (most recent call last):
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 772, in quick_load
tool = self.load_tool(tool_file)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 808, in load_tool
tool = self.create_tool(config_file=config_file, tool_shed_repository=tool_shed_repository, guid=guid, **kwds)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/__init__.py", line 274, in create_tool
raise e
AssertionError
|
AssertionError
|
def _expand_yield_statements(macro_def, expand_el):
yield_els = [
yield_el
for macro_def_el in macro_def
for yield_el in macro_def_el.findall(".//yield")
]
expand_el_children = list(expand_el)
macro_def_parent_map = dict(
(c, p) for macro_def_el in macro_def for p in macro_def_el.iter() for c in p
)
for yield_el in yield_els:
_xml_replace(yield_el, expand_el_children, macro_def_parent_map)
# Replace yields at the top level of a macro, seems hacky approach
replace_yield = True
while replace_yield:
for i, macro_def_el in enumerate(macro_def):
if macro_def_el.tag == "yield":
for target in expand_el_children:
i += 1
macro_def.insert(i, deepcopy(target))
macro_def.remove(macro_def_el)
continue
replace_yield = False
|
def _expand_yield_statements(macro_def, expand_el):
yield_els = [
yield_el
for macro_def_el in macro_def
for yield_el in macro_def_el.findall(".//yield")
]
expand_el_children = list(expand_el)
macro_def_parent_map = dict(
(c, p) for macro_def_el in macro_def for p in macro_def_el.iter() for c in p
)
for yield_el in yield_els:
_xml_replace(yield_el, expand_el_children, macro_def_parent_map)
# Replace yields at the top level of a macro, seems hacky approach
replace_yield = True
while replace_yield:
for i, macro_def_el in enumerate(macro_def):
if macro_def_el.tag == "yield":
for target in expand_el_children:
i += 1
macro_def.insert(i, target)
macro_def.remove(macro_def_el)
continue
replace_yield = False
|
https://github.com/galaxyproject/galaxy/issues/8805
|
galaxy.tools.toolbox.base ERROR 2019-10-15 10:55:57,876 Failed to load potential tool /Users/scholtalbers/workspace/je-suite/src/galaxy/je-clip.xml.
Traceback (most recent call last):
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 772, in quick_load
tool = self.load_tool(tool_file)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 808, in load_tool
tool = self.create_tool(config_file=config_file, tool_shed_repository=tool_shed_repository, guid=guid, **kwds)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/__init__.py", line 274, in create_tool
raise e
AssertionError
galaxy.tools.toolbox.base ERROR 2019-10-15 10:55:57,887 Failed to load potential tool /Users/scholtalbers/workspace/je-suite/src/galaxy/je-demultiplex.xml.
Traceback (most recent call last):
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 772, in quick_load
tool = self.load_tool(tool_file)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 808, in load_tool
tool = self.create_tool(config_file=config_file, tool_shed_repository=tool_shed_repository, guid=guid, **kwds)
File "/private/var/folders/cf/k7c5rjhj1sggk6x8z1jnw4b80000gp/T/tmpFw8KvW/galaxy-dev/lib/galaxy/tools/__init__.py", line 274, in create_tool
raise e
AssertionError
|
AssertionError
|
def __determine_job_destination(self, params, raw_job_destination=None):
if self.job_wrapper.tool is None:
raise JobMappingException(
"Can't map job to destination, tool '%s' is unavailable"
% self.job_wrapper.get_job().tool_id
)
if raw_job_destination is None:
raw_job_destination = self.job_wrapper.tool.get_job_destination(params)
if raw_job_destination.runner == DYNAMIC_RUNNER_NAME:
job_destination = self.__handle_dynamic_job_destination(raw_job_destination)
log.debug(
"(%s) Mapped job to destination id: %s",
self.job_wrapper.job_id,
job_destination.id,
)
# Recursively handle chained dynamic destinations
if job_destination.runner == DYNAMIC_RUNNER_NAME:
return self.__determine_job_destination(
params, raw_job_destination=job_destination
)
else:
job_destination = raw_job_destination
log.debug(
"(%s) Mapped job to destination id: %s",
self.job_wrapper.job_id,
job_destination.id,
)
return job_destination
|
def __determine_job_destination(self, params, raw_job_destination=None):
if raw_job_destination is None:
raw_job_destination = self.job_wrapper.tool.get_job_destination(params)
if raw_job_destination.runner == DYNAMIC_RUNNER_NAME:
job_destination = self.__handle_dynamic_job_destination(raw_job_destination)
log.debug(
"(%s) Mapped job to destination id: %s",
self.job_wrapper.job_id,
job_destination.id,
)
# Recursively handle chained dynamic destinations
if job_destination.runner == DYNAMIC_RUNNER_NAME:
return self.__determine_job_destination(
params, raw_job_destination=job_destination
)
else:
job_destination = raw_job_destination
log.debug(
"(%s) Mapped job to destination id: %s",
self.job_wrapper.job_id,
job_destination.id,
)
return job_destination
|
https://github.com/galaxyproject/galaxy/issues/8141
|
Traceback (most recent call last):
File "/opt/galaxy/server/lib/galaxy/jobs/handler.py", line 374, in __handle_waiting_jobs
job_state = self.__check_job_state(job)
File "/opt/galaxy/server/lib/galaxy/jobs/handler.py", line 510, in __check_job_state
state, job_destination = self.__verify_job_ready(job, job_wrapper)
File "/opt/galaxy/server/lib/galaxy/jobs/handler.py", line 534, in __verify_job_ready
job_wrapper.fail(e)
File "/opt/galaxy/server/lib/galaxy/jobs/__init__.py", line 1002, in fail
self.job_destination
File "/opt/galaxy/server/lib/galaxy/jobs/__init__.py", line 806, in job_destination
return self.job_runner_mapper.get_job_destination(self.params)
File "/opt/galaxy/server/lib/galaxy/jobs/mapper.py", line 235, in get_job_destination
return self.__cache_job_destination(params)
File "/opt/galaxy/server/lib/galaxy/jobs/mapper.py", line 225, in __cache_job_destination
params, raw_job_destination=raw_job_destination)
File "/opt/galaxy/server/lib/galaxy/jobs/mapper.py", line 211, in __determine_job_destination
raw_job_destination = self.job_wrapper.tool.get_job_destination(params)
AttributeError: 'NoneType' object has no attribute 'get_job_destination'
|
AttributeError
|
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
"""The header file. Provides information about dimensions, identification, and processing history."""
self.add_composite_file(
"hdr", description="The Analyze75 header file.", is_binary=True
)
"""The image file. Image data, whose data type and ordering are described by the header file."""
self.add_composite_file(
"img", description="The Analyze75 image file.", is_binary=True
)
"""The optional t2m file."""
self.add_composite_file(
"t2m", description="The Analyze75 t2m file.", optional=True, is_binary=True
)
|
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
"""The header file. Provides information about dimensions, identification, and processing history."""
self.add_composite_file(
"hdr", description="The Analyze75 header file.", is_binary=False
)
"""The image file. Image data, whose data type and ordering are described by the header file."""
self.add_composite_file(
"img", description="The Analyze75 image file.", is_binary=True
)
"""The optional t2m file."""
self.add_composite_file(
"t2m", description="The Analyze75 t2m file.", optional="True", is_binary=True
)
|
https://github.com/galaxyproject/galaxy/issues/7412
|
Fatal error: Exit code 1 ()
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 329, in <module>
__main__()
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 320, in __main__
metadata.append(add_composite_file(dataset, registry, output_path, files_path))
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 243, in add_composite_file
stage_file(name, composite_file_path, value.is_binary)
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 223, in stage_file
sniff.convert_newlines(dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix)
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/datatypes/sniff.py", line 122, in convert_newlines
for i, line in enumerate(io.open(fname, mode="U", encoding='utf-8')):
File "/cvmfs/main.galaxyproject.org/venv/lib/python2.7/codecs.py", line 314, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf8' codec can't decode byte 0x80 in position 0: invalid start byte
|
UnicodeDecodeError
|
def add_composite_file(dataset, registry, output_path, files_path):
datatype = None
# Find data type
if dataset.file_type is not None:
datatype = registry.get_datatype_by_extension(dataset.file_type)
def to_path(path_or_url):
is_url = path_or_url.find("://") != -1 # todo fixme
if is_url:
try:
temp_name = sniff.stream_to_file(
urlopen(path_or_url), prefix="url_paste"
)
except Exception as e:
raise UploadProblemException(
"Unable to fetch %s\n%s" % (path_or_url, str(e))
)
return temp_name, is_url
return path_or_url, is_url
def make_files_path():
safe_makedirs(files_path)
def stage_file(name, composite_file_path, is_binary=False):
dp = composite_file_path["path"]
path, is_url = to_path(dp)
if is_url:
dataset.path = path
dp = path
auto_decompress = composite_file_path.get("auto_decompress", True)
if (
auto_decompress
and not datatype.composite_type
and CompressedFile.can_decompress(dp)
):
# It isn't an explictly composite datatype, so these are just extra files to attach
# as composite data. It'd be better if Galaxy was communicating this to the tool
# a little more explicitly so we didn't need to dispatch on the datatype and so we
# could attach arbitrary extra composite data to an existing composite datatype if
# if need be? Perhaps that would be a mistake though.
CompressedFile(dp).extract(files_path)
else:
if not is_binary:
tmpdir = output_adjacent_tmpdir(output_path)
tmp_prefix = "data_id_%s_convert_" % dataset.dataset_id
if composite_file_path.get("space_to_tab"):
sniff.convert_newlines_sep2tabs(
dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix
)
else:
sniff.convert_newlines(dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix)
file_output_path = os.path.join(files_path, name)
shutil.move(dp, file_output_path)
# groom the dataset file content if required by the corresponding datatype definition
if datatype.dataset_content_needs_grooming(file_output_path):
datatype.groom_dataset_content(file_output_path)
# Do we have pre-defined composite files from the datatype definition.
if dataset.composite_files:
make_files_path()
for name, value in dataset.composite_files.items():
value = bunch.Bunch(**value)
if value.name not in dataset.composite_file_paths:
raise UploadProblemException(
"Failed to find file_path %s in %s"
% (value.name, dataset.composite_file_paths)
)
if dataset.composite_file_paths[value.name] is None and not value.optional:
raise UploadProblemException(
"A required composite data file was not provided (%s)" % name
)
elif dataset.composite_file_paths[value.name] is not None:
composite_file_path = dataset.composite_file_paths[value.name]
stage_file(name, composite_file_path, value.is_binary)
# Do we have ad-hoc user supplied composite files.
elif dataset.composite_file_paths:
make_files_path()
for key, composite_file in dataset.composite_file_paths.items():
stage_file(key, composite_file) # TODO: replace these defaults
# Move the dataset to its "real" path
primary_file_path, _ = to_path(dataset.primary_file)
shutil.move(primary_file_path, output_path)
# Write the job info
return dict(
type="dataset",
dataset_id=dataset.dataset_id,
stdout="uploaded %s file" % dataset.file_type,
)
|
def add_composite_file(dataset, registry, output_path, files_path):
datatype = None
# Find data type
if dataset.file_type is not None:
try:
datatype = registry.get_datatype_by_extension(dataset.file_type)
except Exception:
print(
"Unable to instantiate the datatype object for the file type '%s'"
% dataset.file_type
)
def to_path(path_or_url):
is_url = path_or_url.find("://") != -1 # todo fixme
if is_url:
try:
temp_name = sniff.stream_to_file(
urlopen(path_or_url), prefix="url_paste"
)
except Exception as e:
raise UploadProblemException(
"Unable to fetch %s\n%s" % (path_or_url, str(e))
)
return temp_name, is_url
return path_or_url, is_url
def make_files_path():
safe_makedirs(files_path)
def stage_file(name, composite_file_path, is_binary=False):
dp = composite_file_path["path"]
path, is_url = to_path(dp)
if is_url:
dataset.path = path
dp = path
auto_decompress = composite_file_path.get("auto_decompress", True)
if (
auto_decompress
and not datatype.composite_type
and CompressedFile.can_decompress(dp)
):
# It isn't an explictly composite datatype, so these are just extra files to attach
# as composite data. It'd be better if Galaxy was communicating this to the tool
# a little more explicitly so we didn't need to dispatch on the datatype and so we
# could attach arbitrary extra composite data to an existing composite datatype if
# if need be? Perhaps that would be a mistake though.
CompressedFile(dp).extract(files_path)
else:
if not is_binary:
tmpdir = output_adjacent_tmpdir(output_path)
tmp_prefix = "data_id_%s_convert_" % dataset.dataset_id
if composite_file_path.get("space_to_tab"):
sniff.convert_newlines_sep2tabs(
dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix
)
else:
sniff.convert_newlines(dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix)
file_output_path = os.path.join(files_path, name)
shutil.move(dp, file_output_path)
# groom the dataset file content if required by the corresponding datatype definition
if datatype.dataset_content_needs_grooming(file_output_path):
datatype.groom_dataset_content(file_output_path)
# Do we have pre-defined composite files from the datatype definition.
if dataset.composite_files:
make_files_path()
for name, value in dataset.composite_files.items():
value = bunch.Bunch(**value)
if value.name not in dataset.composite_file_paths:
raise UploadProblemException(
"Failed to find file_path %s in %s"
% (value.name, dataset.composite_file_paths)
)
if dataset.composite_file_paths[value.name] is None and not value.optional:
raise UploadProblemException(
"A required composite data file was not provided (%s)" % name
)
elif dataset.composite_file_paths[value.name] is not None:
composite_file_path = dataset.composite_file_paths[value.name]
stage_file(name, composite_file_path, value.is_binary)
# Do we have ad-hoc user supplied composite files.
elif dataset.composite_file_paths:
make_files_path()
for key, composite_file in dataset.composite_file_paths.items():
stage_file(key, composite_file) # TODO: replace these defaults
# Move the dataset to its "real" path
primary_file_path, _ = to_path(dataset.primary_file)
shutil.move(primary_file_path, output_path)
# Write the job info
return dict(
type="dataset",
dataset_id=dataset.dataset_id,
stdout="uploaded %s file" % dataset.file_type,
)
|
https://github.com/galaxyproject/galaxy/issues/7412
|
Fatal error: Exit code 1 ()
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 329, in <module>
__main__()
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 320, in __main__
metadata.append(add_composite_file(dataset, registry, output_path, files_path))
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 243, in add_composite_file
stage_file(name, composite_file_path, value.is_binary)
File "/cvmfs/main.galaxyproject.org/galaxy/tools/data_source/upload.py", line 223, in stage_file
sniff.convert_newlines(dp, tmp_dir=tmpdir, tmp_prefix=tmp_prefix)
File "/cvmfs/main.galaxyproject.org/galaxy/lib/galaxy/datatypes/sniff.py", line 122, in convert_newlines
for i, line in enumerate(io.open(fname, mode="U", encoding='utf-8')):
File "/cvmfs/main.galaxyproject.org/venv/lib/python2.7/codecs.py", line 314, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf8' codec can't decode byte 0x80 in position 0: invalid start byte
|
UnicodeDecodeError
|
def tool_shed_repositories(self):
try:
repositories = self.cache.repositories
except AttributeError:
self.rebuild()
repositories = self.cache.repositories
tool_shed_repositories = [
repo
for repo in repositories
if isinstance(repo, self.app.install_model.ToolShedRepository)
]
if tool_shed_repositories and inspect(tool_shed_repositories[0]).detached:
self.rebuild()
repositories = self.cache.repositories
return repositories
|
def tool_shed_repositories(self):
try:
repositories = self.cache.repositories
except AttributeError:
self.rebuild()
repositories = self.cache.repositories
if repositories and not repositories[0]._sa_instance_state._attached:
self.rebuild()
repositories = self.cache.repositories
return repositories
|
https://github.com/galaxyproject/galaxy/issues/8015
|
galaxy.tools.toolbox.base ERROR 2019-05-21 18:57:13,924 [p:1,w:0,m:0] [MainThread] Error reading tool from path: toolshed.g2.bx.psu.edu/repos/devteam/sam_pileup/890d97772e2a/sam_pileup/sam_pileup.xml
Traceback (most recent call last):
File "/galaxy/server/lib/galaxy/tools/toolbox/base.py", line 605, in _load_tool_tag_set
tool_shed_repository = self.get_tool_repository_from_xml_item(item, path)
File "/galaxy/server/lib/galaxy/tools/toolbox/base.py", line 652, in get_tool_repository_from_xml_item
installed_changeset_revision=installed_changeset_revision)
File "/galaxy/server/lib/galaxy/tools/__init__.py", line 339, in _get_tool_shed_repository
installed_changeset_revision=installed_changeset_revision
File "/galaxy/server/lib/tool_shed/util/repository_util.py", line 374, in get_installed_repository
repository_id=repository_id)
File "/galaxy/server/lib/galaxy/tools/cache.py", line 165, in get_installed_repository
repository_id=repository_id)
File "/galaxy/server/lib/galaxy/tools/cache.py", line 182, in _get_installed_repository
repos = [repo for repo in self.tool_shed_repositories if repo.tool_shed == tool_shed and repo.owner == owner and repo.name == name]
File "/galaxy/server/lib/galaxy/tools/cache.py", line 150, in tool_shed_repositories
if repositories and not repositories[0]._sa_instance_state._attached:
AttributeError: 'ToolConfRepository' object has no attribute '_sa_instance_state'
|
AttributeError
|
def get_file_name(self):
if not self.external_filename:
assert self.object_store is not None, (
"Object Store has not been initialized for dataset %s" % self.id
)
if self.object_store.exists(self):
return self.object_store.get_filename(self)
else:
return ""
else:
filename = self.external_filename
# Make filename absolute
return os.path.abspath(filename)
|
def get_file_name(self):
if not self.external_filename:
assert self.object_store is not None, (
"Object Store has not been initialized for dataset %s" % self.id
)
filename = self.object_store.get_filename(self)
return filename
else:
filename = self.external_filename
# Make filename absolute
return os.path.abspath(filename)
|
https://github.com/galaxyproject/galaxy/issues/7368
|
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,638 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150481
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,639 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150482
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,640 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150483
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,641 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150484
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150485
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150486
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,643 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150487
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,644 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150488
galaxy.workflow.scheduling_manager INFO 2019-02-13 14:22:26,645 [p:336,w:1,m:0] [uWSGIWorker1Core3] Queueing workflow invocation for handler [main.web.1]
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "POST /api/workflows/f293ecb6146177b0/invocations HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac
OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 336|app: 0|req: 9300/93021] 127.0.0.1 () {56 vars in 1491 bytes} [Wed Feb 13 14:22:26 2019] POST /api/workflows/f293ecb6146177b0/invocations => generated 243 bytes in 458 msecs (HTTP/1.1 200) 3 headers in
124 bytes (1 switches on core 3)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/webhooks HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.
36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 343|app: 0|req: 9298/93022] 127.0.0.1 () {50 vars in 1334 bytes} [Wed Feb 13 14:22:26 2019] GET /api/webhooks => generated 2 bytes in 20 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 2)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&
qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 434|app: 0|req: 9304/93023] 127.0.0.1 () {50 vars in 1641 bytes} [Wed Feb 13 14:22:26 2019] GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&
q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 66 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 3)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,590 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150481 of invocation 17854 invoked (29.118 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,610 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150482 of invocation 17854 invoked (19.367 ms)
galaxy.managers.collections DEBUG 2019-02-13 14:22:27,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppr
essed after 10 occurrences)
(util.ellipses_string(value),))
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppre
ssed after 10 occurrences)
(util.ellipses_string(value),))
galaxy.tools.actions INFO 2019-02-13 14:22:27,980 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0
.9992.0 (73.258 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,995 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.906 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,998 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.
9992.0] complete, ready to flush (3.279 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191675,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msco
nvert_nix/3.0.9992.0] (97.376 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191675]
(286.212 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,329 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 (40.918 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,343 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.881 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,348 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] complete, ready to flush (5.537 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,434 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191676,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] (85.546 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,435 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191676] (165.391 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,631 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 request: (983.069 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:28,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150483 of invocation 17854 invoked (1061.588 ms)
galaxy.tools ERROR 2019-02-13 14:22:28,865 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Exception caught while attempting tool execution:
Traceback (most recent call last):
File "lib/galaxy/tools/__init__.py", line 1436, in handle_single_execution
collection_info=collection_info,
File "lib/galaxy/tools/__init__.py", line 1517, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "lib/galaxy/tools/actions/__init__.py", line 331, in execute
params=wrapped_params.params,
File "lib/galaxy/tools/parameters/wrapped.py", line 44, in params
self.wrap_values(self.tool.inputs, params, skip_missing_values=not self.tool.check_values)
File "lib/galaxy/tools/parameters/wrapped.py", line 79, in wrap_values
name=input.name)
File "lib/galaxy/tools/wrappers.py", line 343, in __init__
list.__init__(self, map(to_wrapper, datasets))
File "lib/galaxy/tools/wrappers.py", line 341, in to_wrapper
return self._dataset_wrapper(dataset, dataset_paths, **kwargs)
File "lib/galaxy/tools/wrappers.py", line 314, in _dataset_wrapper
real_path = dataset.file_name
File "lib/galaxy/model/__init__.py", line 2181, in get_file_name
return self.dataset.get_file_name()
File "lib/galaxy/model/__init__.py", line 1969, in get_file_name
filename = self.object_store.get_filename(self)
File "lib/galaxy/objectstore/__init__.py", line 506, in get_filename
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
File "lib/galaxy/objectstore/__init__.py", line 534, in _call_method
% (method, self._repr_object_for_exception(obj), str(kwargs)))
ObjectNotFound: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute WARNING 2019-02-13 14:22:28,870 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] There was a failure executing a job for tool [toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1] - Error executing tool: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,896 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1 request: (162.061 ms)
galaxy.workflow.run ERROR 2019-02-13 14:22:28,931 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=7155,name=iMetaQuantome_workflow (imported from uploaded file)], problem occurred on WorkflowStep[index=3,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.workflow.run ERROR 2019-02-13 14:22:28,932 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Mapped job to destination id: dynamic-destination-msconvert_nix
10.21.31.51 - - [13/Feb/2019:14:22:29 -0500] "GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fgalaxyp%2Fpeptideshaker%2Fsearch_gui%2F3.3.10.1&version=3.3.10.1&__identifer=n8xwa2bomdwb" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299"
[pid: 351|app: 0|req: 9303/93024] 127.0.0.1 () {50 vars in 1830 bytes} [Wed Feb 13 14:22:29 2019] GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 73 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 1)
galaxy.jobs.handler DEBUG 2019-02-13 14:22:29,818 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:29,842 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,856 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' of size 0 bytes to key '000/241/dataset_241288.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,905 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' to key '000/241/dataset_241288.dat' (0 bytes transfered in 0:00:00.048339 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:29,927 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675
galaxy.jobs.runners DEBUG 2019-02-13 14:22:29,958 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191675] queued (139.791 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:29,986 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Job dispatched
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:30,025 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Mapped job to destination id: dynamic-destination-msconvert_nix
galaxy.jobs.handler DEBUG 2019-02-13 14:22:30,080 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:30,100 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,112 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' :
of size 0 bytes to key '000/241/dataset_241289.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,153 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' to key '000/241/dataset_241289.dat' (0 bytes transfered in 0:00:00.041692 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:30,169 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191676
galaxy.jobs.runners DEBUG 2019-02-13 14:22:30,186 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191676] queued (106.436 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:30,209 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Job dispatched
galaxy.tools.deps DEBUG 2019-02-13 14:22:30,411 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Using dependency proteowizard version 3.0.9992 of type conda
galaxy.jobs.command_factory INFO 2019-02-13 14:22:30,518 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Built script [/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675/tool_script.sh] for tool command [[ "$CONDA_DEFAULT_ENV" = "/home/galaxy/galaxy/miniconda2/envs/__proteowizard@3.0.9992" ] ||
|
Exception
|
def get_extra_files_path(self):
# Unlike get_file_name - external_extra_files_path is not backed by an
# actual database column so if SA instantiates this object - the
# attribute won't exist yet.
if not getattr(self, "external_extra_files_path", None):
if self.object_store.exists(
self, dir_only=True, extra_dir=self._extra_files_rel_path
):
return self.object_store.get_filename(
self, dir_only=True, extra_dir=self._extra_files_rel_path
)
return ""
else:
return os.path.abspath(self.external_extra_files_path)
|
def get_extra_files_path(self):
# Unlike get_file_name - external_extra_files_path is not backed by an
# actual database column so if SA instantiates this object - the
# attribute won't exist yet.
if not getattr(self, "external_extra_files_path", None):
return self.object_store.get_filename(
self, dir_only=True, extra_dir=self._extra_files_rel_path
)
else:
return os.path.abspath(self.external_extra_files_path)
|
https://github.com/galaxyproject/galaxy/issues/7368
|
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,638 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150481
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,639 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150482
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,640 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150483
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,641 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150484
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150485
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150486
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,643 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150487
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,644 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150488
galaxy.workflow.scheduling_manager INFO 2019-02-13 14:22:26,645 [p:336,w:1,m:0] [uWSGIWorker1Core3] Queueing workflow invocation for handler [main.web.1]
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "POST /api/workflows/f293ecb6146177b0/invocations HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac
OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 336|app: 0|req: 9300/93021] 127.0.0.1 () {56 vars in 1491 bytes} [Wed Feb 13 14:22:26 2019] POST /api/workflows/f293ecb6146177b0/invocations => generated 243 bytes in 458 msecs (HTTP/1.1 200) 3 headers in
124 bytes (1 switches on core 3)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/webhooks HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.
36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 343|app: 0|req: 9298/93022] 127.0.0.1 () {50 vars in 1334 bytes} [Wed Feb 13 14:22:26 2019] GET /api/webhooks => generated 2 bytes in 20 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 2)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&
qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 434|app: 0|req: 9304/93023] 127.0.0.1 () {50 vars in 1641 bytes} [Wed Feb 13 14:22:26 2019] GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&
q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 66 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 3)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,590 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150481 of invocation 17854 invoked (29.118 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,610 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150482 of invocation 17854 invoked (19.367 ms)
galaxy.managers.collections DEBUG 2019-02-13 14:22:27,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppr
essed after 10 occurrences)
(util.ellipses_string(value),))
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppre
ssed after 10 occurrences)
(util.ellipses_string(value),))
galaxy.tools.actions INFO 2019-02-13 14:22:27,980 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0
.9992.0 (73.258 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,995 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.906 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,998 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.
9992.0] complete, ready to flush (3.279 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191675,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msco
nvert_nix/3.0.9992.0] (97.376 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191675]
(286.212 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,329 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 (40.918 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,343 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.881 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,348 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] complete, ready to flush (5.537 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,434 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191676,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] (85.546 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,435 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191676] (165.391 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,631 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 request: (983.069 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:28,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150483 of invocation 17854 invoked (1061.588 ms)
galaxy.tools ERROR 2019-02-13 14:22:28,865 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Exception caught while attempting tool execution:
Traceback (most recent call last):
File "lib/galaxy/tools/__init__.py", line 1436, in handle_single_execution
collection_info=collection_info,
File "lib/galaxy/tools/__init__.py", line 1517, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "lib/galaxy/tools/actions/__init__.py", line 331, in execute
params=wrapped_params.params,
File "lib/galaxy/tools/parameters/wrapped.py", line 44, in params
self.wrap_values(self.tool.inputs, params, skip_missing_values=not self.tool.check_values)
File "lib/galaxy/tools/parameters/wrapped.py", line 79, in wrap_values
name=input.name)
File "lib/galaxy/tools/wrappers.py", line 343, in __init__
list.__init__(self, map(to_wrapper, datasets))
File "lib/galaxy/tools/wrappers.py", line 341, in to_wrapper
return self._dataset_wrapper(dataset, dataset_paths, **kwargs)
File "lib/galaxy/tools/wrappers.py", line 314, in _dataset_wrapper
real_path = dataset.file_name
File "lib/galaxy/model/__init__.py", line 2181, in get_file_name
return self.dataset.get_file_name()
File "lib/galaxy/model/__init__.py", line 1969, in get_file_name
filename = self.object_store.get_filename(self)
File "lib/galaxy/objectstore/__init__.py", line 506, in get_filename
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
File "lib/galaxy/objectstore/__init__.py", line 534, in _call_method
% (method, self._repr_object_for_exception(obj), str(kwargs)))
ObjectNotFound: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute WARNING 2019-02-13 14:22:28,870 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] There was a failure executing a job for tool [toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1] - Error executing tool: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,896 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1 request: (162.061 ms)
galaxy.workflow.run ERROR 2019-02-13 14:22:28,931 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=7155,name=iMetaQuantome_workflow (imported from uploaded file)], problem occurred on WorkflowStep[index=3,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.workflow.run ERROR 2019-02-13 14:22:28,932 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Mapped job to destination id: dynamic-destination-msconvert_nix
10.21.31.51 - - [13/Feb/2019:14:22:29 -0500] "GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fgalaxyp%2Fpeptideshaker%2Fsearch_gui%2F3.3.10.1&version=3.3.10.1&__identifer=n8xwa2bomdwb" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299"
[pid: 351|app: 0|req: 9303/93024] 127.0.0.1 () {50 vars in 1830 bytes} [Wed Feb 13 14:22:29 2019] GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 73 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 1)
galaxy.jobs.handler DEBUG 2019-02-13 14:22:29,818 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:29,842 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,856 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' of size 0 bytes to key '000/241/dataset_241288.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,905 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' to key '000/241/dataset_241288.dat' (0 bytes transfered in 0:00:00.048339 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:29,927 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675
galaxy.jobs.runners DEBUG 2019-02-13 14:22:29,958 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191675] queued (139.791 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:29,986 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Job dispatched
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:30,025 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Mapped job to destination id: dynamic-destination-msconvert_nix
galaxy.jobs.handler DEBUG 2019-02-13 14:22:30,080 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:30,100 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,112 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' :
of size 0 bytes to key '000/241/dataset_241289.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,153 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' to key '000/241/dataset_241289.dat' (0 bytes transfered in 0:00:00.041692 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:30,169 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191676
galaxy.jobs.runners DEBUG 2019-02-13 14:22:30,186 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191676] queued (106.436 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:30,209 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Job dispatched
galaxy.tools.deps DEBUG 2019-02-13 14:22:30,411 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Using dependency proteowizard version 3.0.9992 of type conda
galaxy.jobs.command_factory INFO 2019-02-13 14:22:30,518 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Built script [/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675/tool_script.sh] for tool command [[ "$CONDA_DEFAULT_ENV" = "/home/galaxy/galaxy/miniconda2/envs/__proteowizard@3.0.9992" ] ||
|
Exception
|
def full_delete(self):
"""Remove the file and extra files, marks deleted and purged"""
# os.unlink( self.file_name )
try:
self.object_store.delete(self)
except galaxy.exceptions.ObjectNotFound:
pass
if self.object_store.exists(
self, extra_dir=self._extra_files_rel_path, dir_only=True
):
self.object_store.delete(
self, entire_dir=True, extra_dir=self._extra_files_rel_path, dir_only=True
)
# TODO: purge metadata files
self.deleted = True
self.purged = True
|
def full_delete(self):
"""Remove the file and extra files, marks deleted and purged"""
# os.unlink( self.file_name )
self.object_store.delete(self)
if self.object_store.exists(
self, extra_dir=self._extra_files_rel_path, dir_only=True
):
self.object_store.delete(
self, entire_dir=True, extra_dir=self._extra_files_rel_path, dir_only=True
)
# if os.path.exists( self.extra_files_path ):
# shutil.rmtree( self.extra_files_path )
# TODO: purge metadata files
self.deleted = True
self.purged = True
|
https://github.com/galaxyproject/galaxy/issues/7368
|
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,638 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150481
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,639 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150482
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,640 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150483
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,641 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150484
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150485
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150486
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,643 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150487
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,644 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150488
galaxy.workflow.scheduling_manager INFO 2019-02-13 14:22:26,645 [p:336,w:1,m:0] [uWSGIWorker1Core3] Queueing workflow invocation for handler [main.web.1]
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "POST /api/workflows/f293ecb6146177b0/invocations HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac
OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 336|app: 0|req: 9300/93021] 127.0.0.1 () {56 vars in 1491 bytes} [Wed Feb 13 14:22:26 2019] POST /api/workflows/f293ecb6146177b0/invocations => generated 243 bytes in 458 msecs (HTTP/1.1 200) 3 headers in
124 bytes (1 switches on core 3)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/webhooks HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.
36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 343|app: 0|req: 9298/93022] 127.0.0.1 () {50 vars in 1334 bytes} [Wed Feb 13 14:22:26 2019] GET /api/webhooks => generated 2 bytes in 20 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 2)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&
qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 434|app: 0|req: 9304/93023] 127.0.0.1 () {50 vars in 1641 bytes} [Wed Feb 13 14:22:26 2019] GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&
q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 66 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 3)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,590 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150481 of invocation 17854 invoked (29.118 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,610 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150482 of invocation 17854 invoked (19.367 ms)
galaxy.managers.collections DEBUG 2019-02-13 14:22:27,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppr
essed after 10 occurrences)
(util.ellipses_string(value),))
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppre
ssed after 10 occurrences)
(util.ellipses_string(value),))
galaxy.tools.actions INFO 2019-02-13 14:22:27,980 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0
.9992.0 (73.258 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,995 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.906 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,998 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.
9992.0] complete, ready to flush (3.279 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191675,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msco
nvert_nix/3.0.9992.0] (97.376 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191675]
(286.212 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,329 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 (40.918 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,343 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.881 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,348 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] complete, ready to flush (5.537 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,434 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191676,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] (85.546 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,435 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191676] (165.391 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,631 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 request: (983.069 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:28,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150483 of invocation 17854 invoked (1061.588 ms)
galaxy.tools ERROR 2019-02-13 14:22:28,865 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Exception caught while attempting tool execution:
Traceback (most recent call last):
File "lib/galaxy/tools/__init__.py", line 1436, in handle_single_execution
collection_info=collection_info,
File "lib/galaxy/tools/__init__.py", line 1517, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "lib/galaxy/tools/actions/__init__.py", line 331, in execute
params=wrapped_params.params,
File "lib/galaxy/tools/parameters/wrapped.py", line 44, in params
self.wrap_values(self.tool.inputs, params, skip_missing_values=not self.tool.check_values)
File "lib/galaxy/tools/parameters/wrapped.py", line 79, in wrap_values
name=input.name)
File "lib/galaxy/tools/wrappers.py", line 343, in __init__
list.__init__(self, map(to_wrapper, datasets))
File "lib/galaxy/tools/wrappers.py", line 341, in to_wrapper
return self._dataset_wrapper(dataset, dataset_paths, **kwargs)
File "lib/galaxy/tools/wrappers.py", line 314, in _dataset_wrapper
real_path = dataset.file_name
File "lib/galaxy/model/__init__.py", line 2181, in get_file_name
return self.dataset.get_file_name()
File "lib/galaxy/model/__init__.py", line 1969, in get_file_name
filename = self.object_store.get_filename(self)
File "lib/galaxy/objectstore/__init__.py", line 506, in get_filename
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
File "lib/galaxy/objectstore/__init__.py", line 534, in _call_method
% (method, self._repr_object_for_exception(obj), str(kwargs)))
ObjectNotFound: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute WARNING 2019-02-13 14:22:28,870 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] There was a failure executing a job for tool [toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1] - Error executing tool: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,896 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1 request: (162.061 ms)
galaxy.workflow.run ERROR 2019-02-13 14:22:28,931 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=7155,name=iMetaQuantome_workflow (imported from uploaded file)], problem occurred on WorkflowStep[index=3,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.workflow.run ERROR 2019-02-13 14:22:28,932 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Mapped job to destination id: dynamic-destination-msconvert_nix
10.21.31.51 - - [13/Feb/2019:14:22:29 -0500] "GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fgalaxyp%2Fpeptideshaker%2Fsearch_gui%2F3.3.10.1&version=3.3.10.1&__identifer=n8xwa2bomdwb" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299"
[pid: 351|app: 0|req: 9303/93024] 127.0.0.1 () {50 vars in 1830 bytes} [Wed Feb 13 14:22:29 2019] GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 73 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 1)
galaxy.jobs.handler DEBUG 2019-02-13 14:22:29,818 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:29,842 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,856 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' of size 0 bytes to key '000/241/dataset_241288.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,905 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' to key '000/241/dataset_241288.dat' (0 bytes transfered in 0:00:00.048339 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:29,927 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675
galaxy.jobs.runners DEBUG 2019-02-13 14:22:29,958 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191675] queued (139.791 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:29,986 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Job dispatched
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:30,025 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Mapped job to destination id: dynamic-destination-msconvert_nix
galaxy.jobs.handler DEBUG 2019-02-13 14:22:30,080 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:30,100 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,112 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' :
of size 0 bytes to key '000/241/dataset_241289.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,153 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' to key '000/241/dataset_241289.dat' (0 bytes transfered in 0:00:00.041692 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:30,169 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191676
galaxy.jobs.runners DEBUG 2019-02-13 14:22:30,186 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191676] queued (106.436 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:30,209 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Job dispatched
galaxy.tools.deps DEBUG 2019-02-13 14:22:30,411 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Using dependency proteowizard version 3.0.9992 of type conda
galaxy.jobs.command_factory INFO 2019-02-13 14:22:30,518 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Built script [/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675/tool_script.sh] for tool command [[ "$CONDA_DEFAULT_ENV" = "/home/galaxy/galaxy/miniconda2/envs/__proteowizard@3.0.9992" ] ||
|
Exception
|
def get_filename(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
If `object_store_check_old_style` is set to `True` in config then the
root path is checked first.
"""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise,
# construct and return hashed path
if os.path.exists(path):
return path
path = self._construct_path(obj, **kwargs)
if not os.path.exists(path):
raise ObjectNotFound
return path
|
def get_filename(self, obj, **kwargs):
"""
Override `ObjectStore`'s stub.
If `object_store_check_old_style` is set to `True` in config then the
root path is checked first.
"""
if self.check_old_style:
path = self._construct_path(obj, old_style=True, **kwargs)
# For backward compatibility, check root path first; otherwise,
# construct and return hashed path
if os.path.exists(path):
return path
return self._construct_path(obj, **kwargs)
|
https://github.com/galaxyproject/galaxy/issues/7368
|
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,638 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150481
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,639 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150482
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,640 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150483
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,641 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150484
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150485
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150486
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,643 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150487
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,644 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150488
galaxy.workflow.scheduling_manager INFO 2019-02-13 14:22:26,645 [p:336,w:1,m:0] [uWSGIWorker1Core3] Queueing workflow invocation for handler [main.web.1]
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "POST /api/workflows/f293ecb6146177b0/invocations HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac
OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 336|app: 0|req: 9300/93021] 127.0.0.1 () {56 vars in 1491 bytes} [Wed Feb 13 14:22:26 2019] POST /api/workflows/f293ecb6146177b0/invocations => generated 243 bytes in 458 msecs (HTTP/1.1 200) 3 headers in
124 bytes (1 switches on core 3)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/webhooks HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.
36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 343|app: 0|req: 9298/93022] 127.0.0.1 () {50 vars in 1334 bytes} [Wed Feb 13 14:22:26 2019] GET /api/webhooks => generated 2 bytes in 20 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 2)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&
qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 434|app: 0|req: 9304/93023] 127.0.0.1 () {50 vars in 1641 bytes} [Wed Feb 13 14:22:26 2019] GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&
q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 66 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 3)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,590 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150481 of invocation 17854 invoked (29.118 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,610 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150482 of invocation 17854 invoked (19.367 ms)
galaxy.managers.collections DEBUG 2019-02-13 14:22:27,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppr
essed after 10 occurrences)
(util.ellipses_string(value),))
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppre
ssed after 10 occurrences)
(util.ellipses_string(value),))
galaxy.tools.actions INFO 2019-02-13 14:22:27,980 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0
.9992.0 (73.258 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,995 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.906 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,998 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.
9992.0] complete, ready to flush (3.279 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191675,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msco
nvert_nix/3.0.9992.0] (97.376 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191675]
(286.212 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,329 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 (40.918 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,343 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.881 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,348 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] complete, ready to flush (5.537 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,434 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191676,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] (85.546 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,435 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191676] (165.391 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,631 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 request: (983.069 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:28,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150483 of invocation 17854 invoked (1061.588 ms)
galaxy.tools ERROR 2019-02-13 14:22:28,865 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Exception caught while attempting tool execution:
Traceback (most recent call last):
File "lib/galaxy/tools/__init__.py", line 1436, in handle_single_execution
collection_info=collection_info,
File "lib/galaxy/tools/__init__.py", line 1517, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "lib/galaxy/tools/actions/__init__.py", line 331, in execute
params=wrapped_params.params,
File "lib/galaxy/tools/parameters/wrapped.py", line 44, in params
self.wrap_values(self.tool.inputs, params, skip_missing_values=not self.tool.check_values)
File "lib/galaxy/tools/parameters/wrapped.py", line 79, in wrap_values
name=input.name)
File "lib/galaxy/tools/wrappers.py", line 343, in __init__
list.__init__(self, map(to_wrapper, datasets))
File "lib/galaxy/tools/wrappers.py", line 341, in to_wrapper
return self._dataset_wrapper(dataset, dataset_paths, **kwargs)
File "lib/galaxy/tools/wrappers.py", line 314, in _dataset_wrapper
real_path = dataset.file_name
File "lib/galaxy/model/__init__.py", line 2181, in get_file_name
return self.dataset.get_file_name()
File "lib/galaxy/model/__init__.py", line 1969, in get_file_name
filename = self.object_store.get_filename(self)
File "lib/galaxy/objectstore/__init__.py", line 506, in get_filename
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
File "lib/galaxy/objectstore/__init__.py", line 534, in _call_method
% (method, self._repr_object_for_exception(obj), str(kwargs)))
ObjectNotFound: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute WARNING 2019-02-13 14:22:28,870 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] There was a failure executing a job for tool [toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1] - Error executing tool: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,896 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1 request: (162.061 ms)
galaxy.workflow.run ERROR 2019-02-13 14:22:28,931 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=7155,name=iMetaQuantome_workflow (imported from uploaded file)], problem occurred on WorkflowStep[index=3,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.workflow.run ERROR 2019-02-13 14:22:28,932 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Mapped job to destination id: dynamic-destination-msconvert_nix
10.21.31.51 - - [13/Feb/2019:14:22:29 -0500] "GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fgalaxyp%2Fpeptideshaker%2Fsearch_gui%2F3.3.10.1&version=3.3.10.1&__identifer=n8xwa2bomdwb" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299"
[pid: 351|app: 0|req: 9303/93024] 127.0.0.1 () {50 vars in 1830 bytes} [Wed Feb 13 14:22:29 2019] GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 73 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 1)
galaxy.jobs.handler DEBUG 2019-02-13 14:22:29,818 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:29,842 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,856 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' of size 0 bytes to key '000/241/dataset_241288.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,905 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' to key '000/241/dataset_241288.dat' (0 bytes transfered in 0:00:00.048339 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:29,927 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675
galaxy.jobs.runners DEBUG 2019-02-13 14:22:29,958 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191675] queued (139.791 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:29,986 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Job dispatched
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:30,025 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Mapped job to destination id: dynamic-destination-msconvert_nix
galaxy.jobs.handler DEBUG 2019-02-13 14:22:30,080 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:30,100 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,112 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' :
of size 0 bytes to key '000/241/dataset_241289.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,153 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' to key '000/241/dataset_241289.dat' (0 bytes transfered in 0:00:00.041692 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:30,169 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191676
galaxy.jobs.runners DEBUG 2019-02-13 14:22:30,186 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191676] queued (106.436 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:30,209 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Job dispatched
galaxy.tools.deps DEBUG 2019-02-13 14:22:30,411 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Using dependency proteowizard version 3.0.9992 of type conda
galaxy.jobs.command_factory INFO 2019-02-13 14:22:30,518 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Built script [/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675/tool_script.sh] for tool command [[ "$CONDA_DEFAULT_ENV" = "/home/galaxy/galaxy/miniconda2/envs/__proteowizard@3.0.9992" ] ||
|
Exception
|
def collect_primary_datasets(job_context, output, input_ext):
tool = job_context.tool
job_working_directory = job_context.job_working_directory
sa_session = job_context.sa_session
# Loop through output file names, looking for generated primary
# datasets in form specified by discover dataset patterns or in tool provided metadata.
primary_output_assigned = False
new_outdata_name = None
primary_datasets = {}
for output_index, (name, outdata) in enumerate(output.items()):
dataset_collectors = [DEFAULT_DATASET_COLLECTOR]
if name in tool.outputs:
dataset_collectors = [
dataset_collector(description)
for description in tool.outputs[name].dataset_collector_descriptions
]
filenames = odict.odict()
for discovered_file in discover_files(
name,
job_context.tool_provided_metadata,
dataset_collectors,
job_working_directory,
outdata,
):
filenames[discovered_file.path] = discovered_file
for filename_index, (filename, discovered_file) in enumerate(filenames.items()):
extra_file_collector = discovered_file.collector
fields_match = discovered_file.match
if not fields_match:
# Before I guess pop() would just have thrown an IndexError
raise Exception(
"Problem parsing metadata fields for file %s" % filename
)
designation = fields_match.designation
if (
filename_index == 0
and extra_file_collector.assign_primary_output
and output_index == 0
):
new_outdata_name = fields_match.name or "%s (%s)" % (
outdata.name,
designation,
)
# Move data from temp location to dataset location
job_context.object_store.update_from_file(
outdata.dataset, file_name=filename, create=True
)
primary_output_assigned = True
continue
if name not in primary_datasets:
primary_datasets[name] = odict.odict()
visible = fields_match.visible
ext = fields_match.ext
if ext == "input":
ext = input_ext
dbkey = fields_match.dbkey
if dbkey == INPUT_DBKEY_TOKEN:
dbkey = job_context.input_dbkey
# Create new primary dataset
new_primary_name = fields_match.name or "%s (%s)" % (
outdata.name,
designation,
)
info = outdata.info
# TODO: should be able to disambiguate files in different directories...
new_primary_filename = os.path.split(filename)[-1]
new_primary_datasets_attributes = (
job_context.tool_provided_metadata.get_new_dataset_meta_by_basename(
name, new_primary_filename
)
)
primary_data = job_context.create_dataset(
ext,
designation,
visible,
dbkey,
new_primary_name,
filename,
info=info,
init_from=outdata,
dataset_attributes=new_primary_datasets_attributes,
)
# Associate new dataset with job
job_context.add_output_dataset_association(
"__new_primary_file_%s|%s__" % (name, designation), primary_data
)
if new_primary_datasets_attributes:
extra_files_path = new_primary_datasets_attributes.get(
"extra_files", None
)
if extra_files_path:
extra_files_path_joined = os.path.join(
job_working_directory, extra_files_path
)
primary_data.dataset.create_extra_files_path()
for root, dirs, files in os.walk(extra_files_path_joined):
extra_dir = os.path.join(
primary_data.extra_files_path,
root.replace(extra_files_path_joined, "", 1).lstrip(
os.path.sep
),
)
extra_dir = os.path.normpath(extra_dir)
for f in files:
job_context.object_store.update_from_file(
primary_data.dataset,
extra_dir=extra_dir,
alt_name=f,
file_name=os.path.join(root, f),
create=True,
preserve_symlinks=True,
)
job_context.add_datasets_to_history(
[primary_data], for_output_dataset=outdata
)
# Add dataset to return dict
primary_datasets[name][designation] = primary_data
if primary_output_assigned:
outdata.name = new_outdata_name
outdata.init_meta()
outdata.set_meta()
outdata.set_peek()
sa_session.add(outdata)
sa_session.flush()
return primary_datasets
|
def collect_primary_datasets(job_context, output, input_ext):
tool = job_context.tool
job_working_directory = job_context.job_working_directory
sa_session = job_context.sa_session
# Loop through output file names, looking for generated primary
# datasets in form specified by discover dataset patterns or in tool provided metadata.
primary_output_assigned = False
new_outdata_name = None
primary_datasets = {}
for output_index, (name, outdata) in enumerate(output.items()):
dataset_collectors = [DEFAULT_DATASET_COLLECTOR]
if name in tool.outputs:
dataset_collectors = [
dataset_collector(description)
for description in tool.outputs[name].dataset_collector_descriptions
]
filenames = odict.odict()
for discovered_file in discover_files(
name,
job_context.tool_provided_metadata,
dataset_collectors,
job_working_directory,
outdata,
):
filenames[discovered_file.path] = discovered_file
for filename_index, (filename, discovered_file) in enumerate(filenames.items()):
extra_file_collector = discovered_file.collector
fields_match = discovered_file.match
if not fields_match:
# Before I guess pop() would just have thrown an IndexError
raise Exception(
"Problem parsing metadata fields for file %s" % filename
)
designation = fields_match.designation
if (
filename_index == 0
and extra_file_collector.assign_primary_output
and output_index == 0
):
new_outdata_name = fields_match.name or "%s (%s)" % (
outdata.name,
designation,
)
# Move data from temp location to dataset location
job_context.object_store.update_from_file(
outdata.dataset, file_name=filename, create=True
)
primary_output_assigned = True
continue
if name not in primary_datasets:
primary_datasets[name] = odict.odict()
visible = fields_match.visible
ext = fields_match.ext
if ext == "input":
ext = input_ext
dbkey = fields_match.dbkey
if dbkey == INPUT_DBKEY_TOKEN:
dbkey = job_context.input_dbkey
# Create new primary dataset
new_primary_name = fields_match.name or "%s (%s)" % (
outdata.name,
designation,
)
info = outdata.info
# TODO: should be able to disambiguate files in different directories...
new_primary_filename = os.path.split(filename)[-1]
new_primary_datasets_attributes = (
job_context.tool_provided_metadata.get_new_dataset_meta_by_basename(
name, new_primary_filename
)
)
primary_data = job_context.create_dataset(
ext,
designation,
visible,
dbkey,
new_primary_name,
filename,
info=info,
init_from=outdata,
dataset_attributes=new_primary_datasets_attributes,
)
# Associate new dataset with job
job_context.add_output_dataset_association(
"__new_primary_file_%s|%s__" % (name, designation), primary_data
)
if new_primary_datasets_attributes:
extra_files_path = new_primary_datasets_attributes.get(
"extra_files", None
)
if extra_files_path:
extra_files_path_joined = os.path.join(
job_working_directory, extra_files_path
)
for root, dirs, files in os.walk(extra_files_path_joined):
extra_dir = os.path.join(
primary_data.extra_files_path,
root.replace(extra_files_path_joined, "", 1).lstrip(
os.path.sep
),
)
extra_dir = os.path.normpath(extra_dir)
for f in files:
job_context.object_store.update_from_file(
primary_data.dataset,
extra_dir=extra_dir,
alt_name=f,
file_name=os.path.join(root, f),
create=True,
preserve_symlinks=True,
)
job_context.add_datasets_to_history(
[primary_data], for_output_dataset=outdata
)
# Add dataset to return dict
primary_datasets[name][designation] = primary_data
if primary_output_assigned:
outdata.name = new_outdata_name
outdata.init_meta()
outdata.set_meta()
outdata.set_peek()
sa_session.add(outdata)
sa_session.flush()
return primary_datasets
|
https://github.com/galaxyproject/galaxy/issues/7368
|
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,638 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150481
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,639 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150482
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,640 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150483
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,641 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150484
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150485
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150486
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,643 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150487
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,644 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150488
galaxy.workflow.scheduling_manager INFO 2019-02-13 14:22:26,645 [p:336,w:1,m:0] [uWSGIWorker1Core3] Queueing workflow invocation for handler [main.web.1]
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "POST /api/workflows/f293ecb6146177b0/invocations HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac
OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 336|app: 0|req: 9300/93021] 127.0.0.1 () {56 vars in 1491 bytes} [Wed Feb 13 14:22:26 2019] POST /api/workflows/f293ecb6146177b0/invocations => generated 243 bytes in 458 msecs (HTTP/1.1 200) 3 headers in
124 bytes (1 switches on core 3)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/webhooks HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.
36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 343|app: 0|req: 9298/93022] 127.0.0.1 () {50 vars in 1334 bytes} [Wed Feb 13 14:22:26 2019] GET /api/webhooks => generated 2 bytes in 20 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 2)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&
qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 434|app: 0|req: 9304/93023] 127.0.0.1 () {50 vars in 1641 bytes} [Wed Feb 13 14:22:26 2019] GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&
q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 66 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 3)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,590 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150481 of invocation 17854 invoked (29.118 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,610 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150482 of invocation 17854 invoked (19.367 ms)
galaxy.managers.collections DEBUG 2019-02-13 14:22:27,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppr
essed after 10 occurrences)
(util.ellipses_string(value),))
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppre
ssed after 10 occurrences)
(util.ellipses_string(value),))
galaxy.tools.actions INFO 2019-02-13 14:22:27,980 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0
.9992.0 (73.258 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,995 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.906 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,998 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.
9992.0] complete, ready to flush (3.279 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191675,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msco
nvert_nix/3.0.9992.0] (97.376 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191675]
(286.212 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,329 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 (40.918 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,343 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.881 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,348 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] complete, ready to flush (5.537 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,434 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191676,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] (85.546 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,435 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191676] (165.391 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,631 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 request: (983.069 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:28,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150483 of invocation 17854 invoked (1061.588 ms)
galaxy.tools ERROR 2019-02-13 14:22:28,865 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Exception caught while attempting tool execution:
Traceback (most recent call last):
File "lib/galaxy/tools/__init__.py", line 1436, in handle_single_execution
collection_info=collection_info,
File "lib/galaxy/tools/__init__.py", line 1517, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "lib/galaxy/tools/actions/__init__.py", line 331, in execute
params=wrapped_params.params,
File "lib/galaxy/tools/parameters/wrapped.py", line 44, in params
self.wrap_values(self.tool.inputs, params, skip_missing_values=not self.tool.check_values)
File "lib/galaxy/tools/parameters/wrapped.py", line 79, in wrap_values
name=input.name)
File "lib/galaxy/tools/wrappers.py", line 343, in __init__
list.__init__(self, map(to_wrapper, datasets))
File "lib/galaxy/tools/wrappers.py", line 341, in to_wrapper
return self._dataset_wrapper(dataset, dataset_paths, **kwargs)
File "lib/galaxy/tools/wrappers.py", line 314, in _dataset_wrapper
real_path = dataset.file_name
File "lib/galaxy/model/__init__.py", line 2181, in get_file_name
return self.dataset.get_file_name()
File "lib/galaxy/model/__init__.py", line 1969, in get_file_name
filename = self.object_store.get_filename(self)
File "lib/galaxy/objectstore/__init__.py", line 506, in get_filename
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
File "lib/galaxy/objectstore/__init__.py", line 534, in _call_method
% (method, self._repr_object_for_exception(obj), str(kwargs)))
ObjectNotFound: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute WARNING 2019-02-13 14:22:28,870 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] There was a failure executing a job for tool [toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1] - Error executing tool: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,896 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1 request: (162.061 ms)
galaxy.workflow.run ERROR 2019-02-13 14:22:28,931 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=7155,name=iMetaQuantome_workflow (imported from uploaded file)], problem occurred on WorkflowStep[index=3,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.workflow.run ERROR 2019-02-13 14:22:28,932 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Mapped job to destination id: dynamic-destination-msconvert_nix
10.21.31.51 - - [13/Feb/2019:14:22:29 -0500] "GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fgalaxyp%2Fpeptideshaker%2Fsearch_gui%2F3.3.10.1&version=3.3.10.1&__identifer=n8xwa2bomdwb" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299"
[pid: 351|app: 0|req: 9303/93024] 127.0.0.1 () {50 vars in 1830 bytes} [Wed Feb 13 14:22:29 2019] GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 73 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 1)
galaxy.jobs.handler DEBUG 2019-02-13 14:22:29,818 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:29,842 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,856 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' of size 0 bytes to key '000/241/dataset_241288.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,905 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' to key '000/241/dataset_241288.dat' (0 bytes transfered in 0:00:00.048339 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:29,927 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675
galaxy.jobs.runners DEBUG 2019-02-13 14:22:29,958 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191675] queued (139.791 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:29,986 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Job dispatched
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:30,025 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Mapped job to destination id: dynamic-destination-msconvert_nix
galaxy.jobs.handler DEBUG 2019-02-13 14:22:30,080 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:30,100 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,112 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' :
of size 0 bytes to key '000/241/dataset_241289.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,153 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' to key '000/241/dataset_241289.dat' (0 bytes transfered in 0:00:00.041692 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:30,169 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191676
galaxy.jobs.runners DEBUG 2019-02-13 14:22:30,186 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191676] queued (106.436 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:30,209 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Job dispatched
galaxy.tools.deps DEBUG 2019-02-13 14:22:30,411 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Using dependency proteowizard version 3.0.9992 of type conda
galaxy.jobs.command_factory INFO 2019-02-13 14:22:30,518 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Built script [/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675/tool_script.sh] for tool command [[ "$CONDA_DEFAULT_ENV" = "/home/galaxy/galaxy/miniconda2/envs/__proteowizard@3.0.9992" ] ||
|
Exception
|
def _dataset_wrapper(self, dataset, dataset_paths, **kwargs):
wrapper_kwds = kwargs.copy()
if dataset and dataset_paths:
real_path = dataset.file_name
if real_path in dataset_paths:
wrapper_kwds["dataset_path"] = dataset_paths[real_path]
return DatasetFilenameWrapper(dataset, **wrapper_kwds)
|
def _dataset_wrapper(self, dataset, dataset_paths, **kwargs):
wrapper_kwds = kwargs.copy()
if dataset:
real_path = dataset.file_name
if real_path in dataset_paths:
wrapper_kwds["dataset_path"] = dataset_paths[real_path]
return DatasetFilenameWrapper(dataset, **wrapper_kwds)
|
https://github.com/galaxyproject/galaxy/issues/7368
|
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,638 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150481
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,639 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150482
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,640 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150483
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,641 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150484
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150485
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,642 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150486
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,643 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150487
galaxy.workflow.run_request INFO 2019-02-13 14:22:26,644 [p:336,w:1,m:0] [uWSGIWorker1Core3] Creating a step_state for step.id 150488
galaxy.workflow.scheduling_manager INFO 2019-02-13 14:22:26,645 [p:336,w:1,m:0] [uWSGIWorker1Core3] Queueing workflow invocation for handler [main.web.1]
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "POST /api/workflows/f293ecb6146177b0/invocations HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac
OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 336|app: 0|req: 9300/93021] 127.0.0.1 () {56 vars in 1491 bytes} [Wed Feb 13 14:22:26 2019] POST /api/workflows/f293ecb6146177b0/invocations => generated 243 bytes in 458 msecs (HTTP/1.1 200) 3 headers in
124 bytes (1 switches on core 3)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/webhooks HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.
36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 343|app: 0|req: 9298/93022] 127.0.0.1 () {50 vars in 1334 bytes} [Wed Feb 13 14:22:26 2019] GET /api/webhooks => generated 2 bytes in 20 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 2)
98.236.80.203 - - [13/Feb/2019:14:22:26 -0500] "GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&
qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/workflows/run?id=f293ecb6146177b0" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.81 Safari/537.36"
[pid: 434|app: 0|req: 9304/93023] 127.0.0.1 () {50 vars in 1641 bytes} [Wed Feb 13 14:22:26 2019] GET /api/histories/e8e7e8330bf2145b/contents?details=a2cb428649a1c4a2&order=hid&v=dev&q=update_time-ge&q=deleted&
q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 66 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 3)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,590 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150481 of invocation 17854 invoked (29.118 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:27,610 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150482 of invocation 17854 invoked (19.367 ms)
galaxy.managers.collections DEBUG 2019-02-13 14:22:27,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Created collection with 2 elements
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'output'. (this warning may be suppr
essed after 10 occurrences)
(util.ellipses_string(value),))
/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/.venv/lib/python2.7/site-packages/sqlalchemy/sql/sqltypes.py:226: SAWarning: Unicode type received non-unicode bind param value 'input'. (this warning may be suppre
ssed after 10 occurrences)
(util.ellipses_string(value),))
galaxy.tools.actions INFO 2019-02-13 14:22:27,980 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0
.9992.0 (73.258 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,995 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.906 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:27,998 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.
9992.0] complete, ready to flush (3.279 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191675,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msco
nvert_nix/3.0.9992.0] (97.376 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,096 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191675]
(286.212 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,329 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Handled output named output for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 (40.918 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,343 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Added output datasets to history (12.881 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,348 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] complete, ready to flush (5.537 ms)
galaxy.tools.actions INFO 2019-02-13 14:22:28,434 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Flushed transaction for job Job[id=191676,tool_id=toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] (85.546 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,435 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Tool [toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0] created job [191676] (165.391 ms)
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,631 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/msconvert_nix/msconvert_nix/3.0.9992.0 request: (983.069 ms)
galaxy.workflow.run DEBUG 2019-02-13 14:22:28,672 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Workflow step 150483 of invocation 17854 invoked (1061.588 ms)
galaxy.tools ERROR 2019-02-13 14:22:28,865 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Exception caught while attempting tool execution:
Traceback (most recent call last):
File "lib/galaxy/tools/__init__.py", line 1436, in handle_single_execution
collection_info=collection_info,
File "lib/galaxy/tools/__init__.py", line 1517, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "lib/galaxy/tools/actions/__init__.py", line 331, in execute
params=wrapped_params.params,
File "lib/galaxy/tools/parameters/wrapped.py", line 44, in params
self.wrap_values(self.tool.inputs, params, skip_missing_values=not self.tool.check_values)
File "lib/galaxy/tools/parameters/wrapped.py", line 79, in wrap_values
name=input.name)
File "lib/galaxy/tools/wrappers.py", line 343, in __init__
list.__init__(self, map(to_wrapper, datasets))
File "lib/galaxy/tools/wrappers.py", line 341, in to_wrapper
return self._dataset_wrapper(dataset, dataset_paths, **kwargs)
File "lib/galaxy/tools/wrappers.py", line 314, in _dataset_wrapper
real_path = dataset.file_name
File "lib/galaxy/model/__init__.py", line 2181, in get_file_name
return self.dataset.get_file_name()
File "lib/galaxy/model/__init__.py", line 1969, in get_file_name
filename = self.object_store.get_filename(self)
File "lib/galaxy/objectstore/__init__.py", line 506, in get_filename
return self._call_method('get_filename', obj, ObjectNotFound, True, **kwargs)
File "lib/galaxy/objectstore/__init__.py", line 534, in _call_method
% (method, self._repr_object_for_exception(obj), str(kwargs)))
ObjectNotFound: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute WARNING 2019-02-13 14:22:28,870 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] There was a failure executing a job for tool [toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1] - Error executing tool: objectstore, _call_method failed: get_filename on Dataset(id=241288), kwargs: {}
galaxy.tools.execute DEBUG 2019-02-13 14:22:28,896 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Executed 1 job(s) for tool toolshed.g2.bx.psu.edu/repos/galaxyp/peptideshaker/search_gui/3.3.10.1 request: (162.061 ms)
galaxy.workflow.run ERROR 2019-02-13 14:22:28,931 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=7155,name=iMetaQuantome_workflow (imported from uploaded file)], problem occurred on WorkflowStep[index=3,type=tool].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.workflow.run ERROR 2019-02-13 14:22:28,932 [p:336,w:1,m:0] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 1011, in execute
raise Exception(message)
Exception: Failed to create one or more job(s) for workflow step.
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:29,710 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Mapped job to destination id: dynamic-destination-msconvert_nix
10.21.31.51 - - [13/Feb/2019:14:22:29 -0500] "GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False HTTP/1.1" 200 - "https://galaxyp.msi.umn.edu/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fgalaxyp%2Fpeptideshaker%2Fsearch_gui%2F3.3.10.1&version=3.3.10.1&__identifer=n8xwa2bomdwb" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299"
[pid: 351|app: 0|req: 9303/93024] 127.0.0.1 () {50 vars in 1830 bytes} [Wed Feb 13 14:22:29 2019] GET /api/histories/26b7bfc3888698b1/contents?order=hid&v=dev&q=update_time-ge&q=deleted&q=purged&qv=2019-02-13T20%3A22%3A24.000Z&qv=False&qv=False => generated 2 bytes in 73 msecs (HTTP/1.1 200) 3 headers in 124 bytes (1 switches on core 1)
galaxy.jobs.handler DEBUG 2019-02-13 14:22:29,818 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:29,842 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,856 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' of size 0 bytes to key '000/241/dataset_241288.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:29,905 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241288.dat' to key '000/241/dataset_241288.dat' (0 bytes transfered in 0:00:00.048339 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:29,927 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675
galaxy.jobs.runners DEBUG 2019-02-13 14:22:29,958 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191675] queued (139.791 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:29,986 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191675) Job dispatched
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] In default runner handling tool msconvert_nix
galaxy.jobs.rules.destinations DEBUG 2019-02-13 14:22:30,024 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] JobDestination created with values {'-l': 'walltime=1:00:00,nodes=1:ppn=2,mem=10gb', '-q': 'small@mesabihn3'}
galaxy.jobs.mapper DEBUG 2019-02-13 14:22:30,025 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Mapped job to destination id: dynamic-destination-msconvert_nix
galaxy.jobs.handler DEBUG 2019-02-13 14:22:30,080 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Dispatching to pbs-mesabi runner
galaxy.jobs DEBUG 2019-02-13 14:22:30,100 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Persisting job destination (destination id: dynamic-destination-msconvert_nix)
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,112 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushing cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' :
of size 0 bytes to key '000/241/dataset_241289.dat'
galaxy.objectstore.s3 DEBUG 2019-02-13 14:22:30,153 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Pushed cache file '/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/files/000/241/dataset_241289.dat' to key '000/241/dataset_241289.dat' (0 bytes transfered in 0:00:00.041692 sec)
galaxy.jobs DEBUG 2019-02-13 14:22:30,169 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Working directory for job is: /panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191676
galaxy.jobs.runners DEBUG 2019-02-13 14:22:30,186 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] Job [191676] queued (106.436 ms)
galaxy.jobs.handler INFO 2019-02-13 14:22:30,209 [p:336,w:1,m:0] [JobHandlerQueue.monitor_thread] (191676) Job dispatched
galaxy.tools.deps DEBUG 2019-02-13 14:22:30,411 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Using dependency proteowizard version 3.0.9992 of type conda
galaxy.jobs.command_factory INFO 2019-02-13 14:22:30,518 [p:336,w:1,m:0] [PBSRunner.work_thread-0] Built script [/panfs/roc/website/galaxyp.msi.umn.edu/GALAXYP/database/job_working_directory/000/191/191675/tool_script.sh] for tool command [[ "$CONDA_DEFAULT_ENV" = "/home/galaxy/galaxy/miniconda2/envs/__proteowizard@3.0.9992" ] ||
|
Exception
|
def __main__():
input_name = sys.argv[1]
output_name = sys.argv[2]
skipped_lines = 0
first_skipped_line = 0
out = open(output_name, "w")
i = 0
for i, line in enumerate(open(input_name)):
complete_bed = False
line = line.rstrip("\r\n")
if (
line
and not line.startswith("#")
and not line.startswith("track")
and not line.startswith("browser")
):
try:
elems = line.split("\t")
if len(elems) == 12:
complete_bed = True
chrom = elems[0]
if complete_bed:
feature = "mRNA"
else:
try:
feature = elems[3]
except Exception:
feature = "feature%d" % (i + 1)
start = int(elems[1]) + 1
end = int(elems[2])
try:
score = elems[4]
except Exception:
score = "0"
try:
strand = elems[5]
except Exception:
strand = "+"
try:
group = elems[3]
except Exception:
group = "group%d" % (i + 1)
if complete_bed:
out.write(
"%s\tbed2gff\t%s\t%d\t%d\t%s\t%s\t.\t%s %s;\n"
% (chrom, feature, start, end, score, strand, feature, group)
)
else:
out.write(
"%s\tbed2gff\t%s\t%d\t%d\t%s\t%s\t.\t%s;\n"
% (chrom, feature, start, end, score, strand, group)
)
if complete_bed:
# We have all the info necessary to annotate exons for genes and mRNAs
block_count = int(elems[9])
block_sizes = elems[10].split(",")
block_starts = elems[11].split(",")
for j in range(block_count):
exon_start = int(start) + int(block_starts[j])
exon_end = exon_start + int(block_sizes[j]) - 1
out.write(
"%s\tbed2gff\texon\t%d\t%d\t%s\t%s\t.\texon %s;\n"
% (chrom, exon_start, exon_end, score, strand, group)
)
except Exception:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
else:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
out.close()
info_msg = "%i lines converted to GFF version 2. " % (i + 1 - skipped_lines)
if skipped_lines > 0:
info_msg += "Skipped %d blank/comment/invalid lines starting with line #%d." % (
skipped_lines,
first_skipped_line,
)
print(info_msg)
|
def __main__():
input_name = sys.argv[1]
output_name = sys.argv[2]
skipped_lines = 0
first_skipped_line = 0
out = open(output_name, "w")
out.write("##gff-version 2\n")
out.write("##bed_to_gff_converter.py\n\n")
i = 0
for i, line in enumerate(open(input_name)):
complete_bed = False
line = line.rstrip("\r\n")
if (
line
and not line.startswith("#")
and not line.startswith("track")
and not line.startswith("browser")
):
try:
elems = line.split("\t")
if len(elems) == 12:
complete_bed = True
chrom = elems[0]
if complete_bed:
feature = "mRNA"
else:
try:
feature = elems[3]
except Exception:
feature = "feature%d" % (i + 1)
start = int(elems[1]) + 1
end = int(elems[2])
try:
score = elems[4]
except Exception:
score = "0"
try:
strand = elems[5]
except Exception:
strand = "+"
try:
group = elems[3]
except Exception:
group = "group%d" % (i + 1)
if complete_bed:
out.write(
"%s\tbed2gff\t%s\t%d\t%d\t%s\t%s\t.\t%s %s;\n"
% (chrom, feature, start, end, score, strand, feature, group)
)
else:
out.write(
"%s\tbed2gff\t%s\t%d\t%d\t%s\t%s\t.\t%s;\n"
% (chrom, feature, start, end, score, strand, group)
)
if complete_bed:
# We have all the info necessary to annotate exons for genes and mRNAs
block_count = int(elems[9])
block_sizes = elems[10].split(",")
block_starts = elems[11].split(",")
for j in range(block_count):
exon_start = int(start) + int(block_starts[j])
exon_end = exon_start + int(block_sizes[j]) - 1
out.write(
"%s\tbed2gff\texon\t%d\t%d\t%s\t%s\t.\texon %s;\n"
% (chrom, exon_start, exon_end, score, strand, group)
)
except Exception:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
else:
skipped_lines += 1
if not first_skipped_line:
first_skipped_line = i + 1
out.close()
info_msg = "%i lines converted to GFF version 2. " % (i + 1 - skipped_lines)
if skipped_lines > 0:
info_msg += "Skipped %d blank/comment/invalid lines starting with line #%d." % (
skipped_lines,
first_skipped_line,
)
print(info_msg)
|
https://github.com/galaxyproject/galaxy/issues/7771
|
Dataset Error
An error occured while running the tool toolshed.g2.bx.psu.edu/repos/iuc/cwpair2/cwpair2/1.0.0.
Tool execution generated the following messages:
Unable to parse file "/galaxy-repl/main/files/031/018/dataset_31018984.dat".
Traceback (most recent call last):
File "/cvmfs/main.galaxyproject.org/shed_tools/toolshed.g2.bx.psu.edu/repos/iuc/cwpair2/d4db13c9dd7f/cwpair2/cwpair2_util.py", line 280, in perform_process
chromosomes = parse_chromosomes(input)
File "/cvmfs/main.galaxyproject.org/shed_tools/toolshed.g2.bx.psu.edu/repos/iuc/cwpair2/d4db13c9dd7f/cwpair2/cwpair2_util.py", line 96, in parse_chromosomes
cname, junk, junk, start, end, value, strand, junk, junk = line
ValueError: need more than 1 value to unpack
|
ValueError
|
def register(
self, trans, email=None, username=None, password=None, confirm=None, subscribe=False
):
"""
Register a new user.
"""
if not trans.app.config.allow_user_creation and not trans.user_is_admin:
message = "User registration is disabled. Please contact your local Galaxy administrator for an account."
if trans.app.config.error_email_to is not None:
message += " Contact: %s" % trans.app.config.error_email_to
return None, message
if not email or not username or not password or not confirm:
return None, "Please provide email, username and password."
message = "\n".join(
[
validate_email(trans, email),
validate_password(trans, password, confirm),
validate_publicname(trans, username),
]
).rstrip()
if message:
return None, message
email = util.restore_text(email)
username = util.restore_text(username)
message, status = trans.app.auth_manager.check_registration_allowed(
email, username, password
)
if message:
return None, message
if subscribe:
message = self.send_subscription_email(email)
if message:
return None, message
user = self.create(email=email, username=username, password=password)
if self.app.config.user_activation_on:
self.send_activation_email(trans, email, username)
return user, None
|
def register(
self,
trans,
email=None,
username=None,
password=None,
confirm=None,
subscribe=False,
**kwd,
):
"""
Register a new user.
"""
if not trans.app.config.allow_user_creation and not trans.user_is_admin:
message = "User registration is disabled. Please contact your local Galaxy administrator for an account."
if trans.app.config.error_email_to is not None:
message += " Contact: %s" % trans.app.config.error_email_to
return None, message
if not email or not username or not password or not confirm:
return None, "Please provide email, username and password."
message = "\n".join(
[
validate_email(trans, email),
validate_password(trans, password, confirm),
validate_publicname(trans, username),
]
).rstrip()
if message:
return None, message
email = util.restore_text(email)
username = util.restore_text(username)
message, status = trans.app.auth_manager.check_registration_allowed(
email, username, password
)
if message:
return None, message
if subscribe:
message = self.send_subscription_email(email)
if message:
return None, message
user = self.create(email=email, username=username, password=password)
if self.app.config.user_activation_on:
self.send_activation_email(trans, email, username)
return user, None
|
https://github.com/galaxyproject/galaxy/issues/7617
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 114, in login
return self.__validate_login(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 135, in __validate_login
message, status, user, success = self.__autoregistration(trans, login, password, status, kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 90, in __autoregistration
message, status, user, success = self.__register(trans, **kwd)
AttributeError: 'User' object has no attribute '_User__register'
|
AttributeError
|
def __autoregistration(self, trans, login, password):
"""
Does the autoregistration if enabled. Returns a message
"""
autoreg = trans.app.auth_manager.check_auto_registration(trans, login, password)
user = None
if autoreg["auto_reg"]:
email = autoreg["email"]
username = autoreg["username"]
message = " ".join(
[
validate_email(trans, email, allow_empty=True),
validate_publicname(trans, username),
]
).rstrip()
if not message:
user = self.user_manager.create(email=email, username=username, password="")
if trans.app.config.user_activation_on:
self.user_manager.send_activation_email(trans, email, username)
# The handle_user_login() method has a call to the history_set_default_permissions() method
# (needed when logging in with a history), user needs to have default permissions set before logging in
if not trans.user_is_admin:
trans.handle_user_login(user)
trans.log_event("User (auto) created a new account")
trans.log_event("User logged in")
if "attributes" in autoreg and "roles" in autoreg["attributes"]:
self.__handle_role_and_group_auto_creation(
trans,
user,
autoreg["attributes"]["roles"],
auto_create_groups=autoreg["auto_create_groups"],
auto_create_roles=autoreg["auto_create_roles"],
auto_assign_roles_to_groups_only=autoreg[
"auto_assign_roles_to_groups_only"
],
)
else:
message = (
"Auto-registration failed, contact your local Galaxy administrator. %s"
% message
)
else:
message = "No such user or invalid password."
return message, user
|
def __autoregistration(
self, trans, login, password, status, kwd, no_password_check=False, cntrller=None
):
"""
Does the autoregistration if enabled. Returns a message
"""
autoreg = trans.app.auth_manager.check_auto_registration(
trans, login, password, no_password_check=no_password_check
)
user = None
success = False
if autoreg["auto_reg"]:
kwd["email"] = autoreg["email"]
kwd["username"] = autoreg["username"]
message = " ".join(
[
validate_email(trans, kwd["email"], allow_empty=True),
validate_publicname(trans, kwd["username"]),
]
).rstrip()
if not message:
message, status, user, success = self.__register(trans, **kwd)
if success:
# The handle_user_login() method has a call to the history_set_default_permissions() method
# (needed when logging in with a history), user needs to have default permissions set before logging in
if not trans.user_is_admin:
trans.handle_user_login(user)
trans.log_event("User (auto) created a new account")
trans.log_event("User logged in")
if "attributes" in autoreg and "roles" in autoreg["attributes"]:
self.__handle_role_and_group_auto_creation(
trans,
user,
autoreg["attributes"]["roles"],
auto_create_groups=autoreg["auto_create_groups"],
auto_create_roles=autoreg["auto_create_roles"],
auto_assign_roles_to_groups_only=autoreg[
"auto_assign_roles_to_groups_only"
],
)
else:
message = (
"Auto-registration failed, contact your local Galaxy administrator. %s"
% message
)
else:
message = (
"Auto-registration failed, contact your local Galaxy administrator. %s"
% message
)
else:
message = "No such user or invalid password."
return message, status, user, success
|
https://github.com/galaxyproject/galaxy/issues/7617
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 114, in login
return self.__validate_login(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 135, in __validate_login
message, status, user, success = self.__autoregistration(trans, login, password, status, kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 90, in __autoregistration
message, status, user, success = self.__register(trans, **kwd)
AttributeError: 'User' object has no attribute '_User__register'
|
AttributeError
|
def __validate_login(self, trans, payload={}, **kwd):
"""Handle Galaxy Log in"""
if not payload:
payload = kwd
message = trans.check_csrf_token(payload)
if message:
return self.message_exception(trans, message)
login = payload.get("login")
password = payload.get("password")
redirect = payload.get("redirect")
status = None
if not login or not password:
return self.message_exception(trans, "Please specify a username and password.")
user = (
trans.sa_session.query(trans.app.model.User)
.filter(
or_(
trans.app.model.User.table.c.email == login,
trans.app.model.User.table.c.username == login,
)
)
.first()
)
log.debug(
"trans.app.config.auth_config_file: %s" % trans.app.config.auth_config_file
)
if user is None:
message, user = self.__autoregistration(trans, login, password)
if message:
return self.message_exception(trans, message)
elif user.deleted:
message = "This account has been marked deleted, contact your local Galaxy administrator to restore the account."
if trans.app.config.error_email_to is not None:
message += " Contact: %s." % trans.app.config.error_email_to
return self.message_exception(trans, message, sanitize=False)
elif user.external:
message = "This account was created for use with an external authentication method, contact your local Galaxy administrator to activate it."
if trans.app.config.error_email_to is not None:
message += " Contact: %s." % trans.app.config.error_email_to
return self.message_exception(trans, message, sanitize=False)
elif not trans.app.auth_manager.check_password(user, password):
return self.message_exception(trans, "Invalid password.")
elif (
trans.app.config.user_activation_on and not user.active
): # activation is ON and the user is INACTIVE
if trans.app.config.activation_grace_period != 0: # grace period is ON
if self.is_outside_grace_period(
trans, user.create_time
): # User is outside the grace period. Login is disabled and he will have the activation email resent.
message, status = self.resend_activation_email(
trans, user.email, user.username
)
return self.message_exception(trans, message, sanitize=False)
else: # User is within the grace period, let him log in.
trans.handle_user_login(user)
trans.log_event("User logged in")
else: # Grace period is off. Login is disabled and user will have the activation email resent.
message, status = self.resend_activation_email(
trans, user.email, user.username
)
return self.message_exception(trans, message, sanitize=False)
else: # activation is OFF
pw_expires = trans.app.config.password_expiration_period
if pw_expires and user.last_password_change < datetime.today() - pw_expires:
# Password is expired, we don't log them in.
return {
"message": "Your password has expired. Please reset or change it to access Galaxy.",
"status": "warning",
"expired_user": trans.security.encode_id(user.id),
}
trans.handle_user_login(user)
trans.log_event("User logged in")
if pw_expires and user.last_password_change < datetime.today() - timedelta(
days=pw_expires.days / 10
):
# If password is about to expire, modify message to state that.
expiredate = datetime.today() - user.last_password_change + pw_expires
return {
"message": "Your password will expire in %s day(s)." % expiredate.days,
"status": "warning",
}
return {"message": "Success.", "redirect": self.__get_redirect_url(redirect)}
|
def __validate_login(self, trans, payload={}, **kwd):
"""Handle Galaxy Log in"""
if not payload:
payload = kwd
message = trans.check_csrf_token(payload)
if message:
return self.message_exception(trans, message)
login = payload.get("login")
password = payload.get("password")
redirect = payload.get("redirect")
status = None
if not login or not password:
return self.message_exception(trans, "Please specify a username and password.")
user = (
trans.sa_session.query(trans.app.model.User)
.filter(
or_(
trans.app.model.User.table.c.email == login,
trans.app.model.User.table.c.username == login,
)
)
.first()
)
log.debug(
"trans.app.config.auth_config_file: %s" % trans.app.config.auth_config_file
)
if user is None:
message, status, user, success = self.__autoregistration(
trans, login, password, status, kwd
)
if not success:
return self.message_exception(trans, message)
elif user.deleted:
message = "This account has been marked deleted, contact your local Galaxy administrator to restore the account."
if trans.app.config.error_email_to is not None:
message += " Contact: %s." % trans.app.config.error_email_to
return self.message_exception(trans, message, sanitize=False)
elif user.external:
message = "This account was created for use with an external authentication method, contact your local Galaxy administrator to activate it."
if trans.app.config.error_email_to is not None:
message += " Contact: %s." % trans.app.config.error_email_to
return self.message_exception(trans, message, sanitize=False)
elif not trans.app.auth_manager.check_password(user, password):
return self.message_exception(trans, "Invalid password.")
elif (
trans.app.config.user_activation_on and not user.active
): # activation is ON and the user is INACTIVE
if trans.app.config.activation_grace_period != 0: # grace period is ON
if self.is_outside_grace_period(
trans, user.create_time
): # User is outside the grace period. Login is disabled and he will have the activation email resent.
message, status = self.resend_activation_email(
trans, user.email, user.username
)
return self.message_exception(trans, message, sanitize=False)
else: # User is within the grace period, let him log in.
trans.handle_user_login(user)
trans.log_event("User logged in")
else: # Grace period is off. Login is disabled and user will have the activation email resent.
message, status = self.resend_activation_email(
trans, user.email, user.username
)
return self.message_exception(trans, message, sanitize=False)
else: # activation is OFF
pw_expires = trans.app.config.password_expiration_period
if pw_expires and user.last_password_change < datetime.today() - pw_expires:
# Password is expired, we don't log them in.
return {
"message": "Your password has expired. Please reset or change it to access Galaxy.",
"status": "warning",
"expired_user": trans.security.encode_id(user.id),
}
trans.handle_user_login(user)
trans.log_event("User logged in")
if pw_expires and user.last_password_change < datetime.today() - timedelta(
days=pw_expires.days / 10
):
# If password is about to expire, modify message to state that.
expiredate = datetime.today() - user.last_password_change + pw_expires
return {
"message": "Your password will expire in %s day(s)." % expiredate.days,
"status": "warning",
}
return {"message": "Success.", "redirect": self.__get_redirect_url(redirect)}
|
https://github.com/galaxyproject/galaxy/issues/7617
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 114, in login
return self.__validate_login(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 135, in __validate_login
message, status, user, success = self.__autoregistration(trans, login, password, status, kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 90, in __autoregistration
message, status, user, success = self.__register(trans, **kwd)
AttributeError: 'User' object has no attribute '_User__register'
|
AttributeError
|
def resend_verification(self, trans):
"""
Exposed function for use outside of the class. E.g. when user click on the resend link in the masthead.
"""
message, status = self.resend_activation_email(trans, None, None)
if status:
return trans.show_ok_message(message)
else:
return trans.show_error_message(message)
|
def resend_verification(self, trans):
"""
Exposed function for use outside of the class. E.g. when user click on the resend link in the masthead.
"""
message, status = self.resend_activation_email(trans, None, None)
if status == "done":
return trans.show_ok_message(message)
else:
return trans.show_error_message(message)
|
https://github.com/galaxyproject/galaxy/issues/7617
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 114, in login
return self.__validate_login(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 135, in __validate_login
message, status, user, success = self.__autoregistration(trans, login, password, status, kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 90, in __autoregistration
message, status, user, success = self.__register(trans, **kwd)
AttributeError: 'User' object has no attribute '_User__register'
|
AttributeError
|
def resend_activation_email(self, trans, email, username):
"""
Function resends the verification email in case user wants to log in with an inactive account or he clicks the resend link.
"""
if (
email is None
): # User is coming from outside registration form, load email from trans
email = trans.user.email
if (
username is None
): # User is coming from outside registration form, load email from trans
username = trans.user.username
is_activation_sent = self.user_manager.send_activation_email(trans, email, username)
if is_activation_sent:
message = (
'This account has not been activated yet. The activation link has been sent again. Please check your email address <b>%s</b> including the spam/trash folder. <a target="_top" href="%s">Return to the home page</a>.'
% (escape(email), url_for("/"))
)
else:
message = (
'This account has not been activated yet but we are unable to send the activation link. Please contact your local Galaxy administrator. <a target="_top" href="%s">Return to the home page</a>.'
% url_for("/")
)
if trans.app.config.error_email_to is not None:
message += " Error contact: %s." % trans.app.config.error_email_to
return message, is_activation_sent
|
def resend_activation_email(self, trans, email, username):
"""
Function resends the verification email in case user wants to log in with an inactive account or he clicks the resend link.
"""
if (
email is None
): # User is coming from outside registration form, load email from trans
email = trans.user.email
if (
username is None
): # User is coming from outside registration form, load email from trans
username = trans.user.username
is_activation_sent = self.user_manager.send_activation_email(trans, email, username)
if is_activation_sent:
message = (
'This account has not been activated yet. The activation link has been sent again. Please check your email address <b>%s</b> including the spam/trash folder.<br><a target="_top" href="%s">Return to the home page</a>.'
% (escape(email), url_for("/"))
)
status = "error"
else:
message = (
'This account has not been activated yet but we are unable to send the activation link. Please contact your local Galaxy administrator.<br><a target="_top" href="%s">Return to the home page</a>.'
% url_for("/")
)
status = "error"
if trans.app.config.error_email_to is not None:
message += "<br>Error contact: %s" % trans.app.config.error_email_to
return message, status
|
https://github.com/galaxyproject/galaxy/issues/7617
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 114, in login
return self.__validate_login(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 135, in __validate_login
message, status, user, success = self.__autoregistration(trans, login, password, status, kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 90, in __autoregistration
message, status, user, success = self.__register(trans, **kwd)
AttributeError: 'User' object has no attribute '_User__register'
|
AttributeError
|
def create(self, trans, payload={}, **kwd):
if not payload:
payload = kwd
message = trans.check_csrf_token(payload)
if message:
return self.message_exception(trans, message)
user, message = self.user_manager.register(
trans, **_filtered_registration_params_dict(payload)
)
if message:
return self.message_exception(trans, message, sanitize=False)
elif user and not trans.user_is_admin:
trans.handle_user_login(user)
trans.log_event("User created a new account")
trans.log_event("User logged in")
return {"message": "Success."}
|
def create(self, trans, payload={}, **kwd):
if not payload:
payload = kwd
message = trans.check_csrf_token(payload)
if message:
return self.message_exception(trans, message)
user, message = self.user_manager.register(trans, **payload)
if message:
return self.message_exception(trans, message, sanitize=False)
elif user and not trans.user_is_admin:
trans.handle_user_login(user)
trans.log_event("User created a new account")
trans.log_event("User logged in")
return {"message": "Success."}
|
https://github.com/galaxyproject/galaxy/issues/7617
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 114, in login
return self.__validate_login(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 135, in __validate_login
message, status, user, success = self.__autoregistration(trans, login, password, status, kwd)
File "lib/galaxy/webapps/galaxy/controllers/user.py", line 90, in __autoregistration
message, status, user, success = self.__register(trans, **kwd)
AttributeError: 'User' object has no attribute '_User__register'
|
AttributeError
|
def send_mail(frm, to, subject, body, config, html=None):
"""
Sends an email.
:type frm: str
:param frm: from address
:type to: str
:param to: to address
:type subject: str
:param subject: Subject line
:type body: str
:param body: Body text (should be plain text)
:type config: object
:param config: Galaxy configuration object
:type html: str
:param html: Alternative HTML representation of the body content. If
provided will convert the message to a MIMEMultipart. (Default 'None')
"""
to = listify(to)
if html:
msg = MIMEMultipart("alternative")
else:
msg = MIMEText(body, "plain", "utf-8")
msg["To"] = ", ".join(to)
msg["From"] = frm
msg["Subject"] = subject
if config.smtp_server is None:
log.error("Mail is not configured for this Galaxy instance.")
log.info(msg)
return
if html:
mp_text = MIMEText(body, "plain", "utf-8")
mp_html = MIMEText(html, "html", "utf-8")
msg.attach(mp_text)
msg.attach(mp_html)
smtp_ssl = asbool(getattr(config, "smtp_ssl", False))
if smtp_ssl:
s = smtplib.SMTP_SSL()
else:
s = smtplib.SMTP()
s.connect(config.smtp_server)
if not smtp_ssl:
try:
s.starttls()
log.debug(
"Initiated SSL/TLS connection to SMTP server: %s" % config.smtp_server
)
except RuntimeError as e:
log.warning(
"SSL/TLS support is not available to your Python interpreter: %s" % e
)
except smtplib.SMTPHeloError as e:
log.error("The server didn't reply properly to the HELO greeting: %s" % e)
s.close()
raise
except smtplib.SMTPException as e:
log.warning("The server does not support the STARTTLS extension: %s" % e)
if config.smtp_username and config.smtp_password:
try:
s.login(config.smtp_username, config.smtp_password)
except smtplib.SMTPHeloError as e:
log.error("The server didn't reply properly to the HELO greeting: %s" % e)
s.close()
raise
except smtplib.SMTPAuthenticationError as e:
log.error(
"The server didn't accept the username/password combination: %s" % e
)
s.close()
raise
except smtplib.SMTPException as e:
log.error("No suitable authentication method was found: %s" % e)
s.close()
raise
s.sendmail(frm, to, msg.as_string())
s.quit()
|
def send_mail(frm, to, subject, body, config, html=None):
"""
Sends an email.
:type frm: str
:param frm: from address
:type to: str
:param to: to address
:type subject: str
:param subject: Subject line
:type body: str
:param body: Body text (should be plain text)
:type config: object
:param config: Galaxy configuration object
:type html: str
:param html: Alternative HTML representation of the body content. If
provided will convert the message to a MIMEMultipart. (Default 'None')
"""
to = listify(to)
if html:
msg = email_mime_multipart.MIMEMultipart("alternative")
else:
msg = email_mime_text.MIMEText(body.encode("ascii", "replace"))
msg["To"] = ", ".join(to)
msg["From"] = frm
msg["Subject"] = subject
if config.smtp_server is None:
log.error("Mail is not configured for this Galaxy instance.")
log.info(msg)
return
if html:
mp_text = email_mime_text.MIMEText(body.encode("ascii", "replace"), "plain")
mp_html = email_mime_text.MIMEText(html.encode("ascii", "replace"), "html")
msg.attach(mp_text)
msg.attach(mp_html)
smtp_ssl = asbool(getattr(config, "smtp_ssl", False))
if smtp_ssl:
s = smtplib.SMTP_SSL()
else:
s = smtplib.SMTP()
s.connect(config.smtp_server)
if not smtp_ssl:
try:
s.starttls()
log.debug(
"Initiated SSL/TLS connection to SMTP server: %s" % config.smtp_server
)
except RuntimeError as e:
log.warning(
"SSL/TLS support is not available to your Python interpreter: %s" % e
)
except smtplib.SMTPHeloError as e:
log.error("The server didn't reply properly to the HELO greeting: %s" % e)
s.close()
raise
except smtplib.SMTPException as e:
log.warning("The server does not support the STARTTLS extension: %s" % e)
if config.smtp_username and config.smtp_password:
try:
s.login(config.smtp_username, config.smtp_password)
except smtplib.SMTPHeloError as e:
log.error("The server didn't reply properly to the HELO greeting: %s" % e)
s.close()
raise
except smtplib.SMTPAuthenticationError as e:
log.error(
"The server didn't accept the username/password combination: %s" % e
)
s.close()
raise
except smtplib.SMTPException as e:
log.error("No suitable authentication method was found: %s" % e)
s.close()
raise
s.sendmail(frm, to, msg.as_string())
s.quit()
|
https://github.com/galaxyproject/galaxy/issues/7809
|
Traceback (most recent call last):
File "lib/galaxy/managers/users.py", line 365, in send_activation_email
util.send_mail(frm, to, subject, body, self.app.config)
File "lib/galaxy/util/__init__.py", line 1436, in send_mail
msg = email_mime_text.MIMEText(body.encode('ascii', 'replace'))
File "/cvmfs/test.galaxyproject.org/deps/_conda/envs/_galaxy_/lib/python3.6/email/mime/text.py", line 34, in __init__
_text.encode('us-ascii')
AttributeError: 'bytes' object has no attribute 'encode'
|
AttributeError
|
def add_output(self, workflow_output, step, output_object):
if step.type == "parameter_input":
# TODO: these should be properly tracked.
return
if output_object.history_content_type == "dataset":
output_assoc = WorkflowInvocationOutputDatasetAssociation()
output_assoc.workflow_invocation = self
output_assoc.workflow_output = workflow_output
output_assoc.workflow_step = step
output_assoc.dataset = output_object
self.output_datasets.append(output_assoc)
elif output_object.history_content_type == "dataset_collection":
output_assoc = WorkflowInvocationOutputDatasetCollectionAssociation()
output_assoc.workflow_invocation = self
output_assoc.workflow_output = workflow_output
output_assoc.workflow_step = step
output_assoc.dataset_collection = output_object
self.output_dataset_collections.append(output_assoc)
else:
raise Exception("Unknown output type encountered")
|
def add_output(self, workflow_output, step, output_object):
if output_object.history_content_type == "dataset":
output_assoc = WorkflowInvocationOutputDatasetAssociation()
output_assoc.workflow_invocation = self
output_assoc.workflow_output = workflow_output
output_assoc.workflow_step = step
output_assoc.dataset = output_object
self.output_datasets.append(output_assoc)
elif output_object.history_content_type == "dataset_collection":
output_assoc = WorkflowInvocationOutputDatasetCollectionAssociation()
output_assoc.workflow_invocation = self
output_assoc.workflow_output = workflow_output
output_assoc.workflow_step = step
output_assoc.dataset_collection = output_object
self.output_dataset_collections.append(output_assoc)
else:
raise Exception("Unknown output type encountered")
|
https://github.com/galaxyproject/galaxy/issues/7263
|
galaxy.workflow.scheduling_manager DEBUG 2019-01-23 23:20:06,529 [p:6571,w:0,m:3] [WorkflowRequestMonitor.monitor_thread] Attempting to schedule workflow invocation [(808,)]
galaxy.workflow.run ERROR 2019-01-23 23:20:06,736 [p:6571,w:0,m:3] [WorkflowRequestMonitor.monitor_thread] Failed to schedule Workflow[id=879,name=findcluster complete], problem occurred on WorkflowStep[index=0,type=parameter_input].
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 702, in execute
progress.set_outputs_for_input(invocation_step, step_outputs)
File "lib/galaxy/workflow/run.py", line 397, in set_outputs_for_input
self.set_step_outputs(invocation_step, outputs)
File "lib/galaxy/workflow/run.py", line 428, in set_step_outputs
output=output,
File "lib/galaxy/workflow/run.py", line 432, in _record_workflow_output
self.workflow_invocation.add_output(workflow_output, step, output)
File "lib/galaxy/model/__init__.py", line 4444, in add_output
if output_object.history_content_type == "dataset":
AttributeError: 'unicode' object has no attribute 'history_content_type'
galaxy.workflow.run ERROR 2019-01-23 23:20:06,736 [p:6571,w:0,m:3] [WorkflowRequestMonitor.monitor_thread] Failed to execute scheduled workflow.
Traceback (most recent call last):
File "lib/galaxy/workflow/run.py", line 83, in __invoke
outputs = invoker.invoke()
File "lib/galaxy/workflow/run.py", line 190, in invoke
incomplete_or_none = self._invoke_step(workflow_invocation_step)
File "lib/galaxy/workflow/run.py", line 266, in _invoke_step
use_cached_job=self.workflow_invocation.use_cached_job)
File "lib/galaxy/workflow/modules.py", line 702, in execute
progress.set_outputs_for_input(invocation_step, step_outputs)
File "lib/galaxy/workflow/run.py", line 397, in set_outputs_for_input
self.set_step_outputs(invocation_step, outputs)
File "lib/galaxy/workflow/run.py", line 428, in set_step_outputs
output=output,
File "lib/galaxy/workflow/run.py", line 432, in _record_workflow_output
self.workflow_invocation.add_output(workflow_output, step, output)
File "lib/galaxy/model/__init__.py", line 4444, in add_output
if output_object.history_content_type == "dataset":
AttributeError: 'unicode' object has no attribute 'history_content_type'
|
AttributeError
|
def url_get(base_url, password_mgr=None, pathspec=None, params=None):
"""Make contact with the uri provided and return any contents."""
# Uses system proxy settings if they exist.
proxy = urlrequest.ProxyHandler()
if password_mgr is not None:
auth = urlrequest.HTTPDigestAuthHandler(password_mgr)
urlopener = urlrequest.build_opener(proxy, auth)
else:
urlopener = urlrequest.build_opener(proxy)
urlrequest.install_opener(urlopener)
full_url = build_url(base_url, pathspec=pathspec, params=params)
response = urlopener.open(full_url)
content = response.read()
response.close()
return unicodify(content)
|
def url_get(base_url, password_mgr=None, pathspec=None, params=None):
"""Make contact with the uri provided and return any contents."""
# Uses system proxy settings if they exist.
proxy = urlrequest.ProxyHandler()
if password_mgr is not None:
auth = urlrequest.HTTPDigestAuthHandler(password_mgr)
urlopener = urlrequest.build_opener(proxy, auth)
else:
urlopener = urlrequest.build_opener(proxy)
urlrequest.install_opener(urlopener)
full_url = build_url(base_url, pathspec=pathspec, params=params)
response = urlopener.open(full_url)
content = response.read()
response.close()
return content
|
https://github.com/galaxyproject/galaxy/issues/7769
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/middleware/error.py", line 154, in __call__
app_iter = self.application(environ, sr_checker)
File "/home/nate/work/galaxy2/.venv/lib/python3.5/site-packages/paste/recursive.py", line 85, in __call__
return self.application(environ, start_response)
File "/home/nate/work/galaxy2/.venv/lib/python3.5/site-packages/paste/httpexceptions.py", line 640, in __call__
return self.application(environ, start_response)
File "lib/galaxy/web/framework/base.py", line 143, in __call__
return self.handle_request(environ, start_response)
File "lib/galaxy/web/framework/base.py", line 222, in handle_request
body = method(trans, **kwargs)
File "lib/galaxy/web/framework/decorators.py", line 101, in decorator
return func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py", line 892, in prepare_for_install
repo_information_dict = json.loads(raw_text)
File "/usr/lib/python3.5/json/__init__.py", line 312, in loads
s.__class__.__name__))
TypeError: the JSON object must be str, not 'bytes'
|
TypeError
|
def get_app_kwds(cls, config_section, app_name=None):
kwds = {
"config_file": None,
"config_section": config_section,
}
uwsgi_opt = uwsgi.opt
config_file = None
# check for --set galaxy_config_file=<path>, this overrides whatever config file uWSGI was loaded with (which
# may not actually include a Galaxy config)
if uwsgi_opt.get("galaxy_config_file"):
config_file = uwsgi_opt.get("galaxy_config_file")
# check for --yaml or --json uWSGI config options next
if config_file is None:
config_file = UWSGIApplicationStack._get_config_file(
uwsgi_opt.get("yaml"), yaml.safe_load, config_section
) or UWSGIApplicationStack._get_config_file(
uwsgi_opt.get("json"), json.load, config_section
)
# --ini and --ini-paste don't behave the same way, but this method will only be called by mules if the main
# application was loaded with --ini-paste, so we can make some assumptions, most notably, uWSGI does not have
# any way to set the app name when loading with paste.deploy:loadapp(), so hardcoding the alternate section
# name to `app:main` is fine.
has_ini_config = (
config_file is None and uwsgi_opt.get("ini") or uwsgi_opt.get("ini-paste")
)
has_ini_config = has_ini_config or (
config_file and has_ext(config_file, "ini", aliases=True, ignore="sample")
)
if has_ini_config:
config_file = config_file or uwsgi_opt.get("ini") or uwsgi_opt.get("ini-paste")
parser = nice_config_parser(config_file)
if not parser.has_section(config_section) and parser.has_section("app:main"):
kwds["config_section"] = "app:main"
kwds["config_file"] = unicodify(config_file)
return kwds
|
def get_app_kwds(cls, config_section, app_name=None):
kwds = {
"config_file": None,
"config_section": config_section,
}
uwsgi_opt = uwsgi.opt
config_file = None
# check for --set galaxy_config_file=<path>, this overrides whatever config file uWSGI was loaded with (which
# may not actually include a Galaxy config)
if uwsgi_opt.get("galaxy_config_file"):
config_file = uwsgi_opt.get("galaxy_config_file")
# check for --yaml or --json uWSGI config options next
if config_file is None:
config_file = UWSGIApplicationStack._get_config_file(
uwsgi_opt.get("yaml"), yaml.safe_load, config_section
) or UWSGIApplicationStack._get_config_file(
uwsgi_opt.get("json"), json.load, config_section
)
# --ini and --ini-paste don't behave the same way, but this method will only be called by mules if the main
# application was loaded with --ini-paste, so we can make some assumptions, most notably, uWSGI does not have
# any way to set the app name when loading with paste.deploy:loadapp(), so hardcoding the alternate section
# name to `app:main` is fine.
has_ini_config = (
config_file is None and uwsgi_opt.get("ini") or uwsgi_opt.get("ini-paste")
)
has_ini_config = has_ini_config or (
config_file and has_ext(config_file, "ini", aliases=True, ignore="sample")
)
if has_ini_config:
config_file = config_file or uwsgi_opt.get("ini") or uwsgi_opt.get("ini-paste")
parser = nice_config_parser(config_file)
if not parser.has_section(config_section) and parser.has_section("app:main"):
kwds["config_section"] = "app:main"
kwds["config_file"] = config_file
return kwds
|
https://github.com/galaxyproject/galaxy/issues/7714
|
Traceback (most recent call last):
File "lib/galaxy/webapps/galaxy/buildapp.py", line 184, in uwsgi_app
return galaxy.web.framework.webapp.build_native_uwsgi_app(app_factory, "galaxy")
File "lib/galaxy/web/framework/webapp.py", line 965, in build_native_uwsgi_app
uwsgi_app = paste_factory({}, load_app_kwds=app_kwds)
File "lib/galaxy/webapps/galaxy/buildapp.py", line 41, in app_factory
**load_app_kwds
File "lib/galaxy/util/properties.py", line 84, in load_app_properties
properties.update(dict(parser.items(config_section)))
File "/usr/lib/python3.5/configparser.py", line 855, in items
return [(option, value_getter(option)) for option in d.keys()]
File "/usr/lib/python3.5/configparser.py", line 855, in <listcomp>
return [(option, value_getter(option)) for option in d.keys()]
File "/usr/lib/python3.5/configparser.py", line 852, in <lambda>
section, option, d[option], d)
File "lib/galaxy/util/properties.py", line 162, in before_get
value, defaults)
File "/usr/lib/python3.5/configparser.py", line 393, in before_get
self._interpolate_some(parser, option, L, value, section, defaults, 1)
File "/usr/lib/python3.5/configparser.py", line 410, in _interpolate_some
p = rest.find("%")
TypeError: Error in file b'/path/to/galaxy/config/galaxy.ini': a bytes-like object is required, not 'str'
|
TypeError
|
def guess_ext(fname, sniff_order, is_binary=False):
"""
Returns an extension that can be used in the datatype factory to
generate a data for the 'fname' file
>>> from galaxy.datatypes.registry import example_datatype_registry_for_sample
>>> datatypes_registry = example_datatype_registry_for_sample()
>>> sniff_order = datatypes_registry.sniff_order
>>> fname = get_test_fname('megablast_xml_parser_test1.blastxml')
>>> guess_ext(fname, sniff_order)
'blastxml'
>>> fname = get_test_fname('interval.interval')
>>> guess_ext(fname, sniff_order)
'interval'
>>> fname = get_test_fname('interval1.bed')
>>> guess_ext(fname, sniff_order)
'bed'
>>> fname = get_test_fname('test_tab.bed')
>>> guess_ext(fname, sniff_order)
'bed'
>>> fname = get_test_fname('sequence.maf')
>>> guess_ext(fname, sniff_order)
'maf'
>>> fname = get_test_fname('sequence.fasta')
>>> guess_ext(fname, sniff_order)
'fasta'
>>> fname = get_test_fname('1.genbank')
>>> guess_ext(fname, sniff_order)
'genbank'
>>> fname = get_test_fname('1.genbank.gz')
>>> guess_ext(fname, sniff_order)
'genbank.gz'
>>> fname = get_test_fname('file.html')
>>> guess_ext(fname, sniff_order)
'html'
>>> fname = get_test_fname('test.gtf')
>>> guess_ext(fname, sniff_order)
'gtf'
>>> fname = get_test_fname('test.gff')
>>> guess_ext(fname, sniff_order)
'gff'
>>> fname = get_test_fname('gff_version_3.gff')
>>> guess_ext(fname, sniff_order)
'gff3'
>>> fname = get_test_fname('2.txt')
>>> guess_ext(fname, sniff_order) # 2.txt
'txt'
>>> fname = get_test_fname('2.tabular')
>>> guess_ext(fname, sniff_order)
'tabular'
>>> fname = get_test_fname('3.txt')
>>> guess_ext(fname, sniff_order) # 3.txt
'txt'
>>> fname = get_test_fname('test_tab1.tabular')
>>> guess_ext(fname, sniff_order)
'tabular'
>>> fname = get_test_fname('alignment.lav')
>>> guess_ext(fname, sniff_order)
'lav'
>>> fname = get_test_fname('1.sff')
>>> guess_ext(fname, sniff_order)
'sff'
>>> fname = get_test_fname('1.bam')
>>> guess_ext(fname, sniff_order)
'bam'
>>> fname = get_test_fname('3unsorted.bam')
>>> guess_ext(fname, sniff_order)
'unsorted.bam'
>>> fname = get_test_fname('test.idpDB')
>>> guess_ext(fname, sniff_order)
'idpdb'
>>> fname = get_test_fname('test.mz5')
>>> guess_ext(fname, sniff_order)
'h5'
>>> fname = get_test_fname('issue1818.tabular')
>>> guess_ext(fname, sniff_order)
'tabular'
>>> fname = get_test_fname('drugbank_drugs.cml')
>>> guess_ext(fname, sniff_order)
'cml'
>>> fname = get_test_fname('q.fps')
>>> guess_ext(fname, sniff_order)
'fps'
>>> fname = get_test_fname('drugbank_drugs.inchi')
>>> guess_ext(fname, sniff_order)
'inchi'
>>> fname = get_test_fname('drugbank_drugs.mol2')
>>> guess_ext(fname, sniff_order)
'mol2'
>>> fname = get_test_fname('drugbank_drugs.sdf')
>>> guess_ext(fname, sniff_order)
'sdf'
>>> fname = get_test_fname('5e5z.pdb')
>>> guess_ext(fname, sniff_order)
'pdb'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.otu')
>>> guess_ext(fname, sniff_order)
'mothur.otu'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.lower.dist')
>>> guess_ext(fname, sniff_order)
'mothur.lower.dist'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.square.dist')
>>> guess_ext(fname, sniff_order)
'mothur.square.dist'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.pair.dist')
>>> guess_ext(fname, sniff_order)
'mothur.pair.dist'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.freq')
>>> guess_ext(fname, sniff_order)
'mothur.freq'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.quan')
>>> guess_ext(fname, sniff_order)
'mothur.quan'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.ref.taxonomy')
>>> guess_ext(fname, sniff_order)
'mothur.ref.taxonomy'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.axes')
>>> guess_ext(fname, sniff_order)
'mothur.axes'
>>> guess_ext(get_test_fname('infernal_model.cm'), sniff_order)
'cm'
>>> fname = get_test_fname('1.gg')
>>> guess_ext(fname, sniff_order)
'gg'
>>> fname = get_test_fname('diamond_db.dmnd')
>>> guess_ext(fname, sniff_order)
'dmnd'
>>> fname = get_test_fname('1.xls')
>>> guess_ext(fname, sniff_order)
'excel.xls'
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom')
>>> guess_ext(fname, sniff_order)
'biom2'
>>> fname = get_test_fname('454Score.pdf')
>>> guess_ext(fname, sniff_order)
'pdf'
>>> fname = get_test_fname('1.obo')
>>> guess_ext(fname, sniff_order)
'obo'
>>> fname = get_test_fname('1.arff')
>>> guess_ext(fname, sniff_order)
'arff'
>>> fname = get_test_fname('1.afg')
>>> guess_ext(fname, sniff_order)
'afg'
>>> fname = get_test_fname('1.owl')
>>> guess_ext(fname, sniff_order)
'owl'
>>> fname = get_test_fname('Acanium.hmm')
>>> guess_ext(fname, sniff_order)
'snaphmm'
>>> fname = get_test_fname('wiggle.wig')
>>> guess_ext(fname, sniff_order)
'wig'
>>> fname = get_test_fname('example.iqtree')
>>> guess_ext(fname, sniff_order)
'iqtree'
>>> fname = get_test_fname('1.stockholm')
>>> guess_ext(fname, sniff_order)
'stockholm'
>>> fname = get_test_fname('1.xmfa')
>>> guess_ext(fname, sniff_order)
'xmfa'
>>> fname = get_test_fname('test.blib')
>>> guess_ext(fname, sniff_order)
'blib'
>>> fname = get_test_fname('test.phylip')
>>> guess_ext(fname, sniff_order)
'phylip'
>>> fname = get_test_fname('1.smat')
>>> guess_ext(fname, sniff_order)
'smat'
>>> fname = get_test_fname('1.ttl')
>>> guess_ext(fname, sniff_order)
'ttl'
>>> fname = get_test_fname('1.hdt')
>>> guess_ext(fname, sniff_order)
'hdt'
>>> fname = get_test_fname('1.phyloxml')
>>> guess_ext(fname, sniff_order)
'phyloxml'
>>> fname = get_test_fname('1.tiff')
>>> guess_ext(fname, sniff_order)
'tiff'
>>> fname = get_test_fname('1.fastqsanger.gz')
>>> guess_ext(fname, sniff_order) # See test_datatype_registry for more compressed type tests.
'fastqsanger.gz'
"""
file_prefix = FilePrefix(fname)
file_ext = run_sniffers_raw(file_prefix, sniff_order, is_binary)
# Ugly hack for tsv vs tabular sniffing, we want to prefer tabular
# to tsv but it doesn't have a sniffer - is TSV was sniffed just check
# if it is an okay tabular and use that instead.
if file_ext == "tsv":
if is_column_based(file_prefix, "\t", 1):
file_ext = "tabular"
if file_ext is not None:
return file_ext
# skip header check if data is already known to be binary
if is_binary:
return file_ext or "binary"
try:
get_headers(file_prefix, None)
except UnicodeDecodeError:
return "data" # default data type file extension
if is_column_based(file_prefix, "\t", 1):
return "tabular" # default tabular data type file extension
return "txt" # default text data type file extension
|
def guess_ext(fname, sniff_order, is_binary=False):
"""
Returns an extension that can be used in the datatype factory to
generate a data for the 'fname' file
>>> from galaxy.datatypes.registry import example_datatype_registry_for_sample
>>> datatypes_registry = example_datatype_registry_for_sample()
>>> sniff_order = datatypes_registry.sniff_order
>>> fname = get_test_fname('megablast_xml_parser_test1.blastxml')
>>> guess_ext(fname, sniff_order)
'blastxml'
>>> fname = get_test_fname('interval.interval')
>>> guess_ext(fname, sniff_order)
'interval'
>>> fname = get_test_fname('interval1.bed')
>>> guess_ext(fname, sniff_order)
'bed'
>>> fname = get_test_fname('test_tab.bed')
>>> guess_ext(fname, sniff_order)
'bed'
>>> fname = get_test_fname('sequence.maf')
>>> guess_ext(fname, sniff_order)
'maf'
>>> fname = get_test_fname('sequence.fasta')
>>> guess_ext(fname, sniff_order)
'fasta'
>>> fname = get_test_fname('1.genbank')
>>> guess_ext(fname, sniff_order)
'genbank'
>>> fname = get_test_fname('1.genbank.gz')
>>> guess_ext(fname, sniff_order)
'genbank.gz'
>>> fname = get_test_fname('file.html')
>>> guess_ext(fname, sniff_order)
'html'
>>> fname = get_test_fname('test.gtf')
>>> guess_ext(fname, sniff_order)
'gtf'
>>> fname = get_test_fname('test.gff')
>>> guess_ext(fname, sniff_order)
'gff'
>>> fname = get_test_fname('gff_version_3.gff')
>>> guess_ext(fname, sniff_order)
'gff3'
>>> fname = get_test_fname('2.txt')
>>> guess_ext(fname, sniff_order) # 2.txt
'txt'
>>> fname = get_test_fname('2.tabular')
>>> guess_ext(fname, sniff_order)
'tabular'
>>> fname = get_test_fname('3.txt')
>>> guess_ext(fname, sniff_order) # 3.txt
'txt'
>>> fname = get_test_fname('test_tab1.tabular')
>>> guess_ext(fname, sniff_order)
'tabular'
>>> fname = get_test_fname('alignment.lav')
>>> guess_ext(fname, sniff_order)
'lav'
>>> fname = get_test_fname('1.sff')
>>> guess_ext(fname, sniff_order)
'sff'
>>> fname = get_test_fname('1.bam')
>>> guess_ext(fname, sniff_order)
'bam'
>>> fname = get_test_fname('3unsorted.bam')
>>> guess_ext(fname, sniff_order)
'unsorted.bam'
>>> fname = get_test_fname('test.idpDB')
>>> guess_ext(fname, sniff_order)
'idpdb'
>>> fname = get_test_fname('test.mz5')
>>> guess_ext(fname, sniff_order)
'h5'
>>> fname = get_test_fname('issue1818.tabular')
>>> guess_ext(fname, sniff_order)
'tabular'
>>> fname = get_test_fname('drugbank_drugs.cml')
>>> guess_ext(fname, sniff_order)
'cml'
>>> fname = get_test_fname('q.fps')
>>> guess_ext(fname, sniff_order)
'fps'
>>> fname = get_test_fname('drugbank_drugs.inchi')
>>> guess_ext(fname, sniff_order)
'inchi'
>>> fname = get_test_fname('drugbank_drugs.mol2')
>>> guess_ext(fname, sniff_order)
'mol2'
>>> fname = get_test_fname('drugbank_drugs.sdf')
>>> guess_ext(fname, sniff_order)
'sdf'
>>> fname = get_test_fname('5e5z.pdb')
>>> guess_ext(fname, sniff_order)
'pdb'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.otu')
>>> guess_ext(fname, sniff_order)
'mothur.otu'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.lower.dist')
>>> guess_ext(fname, sniff_order)
'mothur.lower.dist'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.square.dist')
>>> guess_ext(fname, sniff_order)
'mothur.square.dist'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.pair.dist')
>>> guess_ext(fname, sniff_order)
'mothur.pair.dist'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.freq')
>>> guess_ext(fname, sniff_order)
'mothur.freq'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.quan')
>>> guess_ext(fname, sniff_order)
'mothur.quan'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.ref.taxonomy')
>>> guess_ext(fname, sniff_order)
'mothur.ref.taxonomy'
>>> fname = get_test_fname('mothur_datatypetest_true.mothur.axes')
>>> guess_ext(fname, sniff_order)
'mothur.axes'
>>> guess_ext(get_test_fname('infernal_model.cm'), sniff_order)
'cm'
>>> fname = get_test_fname('1.gg')
>>> guess_ext(fname, sniff_order)
'gg'
>>> fname = get_test_fname('diamond_db.dmnd')
>>> guess_ext(fname, sniff_order)
'dmnd'
>>> fname = get_test_fname('1.xls')
>>> guess_ext(fname, sniff_order)
'excel.xls'
>>> fname = get_test_fname('biom2_sparse_otu_table_hdf5.biom')
>>> guess_ext(fname, sniff_order)
'biom2'
>>> fname = get_test_fname('454Score.pdf')
>>> guess_ext(fname, sniff_order)
'pdf'
>>> fname = get_test_fname('1.obo')
>>> guess_ext(fname, sniff_order)
'obo'
>>> fname = get_test_fname('1.arff')
>>> guess_ext(fname, sniff_order)
'arff'
>>> fname = get_test_fname('1.afg')
>>> guess_ext(fname, sniff_order)
'afg'
>>> fname = get_test_fname('1.owl')
>>> guess_ext(fname, sniff_order)
'owl'
>>> fname = get_test_fname('Acanium.hmm')
>>> guess_ext(fname, sniff_order)
'snaphmm'
>>> fname = get_test_fname('wiggle.wig')
>>> guess_ext(fname, sniff_order)
'wig'
>>> fname = get_test_fname('example.iqtree')
>>> guess_ext(fname, sniff_order)
'iqtree'
>>> fname = get_test_fname('1.stockholm')
>>> guess_ext(fname, sniff_order)
'stockholm'
>>> fname = get_test_fname('1.xmfa')
>>> guess_ext(fname, sniff_order)
'xmfa'
>>> fname = get_test_fname('test.blib')
>>> guess_ext(fname, sniff_order)
'blib'
>>> fname = get_test_fname('test.phylip')
>>> guess_ext(fname, sniff_order)
'phylip'
>>> fname = get_test_fname('1.smat')
>>> guess_ext(fname, sniff_order)
'smat'
>>> fname = get_test_fname('1.ttl')
>>> guess_ext(fname, sniff_order)
'ttl'
>>> fname = get_test_fname('1.hdt')
>>> guess_ext(fname, sniff_order)
'hdt'
>>> fname = get_test_fname('1.phyloxml')
>>> guess_ext(fname, sniff_order)
'phyloxml'
>>> fname = get_test_fname('1.fastqsanger.gz')
>>> guess_ext(fname, sniff_order) # See test_datatype_registry for more compressed type tests.
'fastqsanger.gz'
"""
file_prefix = FilePrefix(fname)
file_ext = run_sniffers_raw(file_prefix, sniff_order, is_binary)
# Ugly hack for tsv vs tabular sniffing, we want to prefer tabular
# to tsv but it doesn't have a sniffer - is TSV was sniffed just check
# if it is an okay tabular and use that instead.
if file_ext == "tsv":
if is_column_based(file_prefix, "\t", 1):
file_ext = "tabular"
if file_ext is not None:
return file_ext
# skip header check if data is already known to be binary
if is_binary:
return file_ext or "binary"
try:
get_headers(file_prefix, None)
except UnicodeDecodeError:
return "data" # default data type file extension
if is_column_based(file_prefix, "\t", 1):
return "tabular" # default tabular data type file extension
return "txt" # default text data type file extension
|
https://github.com/galaxyproject/galaxy/issues/6209
|
Fatal error: Exit code 1 ()
Traceback (most recent call last):
File "/Users/guq/galaxy/tools/data_source/upload.py", line 323, in <module>
__main__()
File "/Users/guq/galaxy/tools/data_source/upload.py", line 316, in __main__
metadata.append(add_file(dataset, registry, output_path))
File "/Users/guq/galaxy/tools/data_source/upload.py", line 150, in add_file
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_binary=is_binary)
File "/Users/guq/galaxy/lib/galaxy/datatypes/sniff.py", line 484, in guess_ext
get_headers(file_prefix, None)
File "/Users/guq/galaxy/lib/galaxy/datatypes/sniff.py", line 230, in get_headers
return list(iter_headers(fname_or_file_prefix=fname_or_file_prefix, sep=sep, count=count, comment_designator=comment_designator))
File "/Users/guq/galaxy/lib/galaxy/datatypes/sniff.py", line 197, in iter_headers
for line in fname_or_file_prefix.line_iterator():
File "/Users/guq/galaxy/lib/galaxy/datatypes/sniff.py", line 590, in line_iterator
s = self.string_io()
File "/Users/guq/galaxy/lib/galaxy/datatypes/sniff.py", line 582, in string_io
raise Exception("Attempting to create a StringIO object for binary data.")
Exception: Attempting to create a StringIO object for binary data.
|
Exception
|
def __int__(self):
return int(float(self))
|
def __int__(self):
return int(str(self))
|
https://github.com/galaxyproject/galaxy/issues/998
|
Traceback (most recent call last):
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/jobs/runners/__init__.py", line 163, in prepare_job
job_wrapper.prepare()
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/jobs/__init__.py", line 862, in prepare
self.command_line, self.extra_filenames, self.environment_variables = tool_evaluator.build()
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/evaluation.py", line 422, in build
raise e
ValueError: invalid literal for int() with base 10: '-1.0'
|
ValueError
|
def execute(
self,
cmd,
persist=False,
timeout=DEFAULT_TIMEOUT,
timeout_check_interval=DEFAULT_TIMEOUT_CHECK_INTERVAL,
**kwds,
):
is_cmd_string = isinstance(cmd, six.string_types)
outf = TemporaryFile()
p = Popen(cmd, stdin=None, stdout=outf, stderr=PIPE, shell=is_cmd_string)
# poll until timeout
for i in range(int(timeout / timeout_check_interval)):
sleep(0.1) # For fast returning commands
r = p.poll()
if r is not None:
break
sleep(timeout_check_interval)
else:
kill_pid(p.pid)
return Bunch(
stdout="", stderr=TIMEOUT_ERROR_MESSAGE, returncode=TIMEOUT_RETURN_CODE
)
outf.seek(0)
return Bunch(
stdout=_read_str(outf), stderr=_read_str(p.stderr), returncode=p.returncode
)
|
def execute(
self,
cmd,
persist=False,
timeout=DEFAULT_TIMEOUT,
timeout_check_interval=DEFAULT_TIMEOUT_CHECK_INTERVAL,
**kwds,
):
outf = TemporaryFile()
p = Popen(cmd, stdin=None, stdout=outf, stderr=PIPE)
# poll until timeout
for i in range(int(timeout / timeout_check_interval)):
sleep(0.1) # For fast returning commands
r = p.poll()
if r is not None:
break
sleep(timeout_check_interval)
else:
kill_pid(p.pid)
return Bunch(
stdout="", stderr=TIMEOUT_ERROR_MESSAGE, returncode=TIMEOUT_RETURN_CODE
)
outf.seek(0)
return Bunch(
stdout=_read_str(outf), stderr=_read_str(p.stderr), returncode=p.returncode
)
|
https://github.com/galaxyproject/galaxy/issues/7269
|
galaxy.jobs.runners.cli DEBUG 2019-01-25 19:13:53,789 [p:3868,w:1,m:0] [ShellRunner.work_thread-0] (5) submitting file: /opt/galaxy/database/jobs_directory/000/5/galaxy_5.s
h
galaxy.jobs.runners ERROR 2019-01-25 19:13:53,964 [p:3868,w:1,m:0] [ShellRunner.work_thread-0] (5) Unhandled exception calling queue_job
Traceback (most recent call last):
File "lib/galaxy/jobs/runners/__init__.py", line 113, in run_next
method(arg)
File "lib/galaxy/jobs/runners/cli.py", line 98, in queue_job
returncode, stdout = self.submit(shell, job_interface, ajs.job_file, galaxy_id_tag, retry=MAX_SUBMIT_RETRY)
File "lib/galaxy/jobs/runners/cli.py", line 130, in submit
cmd_out = shell.execute(job_interface.submit(job_file))
File "lib/galaxy/jobs/runners/util/cli/shell/local.py", line 47, in execute
p = Popen(cmd, stdin=None, stdout=outf, stderr=PIPE)
File "/usr/lib/python2.7/subprocess.py", line 394, in __init__
errread, errwrite)
File "/usr/lib/python2.7/subprocess.py", line 1047, in _execute_child
raise child_exception
OSError: [Errno 2] No such file or directory
|
OSError
|
def top_level_workflow(self):
"""If this workflow is not attached to stored workflow directly,
recursively grab its parents until it is the top level workflow
which must have a stored workflow associated with it.
"""
top_level_workflow = self
if self.stored_workflow is None:
# TODO: enforce this at creation...
assert len(set(w.uuid for w in self.parent_workflow_steps)) == 1
return self.parent_workflow_steps[0].workflow.top_level_workflow
return top_level_workflow
|
def top_level_workflow(self):
"""If this workflow is not attached to stored workflow directly,
recursively grab its parents until it is the top level workflow
which must have a stored workflow associated with it.
"""
top_level_workflow = self
if self.stored_workflow is None:
# TODO: enforce this at creation...
assert len(self.parent_workflow_steps) == 1
return self.parent_workflow_steps[0].workflow.top_level_workflow
return top_level_workflow
|
https://github.com/galaxyproject/galaxy/issues/7261
|
Traceback (most recent call last):
File "/Users/mvandenb/src/galaxy/lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "/Users/mvandenb/src/galaxy/lib/galaxy/webapps/galaxy/api/workflows.py", line 545, in update
**from_dict_kwds
File "/Users/mvandenb/src/galaxy/lib/galaxy/managers/workflows.py", line 362, in update_workflow_from_raw_description
**kwds
File "/Users/mvandenb/src/galaxy/lib/galaxy/managers/workflows.py", line 417, in _workflow_from_raw_description
self.__load_subworkflows(trans, step_dict, subworkflow_id_map, **kwds)
File "/Users/mvandenb/src/galaxy/lib/galaxy/managers/workflows.py", line 1069, in __load_subworkflows
trans, step_dict, subworkflow_id_map, **kwds
File "/Users/mvandenb/src/galaxy/lib/galaxy/managers/workflows.py", line 1151, in __load_subworkflow_from_step_dict
trans, subworkflow_id
File "/Users/mvandenb/src/galaxy/lib/galaxy/managers/workflows.py", line 105, in get_owned_workflow
self.check_security(trans, workflow, check_ownership=True)
File "/Users/mvandenb/src/galaxy/lib/galaxy/managers/workflows.py", line 128, in check_security
stored_workflow = has_workflow.top_level_stored_workflow
File "/Users/mvandenb/src/galaxy/lib/galaxy/model/__init__.py", line 4063, in top_level_stored_workflow
return self.top_level_workflow.stored_workflow
File "/Users/mvandenb/src/galaxy/lib/galaxy/model/__init__.py", line 4052, in top_level_workflow
assert len(self.parent_workflow_steps) == 1
AssertionError
|
AssertionError
|
def update_permissions(self, trans, dataset_id, payload, **kwd):
"""
PUT /api/datasets/{encoded_dataset_id}/permissions
Updates permissions of a dataset.
:rtype: dict
:returns: dictionary containing new permissions
"""
if payload:
kwd.update(payload)
hda_ldda = kwd.get("hda_ldda", "hda")
dataset_assoc = self.get_hda_or_ldda(
trans, hda_ldda=hda_ldda, dataset_id=dataset_id
)
if hda_ldda == "hda":
self.hda_manager.update_permissions(trans, dataset_assoc, **kwd)
return self.hda_manager.serialize_dataset_association_roles(
trans, dataset_assoc
)
else:
self.ldda_manager.update_permissions(trans, dataset_assoc, **kwd)
return self.ldda_manager.serialize_dataset_association_roles(
trans, dataset_assoc
)
|
def update_permissions(self, trans, dataset_id, payload, **kwd):
"""
PUT /api/datasets/{encoded_dataset_id}/permissions
Updates permissions of a dataset.
:rtype: dict
:returns: dictionary containing new permissions
"""
if payload:
kwd.update(payload)
hda_ldda = kwd.get("hda_ldda", "hda")
dataset_assoc = self.get_hda_or_ldda(
trans, hda_ldda=hda_ldda, dataset_id=dataset_id
)
if hda_ldda == "hda":
self.hda_manager.update_permissions(trans, dataset_assoc, **kwd)
return self.hda_manager.serialize_dataset_association_roles(
trans, dataset_assoc
)
else:
self.ldda_manager.update_permissions(trans, dataset_assoc, **kwd)
return self.hda_manager.serialize_dataset_association_roles(
trans, dataset_assoc
)
|
https://github.com/galaxyproject/galaxy/issues/7108
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator rval = func(self, trans, *args, **kwargs) File "lib/galaxy/webapps/galaxy/api/library_datasets.py", line 125, in show_roles return self._get_current_roles(trans, library_dataset)
File "lib/galaxy/webapps/galaxy/api/library_datasets.py", line 158, in _get_current_roles return self.serialize_dataset_association_roles(library_dataset)AttributeError: 'LibraryDatasetsController' object has no attribute 'serialize_dataset_association_roles'
|
AttributeError
|
def _get_current_roles(self, trans, library_dataset):
"""
Find all roles currently connected to relevant permissions
on the library dataset and the underlying dataset.
:param library_dataset: the model object
:type library_dataset: LibraryDataset
:rtype: dictionary
:returns: dict of current roles for all available permission types
"""
return self.ldda_manager.serialize_dataset_association_roles(trans, library_dataset)
|
def _get_current_roles(self, trans, library_dataset):
"""
Find all roles currently connected to relevant permissions
on the library dataset and the underlying dataset.
:param library_dataset: the model object
:type library_dataset: LibraryDataset
:rtype: dictionary
:returns: dict of current roles for all available permission types
"""
return self.serialize_dataset_association_roles(library_dataset)
|
https://github.com/galaxyproject/galaxy/issues/7108
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator rval = func(self, trans, *args, **kwargs) File "lib/galaxy/webapps/galaxy/api/library_datasets.py", line 125, in show_roles return self._get_current_roles(trans, library_dataset)
File "lib/galaxy/webapps/galaxy/api/library_datasets.py", line 158, in _get_current_roles return self.serialize_dataset_association_roles(library_dataset)AttributeError: 'LibraryDatasetsController' object has no attribute 'serialize_dataset_association_roles'
|
AttributeError
|
def process_key(incoming_key, incoming_value, d):
key_parts = incoming_key.split("|")
if len(key_parts) == 1:
# Regular parameter
d[incoming_key] = incoming_value
elif key_parts[0].rsplit("_", 1)[-1].isdigit():
# Repeat
input_name_index = key_parts[0].rsplit("_", 1)
input_name, index = input_name_index
index = int(index)
if input_name not in d:
d[input_name] = []
if len(d[input_name]) > index:
subdict = d[input_name][index]
else:
subdict = {}
d[input_name].append(subdict)
process_key("|".join(key_parts[1:]), incoming_value=incoming_value, d=subdict)
else:
# Section / Conditional
input_name = key_parts[0]
subdict = {}
d[input_name] = subdict
process_key("|".join(key_parts[1:]), incoming_value=incoming_value, d=subdict)
|
def process_key(incoming_key, d):
key_parts = incoming_key.split("|")
if len(key_parts) == 1:
# Regular parameter
d[incoming_key] = object()
elif key_parts[0].rsplit("_", 1)[-1].isdigit():
# Repeat
input_name_index = key_parts[0].rsplit("_", 1)
input_name, index = input_name_index
if input_name not in d:
d[input_name] = []
subdict = {}
d[input_name].append(subdict)
process_key("|".join(key_parts[1:]), d=subdict)
else:
# Section / Conditional
input_name = key_parts[0]
subdict = {}
d[input_name] = subdict
process_key("|".join(key_parts[1:]), d=subdict)
|
https://github.com/galaxyproject/galaxy/issues/7048
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/api/tools.py", line 418, in create
return self._create(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/api/tools.py", line 476, in _create
vars = tool.handle_input(trans, incoming, history=target_history, use_cached_job=use_cached_job)
File "lib/galaxy/tools/__init__.py", line 1385, in handle_input
all_params, all_errors, rerun_remap_job_id, collection_info = self.expand_incoming(trans=trans, incoming=incoming, request_context=request_context)
File "lib/galaxy/tools/__init__.py", line 1340, in expand_incoming
expanded_incomings, collection_info = expand_meta_parameters(trans, self, incoming)
File "lib/galaxy/tools/parameters/meta.py", line 118, in expand_meta_parameters
process_key(incoming_key, d=nested_dict)
File "lib/galaxy/tools/parameters/meta.py", line 87, in process_key
d[input_name].append(subdict)
AttributeError: 'object' object has no attribute 'append'
|
AttributeError
|
def expand_meta_parameters(trans, tool, incoming):
"""
Take in a dictionary of raw incoming parameters and expand to a list
of expanded incoming parameters (one set of parameters per tool
execution).
"""
to_remove = []
for key in incoming.keys():
if key.endswith("|__identifier__"):
to_remove.append(key)
for key in to_remove:
incoming.pop(key)
# If we're going to multiply input dataset combinations
# order matters, so the following reorders incoming
# according to tool.inputs (which is ordered).
incoming_copy = incoming.copy()
nested_dict = {}
for incoming_key, incoming_value in incoming_copy.items():
if not incoming_key.startswith("__"):
process_key(incoming_key, incoming_value=incoming_value, d=nested_dict)
reordered_incoming = OrderedDict()
def visitor(input, value, prefix, prefixed_name, prefixed_label, error, **kwargs):
if prefixed_name in incoming_copy:
reordered_incoming[prefixed_name] = incoming_copy[prefixed_name]
del incoming_copy[prefixed_name]
visit_input_values(inputs=tool.inputs, input_values=nested_dict, callback=visitor)
reordered_incoming.update(incoming_copy)
def classifier(input_key):
value = incoming[input_key]
if isinstance(value, dict) and "values" in value:
# Explicit meta wrapper for inputs...
is_batch = value.get("batch", False)
is_linked = value.get("linked", True)
if is_batch and is_linked:
classification = permutations.input_classification.MATCHED
elif is_batch:
classification = permutations.input_classification.MULTIPLIED
else:
classification = permutations.input_classification.SINGLE
if __collection_multirun_parameter(value):
collection_value = value["values"][0]
values = __expand_collection_parameter(
trans,
input_key,
collection_value,
collections_to_match,
linked=is_linked,
)
else:
values = value["values"]
else:
classification = permutations.input_classification.SINGLE
values = value
return classification, values
from galaxy.dataset_collections import matching
collections_to_match = matching.CollectionsToMatch()
# Stick an unexpanded version of multirun keys so they can be replaced,
# by expand_mult_inputs.
incoming_template = reordered_incoming
expanded_incomings = permutations.expand_multi_inputs(incoming_template, classifier)
if collections_to_match.has_collections():
collection_info = trans.app.dataset_collections_service.match_collections(
collections_to_match
)
else:
collection_info = None
return expanded_incomings, collection_info
|
def expand_meta_parameters(trans, tool, incoming):
"""
Take in a dictionary of raw incoming parameters and expand to a list
of expanded incoming parameters (one set of parameters per tool
execution).
"""
to_remove = []
for key in incoming.keys():
if key.endswith("|__identifier__"):
to_remove.append(key)
for key in to_remove:
incoming.pop(key)
# If we're going to multiply input dataset combinations
# order matters, so the following reorders incoming
# according to tool.inputs (which is ordered).
incoming_copy = incoming.copy()
nested_dict = {}
for incoming_key in incoming_copy:
if not incoming_key.startswith("__"):
process_key(incoming_key, d=nested_dict)
reordered_incoming = OrderedDict()
def visitor(input, value, prefix, prefixed_name, prefixed_label, error, **kwargs):
if prefixed_name in incoming_copy:
reordered_incoming[prefixed_name] = incoming_copy[prefixed_name]
del incoming_copy[prefixed_name]
visit_input_values(inputs=tool.inputs, input_values=nested_dict, callback=visitor)
reordered_incoming.update(incoming_copy)
def classifier(input_key):
value = incoming[input_key]
if isinstance(value, dict) and "values" in value:
# Explicit meta wrapper for inputs...
is_batch = value.get("batch", False)
is_linked = value.get("linked", True)
if is_batch and is_linked:
classification = permutations.input_classification.MATCHED
elif is_batch:
classification = permutations.input_classification.MULTIPLIED
else:
classification = permutations.input_classification.SINGLE
if __collection_multirun_parameter(value):
collection_value = value["values"][0]
values = __expand_collection_parameter(
trans,
input_key,
collection_value,
collections_to_match,
linked=is_linked,
)
else:
values = value["values"]
else:
classification = permutations.input_classification.SINGLE
values = value
return classification, values
from galaxy.dataset_collections import matching
collections_to_match = matching.CollectionsToMatch()
# Stick an unexpanded version of multirun keys so they can be replaced,
# by expand_mult_inputs.
incoming_template = reordered_incoming
expanded_incomings = permutations.expand_multi_inputs(incoming_template, classifier)
if collections_to_match.has_collections():
collection_info = trans.app.dataset_collections_service.match_collections(
collections_to_match
)
else:
collection_info = None
return expanded_incomings, collection_info
|
https://github.com/galaxyproject/galaxy/issues/7048
|
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 283, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/api/tools.py", line 418, in create
return self._create(trans, payload, **kwd)
File "lib/galaxy/webapps/galaxy/api/tools.py", line 476, in _create
vars = tool.handle_input(trans, incoming, history=target_history, use_cached_job=use_cached_job)
File "lib/galaxy/tools/__init__.py", line 1385, in handle_input
all_params, all_errors, rerun_remap_job_id, collection_info = self.expand_incoming(trans=trans, incoming=incoming, request_context=request_context)
File "lib/galaxy/tools/__init__.py", line 1340, in expand_incoming
expanded_incomings, collection_info = expand_meta_parameters(trans, self, incoming)
File "lib/galaxy/tools/parameters/meta.py", line 118, in expand_meta_parameters
process_key(incoming_key, d=nested_dict)
File "lib/galaxy/tools/parameters/meta.py", line 87, in process_key
d[input_name].append(subdict)
AttributeError: 'object' object has no attribute 'append'
|
AttributeError
|
def get_private_user_role(self, user, auto_create=False):
role = (
self.sa_session.query(self.model.Role)
.filter(
and_(
self.model.UserRoleAssociation.table.c.user_id == user.id,
self.model.Role.table.c.id
== self.model.UserRoleAssociation.table.c.role_id,
self.model.Role.table.c.type == self.model.Role.types.PRIVATE,
)
)
.first()
)
if not role:
if auto_create:
return self.create_private_user_role(user)
else:
return None
return role
|
def get_private_user_role(self, user, auto_create=False):
role = (
self.sa_session.query(self.model.Role)
.filter(
and_(
self.model.Role.table.c.name == user.email,
self.model.Role.table.c.type == self.model.Role.types.PRIVATE,
)
)
.first()
)
if not role:
if auto_create:
return self.create_private_user_role(user)
else:
return None
return role
|
https://github.com/galaxyproject/galaxy/issues/6756
|
galaxy.web.framework.decorators ERROR 2018-09-21 22:02:02,371 [p:10163,w:3,m:0] [uWSGIWorker3Core1] Uncaught exception in exposed API method:
Traceback (most recent call last):
File "/work/project/w4m/galaxy4metabolomics/galaxy/lib/galaxy/web/framework/decorators.py", line 154, in decorator
rval = func(self, trans, *args, **kwargs)
File "/work/project/w4m/galaxy4metabolomics/galaxy/lib/galaxy/webapps/galaxy/controllers/dataset.py", line 282, in get_edit
all_roles = [(r.name, trans.security.encode_id(r.id)) for r in trans.app.security_agent.get_legitimate_roles(trans, data.dataset, 'root')]
File "/work/project/w4m/galaxy4metabolomics/galaxy/lib/galaxy/security/__init__.py", line 345, in get_legitimate_roles
return self.get_all_roles(trans, cntrller)
File "/work/project/w4m/galaxy4metabolomics/galaxy/lib/galaxy/security/__init__.py", line 211, in get_all_roles
return self.sort_by_attr([role for role in roles], 'name')
File "/work/project/w4m/galaxy4metabolomics/galaxy/lib/galaxy/security/__init__.py", line 177, in sort_by_attr
intermed = [(getattr(v, attr), i, v) for i, v in enumerate(seq)]
AttributeError: 'NoneType' object has no attribute 'name'
|
AttributeError
|
def __init__(
self,
config_file,
tool_source,
app,
guid=None,
repository_id=None,
allow_code_files=True,
):
"""Load a tool from the config named by `config_file`"""
# Determine the full path of the directory where the tool config is
self.config_file = config_file
self.tool_dir = os.path.dirname(config_file)
self.app = app
self.repository_id = repository_id
self._allow_code_files = allow_code_files
# setup initial attribute values
self.inputs = odict()
self.stdio_exit_codes = list()
self.stdio_regexes = list()
self.inputs_by_page = list()
self.display_by_page = list()
self.action = "/tool_runner/index"
self.target = "galaxy_main"
self.method = "post"
self.labels = []
self.check_values = True
self.nginx_upload = False
self.input_required = False
self.display_interface = True
self.require_login = False
self.rerun = False
# Define a place to keep track of all input These
# differ from the inputs dictionary in that inputs can be page
# elements like conditionals, but input_params are basic form
# parameters like SelectField objects. This enables us to more
# easily ensure that parameter dependencies like index files or
# tool_data_table_conf.xml entries exist.
self.input_params = []
# Attributes of tools installed from Galaxy tool sheds.
self.tool_shed = None
self.repository_name = None
self.repository_owner = None
self.changeset_revision = None
self.installed_changeset_revision = None
self.sharable_url = None
# The tool.id value will be the value of guid, but we'll keep the
# guid attribute since it is useful to have.
self.guid = guid
self.old_id = None
self.version = None
self._lineage = None
self.dependencies = []
# populate toolshed repository info, if available
self.populate_tool_shed_info()
# add tool resource parameters
self.populate_resource_parameters(tool_source)
self.tool_errors = None
# Parse XML element containing configuration
try:
self.parse(tool_source, guid=guid)
except Exception as e:
global_tool_errors.add_error(config_file, "Tool Loading", e)
raise e
# The job search is only relevant in a galaxy context, and breaks
# loading tools into the toolshed for validation.
if self.app.name == "galaxy":
self.job_search = JobSearch(app=self.app)
|
def __init__(
self,
config_file,
tool_source,
app,
guid=None,
repository_id=None,
allow_code_files=True,
):
"""Load a tool from the config named by `config_file`"""
# Determine the full path of the directory where the tool config is
self.config_file = config_file
self.tool_dir = os.path.dirname(config_file)
self.app = app
self.repository_id = repository_id
self._allow_code_files = allow_code_files
# setup initial attribute values
self.inputs = odict()
self.stdio_exit_codes = list()
self.stdio_regexes = list()
self.inputs_by_page = list()
self.display_by_page = list()
self.action = "/tool_runner/index"
self.target = "galaxy_main"
self.method = "post"
self.labels = []
self.check_values = True
self.nginx_upload = False
self.input_required = False
self.display_interface = True
self.require_login = False
self.rerun = False
# Define a place to keep track of all input These
# differ from the inputs dictionary in that inputs can be page
# elements like conditionals, but input_params are basic form
# parameters like SelectField objects. This enables us to more
# easily ensure that parameter dependencies like index files or
# tool_data_table_conf.xml entries exist.
self.input_params = []
# Attributes of tools installed from Galaxy tool sheds.
self.tool_shed = None
self.repository_name = None
self.repository_owner = None
self.changeset_revision = None
self.installed_changeset_revision = None
self.sharable_url = None
# The tool.id value will be the value of guid, but we'll keep the
# guid attribute since it is useful to have.
self.guid = guid
self.old_id = None
self.version = None
self._lineage = None
self.dependencies = []
# populate toolshed repository info, if available
self.populate_tool_shed_info()
# add tool resource parameters
self.populate_resource_parameters(tool_source)
# Parse XML element containing configuration
try:
self.parse(tool_source, guid=guid)
except Exception as e:
global_tool_errors.add_error(config_file, "Tool Loading", e)
raise e
# The job search is only relevant in a galaxy context, and breaks
# loading tools into the toolshed for validation.
if self.app.name == "galaxy":
self.job_search = JobSearch(app=self.app)
|
https://github.com/galaxyproject/galaxy/issues/6584
|
galaxy.tools.toolbox.base INFO 2018-08-09 10:57:04,513 Parsing the tool configuration /private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/tool_conf.xml
galaxy.tools.toolbox.base ERROR 2018-08-09 10:57:04,519 Error reading tool from path: /Users/mvandenb/src/tools-iuc/tools/fastp/fastp.xml
Traceback (most recent call last):
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 567, in _load_tool_tag_set
tool = self.load_tool(concrete_path, use_cached=False)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 751, in load_tool
tool = self.create_tool(config_file=config_file, repository_id=repository_id, guid=guid, **kwds)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/__init__.py", line 270, in create_tool
raise e
KeyError: 'adapter_trimming_options'
|
KeyError
|
def to_json(self, trans, kwd={}, job=None, workflow_building_mode=False):
"""
Recursively creates a tool dictionary containing repeats, dynamic options and updated states.
"""
history_id = kwd.get("history_id", None)
history = None
if (
workflow_building_mode is workflow_building_modes.USE_HISTORY
or workflow_building_mode is workflow_building_modes.DISABLED
):
# We don't need a history when exporting a workflow for the workflow editor or when downloading a workflow
try:
if history_id is not None:
history = self.history_manager.get_owned(
trans.security.decode_id(history_id),
trans.user,
current_history=trans.history,
)
else:
history = trans.get_history()
if history is None and job is not None:
history = self.history_manager.get_owned(
job.history.id, trans.user, current_history=trans.history
)
if history is None:
raise exceptions.MessageException(
"History unavailable. Please specify a valid history id"
)
except Exception as e:
raise exceptions.MessageException(
"[history_id=%s] Failed to retrieve history. %s." % (history_id, str(e))
)
# build request context
request_context = WorkRequestContext(
app=trans.app,
user=trans.user,
history=history,
workflow_building_mode=workflow_building_mode,
)
# load job parameters into incoming
tool_message = ""
tool_warnings = ""
if job:
try:
job_params = job.get_param_values(self.app, ignore_errors=True)
tool_warnings = self.check_and_update_param_values(
job_params, request_context, update_values=True
)
self._map_source_to_history(request_context, self.inputs, job_params)
tool_message = self._compare_tool_version(job)
params_to_incoming(kwd, self.inputs, job_params, self.app)
except Exception as e:
raise exceptions.MessageException(str(e))
# create parameter object
params = Params(kwd, sanitize=False)
# expand incoming parameters (parameters might trigger multiple tool executions,
# here we select the first execution only in order to resolve dynamic parameters)
expanded_incomings, _ = expand_meta_parameters(trans, self, params.__dict__)
if expanded_incomings:
params.__dict__ = expanded_incomings[0]
# do param translation here, used by datasource tools
if self.input_translator:
self.input_translator.translate(params)
set_dataset_matcher_factory(request_context, self)
# create tool state
state_inputs = {}
state_errors = {}
populate_state(
request_context, self.inputs, params.__dict__, state_inputs, state_errors
)
# create tool model
tool_model = self.to_dict(request_context)
tool_model["inputs"] = []
self.populate_model(
request_context, self.inputs, state_inputs, tool_model["inputs"]
)
unset_dataset_matcher_factory(request_context)
# create tool help
tool_help = ""
if self.help:
tool_help = self.help.render(
static_path=url_for("/static"), host_url=url_for("/", qualified=True)
)
tool_help = unicodify(tool_help, "utf-8")
# update tool model
tool_model.update(
{
"id": self.id,
"help": tool_help,
"citations": bool(self.citations),
"biostar_url": self.app.config.biostar_url,
"sharable_url": self.sharable_url,
"message": tool_message,
"warnings": tool_warnings,
"versions": self.tool_versions,
"requirements": [
{"name": r.name, "version": r.version} for r in self.requirements
],
"errors": state_errors,
"tool_errors": self.tool_errors,
"state_inputs": params_to_strings(self.inputs, state_inputs, self.app),
"job_id": trans.security.encode_id(job.id) if job else None,
"job_remap": self._get_job_remap(job),
"history_id": trans.security.encode_id(history.id) if history else None,
"display": self.display_interface,
"action": url_for(self.action),
"method": self.method,
"enctype": self.enctype,
}
)
return tool_model
|
def to_json(self, trans, kwd={}, job=None, workflow_building_mode=False):
"""
Recursively creates a tool dictionary containing repeats, dynamic options and updated states.
"""
history_id = kwd.get("history_id", None)
history = None
if (
workflow_building_mode is workflow_building_modes.USE_HISTORY
or workflow_building_mode is workflow_building_modes.DISABLED
):
# We don't need a history when exporting a workflow for the workflow editor or when downloading a workflow
try:
if history_id is not None:
history = self.history_manager.get_owned(
trans.security.decode_id(history_id),
trans.user,
current_history=trans.history,
)
else:
history = trans.get_history()
if history is None and job is not None:
history = self.history_manager.get_owned(
job.history.id, trans.user, current_history=trans.history
)
if history is None:
raise exceptions.MessageException(
"History unavailable. Please specify a valid history id"
)
except Exception as e:
raise exceptions.MessageException(
"[history_id=%s] Failed to retrieve history. %s." % (history_id, str(e))
)
# build request context
request_context = WorkRequestContext(
app=trans.app,
user=trans.user,
history=history,
workflow_building_mode=workflow_building_mode,
)
# load job parameters into incoming
tool_message = ""
tool_warnings = ""
if job:
try:
job_params = job.get_param_values(self.app, ignore_errors=True)
tool_warnings = self.check_and_update_param_values(
job_params, request_context, update_values=True
)
self._map_source_to_history(request_context, self.inputs, job_params)
tool_message = self._compare_tool_version(job)
params_to_incoming(kwd, self.inputs, job_params, self.app)
except Exception as e:
raise exceptions.MessageException(str(e))
# create parameter object
params = Params(kwd, sanitize=False)
# expand incoming parameters (parameters might trigger multiple tool executions,
# here we select the first execution only in order to resolve dynamic parameters)
expanded_incomings, _ = expand_meta_parameters(trans, self, params.__dict__)
if expanded_incomings:
params.__dict__ = expanded_incomings[0]
# do param translation here, used by datasource tools
if self.input_translator:
self.input_translator.translate(params)
set_dataset_matcher_factory(request_context, self)
# create tool state
state_inputs = {}
state_errors = {}
populate_state(
request_context, self.inputs, params.__dict__, state_inputs, state_errors
)
# create tool model
tool_model = self.to_dict(request_context)
tool_model["inputs"] = []
self.populate_model(
request_context, self.inputs, state_inputs, tool_model["inputs"]
)
unset_dataset_matcher_factory(request_context)
# create tool help
tool_help = ""
if self.help:
tool_help = self.help.render(
static_path=url_for("/static"), host_url=url_for("/", qualified=True)
)
tool_help = unicodify(tool_help, "utf-8")
# update tool model
tool_model.update(
{
"id": self.id,
"help": tool_help,
"citations": bool(self.citations),
"biostar_url": self.app.config.biostar_url,
"sharable_url": self.sharable_url,
"message": tool_message,
"warnings": tool_warnings,
"versions": self.tool_versions,
"requirements": [
{"name": r.name, "version": r.version} for r in self.requirements
],
"errors": state_errors,
"state_inputs": params_to_strings(self.inputs, state_inputs, self.app),
"job_id": trans.security.encode_id(job.id) if job else None,
"job_remap": self._get_job_remap(job),
"history_id": trans.security.encode_id(history.id) if history else None,
"display": self.display_interface,
"action": url_for(self.action),
"method": self.method,
"enctype": self.enctype,
}
)
return tool_model
|
https://github.com/galaxyproject/galaxy/issues/6584
|
galaxy.tools.toolbox.base INFO 2018-08-09 10:57:04,513 Parsing the tool configuration /private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/tool_conf.xml
galaxy.tools.toolbox.base ERROR 2018-08-09 10:57:04,519 Error reading tool from path: /Users/mvandenb/src/tools-iuc/tools/fastp/fastp.xml
Traceback (most recent call last):
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 567, in _load_tool_tag_set
tool = self.load_tool(concrete_path, use_cached=False)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 751, in load_tool
tool = self.create_tool(config_file=config_file, repository_id=repository_id, guid=guid, **kwds)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/__init__.py", line 270, in create_tool
raise e
KeyError: 'adapter_trimming_options'
|
KeyError
|
def __init__(self):
self._hash_by_tool_paths = {}
self._tools_by_path = {}
self._tool_paths_by_id = {}
self._macro_paths_by_id = {}
self._tool_ids_by_macro_paths = {}
self._mod_time_by_path = {}
self._new_tool_ids = set()
self._removed_tool_ids = set()
self._removed_tools_by_path = {}
|
def __init__(self):
self._hash_by_tool_paths = {}
self._tools_by_path = {}
self._tool_paths_by_id = {}
self._macro_paths_by_id = {}
self._tool_ids_by_macro_paths = {}
self._mod_time_by_path = {}
self._new_tool_ids = set()
self._removed_tool_ids = set()
|
https://github.com/galaxyproject/galaxy/issues/6584
|
galaxy.tools.toolbox.base INFO 2018-08-09 10:57:04,513 Parsing the tool configuration /private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/tool_conf.xml
galaxy.tools.toolbox.base ERROR 2018-08-09 10:57:04,519 Error reading tool from path: /Users/mvandenb/src/tools-iuc/tools/fastp/fastp.xml
Traceback (most recent call last):
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 567, in _load_tool_tag_set
tool = self.load_tool(concrete_path, use_cached=False)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 751, in load_tool
tool = self.create_tool(config_file=config_file, repository_id=repository_id, guid=guid, **kwds)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/__init__.py", line 270, in create_tool
raise e
KeyError: 'adapter_trimming_options'
|
KeyError
|
def cleanup(self):
"""
Remove uninstalled tools from tool cache if they are not on disk anymore or if their content has changed.
Returns list of tool_ids that have been removed.
"""
removed_tool_ids = []
try:
paths_to_cleanup = {
path: tool.all_ids
for path, tool in self._tools_by_path.items()
if self._should_cleanup(path)
}
for config_filename, tool_ids in paths_to_cleanup.items():
del self._hash_by_tool_paths[config_filename]
if os.path.exists(config_filename):
# This tool has probably been broken while editing on disk
# We record it here, so that we can recover it
self._removed_tools_by_path[config_filename] = self._tools_by_path[
config_filename
]
del self._tools_by_path[config_filename]
for tool_id in tool_ids:
if tool_id in self._tool_paths_by_id:
del self._tool_paths_by_id[tool_id]
removed_tool_ids.extend(tool_ids)
for tool_id in removed_tool_ids:
self._removed_tool_ids.add(tool_id)
if tool_id in self._new_tool_ids:
self._new_tool_ids.remove(tool_id)
except Exception:
# If by chance the file is being removed while calculating the hash or modtime
# we don't want the thread to die.
pass
return removed_tool_ids
|
def cleanup(self):
"""
Remove uninstalled tools from tool cache if they are not on disk anymore or if their content has changed.
Returns list of tool_ids that have been removed.
"""
removed_tool_ids = []
try:
paths_to_cleanup = {
path: tool.all_ids
for path, tool in self._tools_by_path.items()
if self._should_cleanup(path)
}
for config_filename, tool_ids in paths_to_cleanup.items():
del self._hash_by_tool_paths[config_filename]
del self._tools_by_path[config_filename]
for tool_id in tool_ids:
if tool_id in self._tool_paths_by_id:
del self._tool_paths_by_id[tool_id]
removed_tool_ids.extend(tool_ids)
for tool_id in removed_tool_ids:
self._removed_tool_ids.add(tool_id)
if tool_id in self._new_tool_ids:
self._new_tool_ids.remove(tool_id)
except Exception:
# If by chance the file is being removed while calculating the hash or modtime
# we don't want the thread to die.
pass
return removed_tool_ids
|
https://github.com/galaxyproject/galaxy/issues/6584
|
galaxy.tools.toolbox.base INFO 2018-08-09 10:57:04,513 Parsing the tool configuration /private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/tool_conf.xml
galaxy.tools.toolbox.base ERROR 2018-08-09 10:57:04,519 Error reading tool from path: /Users/mvandenb/src/tools-iuc/tools/fastp/fastp.xml
Traceback (most recent call last):
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 567, in _load_tool_tag_set
tool = self.load_tool(concrete_path, use_cached=False)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 751, in load_tool
tool = self.create_tool(config_file=config_file, repository_id=repository_id, guid=guid, **kwds)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/__init__.py", line 270, in create_tool
raise e
KeyError: 'adapter_trimming_options'
|
KeyError
|
def reset_status(self):
"""
Reset tracking of new and newly disabled tools.
"""
self._new_tool_ids = set()
self._removed_tool_ids = set()
self._removed_tools_by_path = {}
|
def reset_status(self):
"""Reset self._new_tool_ids and self._removed_tool_ids once
all operations that need to know about new tools have finished running."""
self._new_tool_ids = set()
self._removed_tool_ids = set()
|
https://github.com/galaxyproject/galaxy/issues/6584
|
galaxy.tools.toolbox.base INFO 2018-08-09 10:57:04,513 Parsing the tool configuration /private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/tool_conf.xml
galaxy.tools.toolbox.base ERROR 2018-08-09 10:57:04,519 Error reading tool from path: /Users/mvandenb/src/tools-iuc/tools/fastp/fastp.xml
Traceback (most recent call last):
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 567, in _load_tool_tag_set
tool = self.load_tool(concrete_path, use_cached=False)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/toolbox/base.py", line 751, in load_tool
tool = self.create_tool(config_file=config_file, repository_id=repository_id, guid=guid, **kwds)
File "/private/var/folders/df/6xqpqpcd7h73b6jpx9t6cwhw0000gn/T/tmpicezz6xb/galaxy-dev/lib/galaxy/tools/__init__.py", line 270, in create_tool
raise e
KeyError: 'adapter_trimming_options'
|
KeyError
|
def to_dict(self, view="collection", value_mapper=None, app=None):
as_dict = super(ToolOutput, self).to_dict(
view=view, value_mapper=value_mapper, app=app
)
format = self.format
if format and format != "input" and app:
edam_format = app.datatypes_registry.edam_formats.get(self.format)
as_dict["edam_format"] = edam_format
edam_data = app.datatypes_registry.edam_data.get(self.format)
as_dict["edam_data"] = edam_data
return as_dict
|
def to_dict(self, view="collection", value_mapper=None, app=None):
as_dict = super(ToolOutput, self).to_dict(view=view, value_mapper=value_mapper)
format = self.format
if format and format != "input" and app:
edam_format = app.datatypes_registry.edam_formats.get(self.format)
as_dict["edam_format"] = edam_format
edam_data = app.datatypes_registry.edam_data.get(self.format)
as_dict["edam_data"] = edam_data
return as_dict
|
https://github.com/galaxyproject/galaxy/issues/6537
|
gi.tools.show_tool(tool_id, io_details = True)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/path/to/bioblend/galaxy/tools/__init__.py", line 98, in show_tool
return self._get(id=tool_id, params=params)
File "/path/to/bioblend/galaxy/client.py", line 136, in _get
status_code=r.status_code)
bioblend.ConnectionError: GET: error 500: b'{"err_msg": "Uncaught exception in exposed API method:", "err_code": 0}', 0 attempts left: {"err_msg": "Uncaught exception in exposed API method:", "err_code": 0}
|
bioblend.ConnectionError
|
def sliced_input_collection_structure(self, input_name):
unqualified_recurse = self.tool.profile < 18.09 and "|" not in input_name
def find_collection(input_dict, input_name):
for key, value in input_dict.items():
if key == input_name:
return value
if isinstance(value, dict):
if "|" in input_name:
prefix, rest_input_name = input_name.split("|", 1)
if key == prefix:
return find_collection(value, rest_input_name)
elif unqualified_recurse:
# Looking for "input1" instead of "cond|input1" for instance.
# See discussion on https://github.com/galaxyproject/galaxy/issues/6157.
unqualified_match = find_collection(value, input_name)
if unqualified_match:
return unqualified_match
input_collection = find_collection(self.example_params, input_name)
if input_collection is None:
raise Exception("Failed to find referenced collection in inputs.")
if not hasattr(input_collection, "collection"):
raise Exception("Referenced input parameter is not a collection.")
collection_type_description = self.trans.app.dataset_collections_service.collection_type_descriptions.for_collection_type(
input_collection.collection.collection_type
)
subcollection_mapping_type = None
if self.is_implicit_input(input_name):
subcollection_mapping_type = self.collection_info.subcollection_mapping_type(
input_name
)
return get_structure(
input_collection,
collection_type_description,
leaf_subcollection_type=subcollection_mapping_type,
)
|
def sliced_input_collection_structure(self, input_name):
input_collection = self.example_params[input_name]
collection_type_description = self.trans.app.dataset_collections_service.collection_type_descriptions.for_collection_type(
input_collection.collection.collection_type
)
subcollection_mapping_type = None
if self.is_implicit_input(input_name):
subcollection_mapping_type = self.collection_info.subcollection_mapping_type(
input_name
)
return get_structure(
input_collection,
collection_type_description,
leaf_subcollection_type=subcollection_mapping_type,
)
|
https://github.com/galaxyproject/galaxy/issues/6157
|
127.0.0.1 - - [16/May/2018:23:42:03 +0100] "POST /api/tools/toolshed.g2.bx.psu.edu/repos/iuc/sickle/sickle/1.33.1/build HTTP/1.1" 200 - "http://127.0.0.1:8080/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fiuc%2Fsickle%2Fsickle%2F1.33.1&version=1.33.1&__identifer=eqdjaao7u85" "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0"
galaxy.tools DEBUG 2018-05-16 23:42:05,300 Validated and populated state for tool request (43.218 ms)
galaxy.web.framework.decorators ERROR 2018-05-16 23:42:05,332 Uncaught exception in exposed API method:
Traceback (most recent call last):
File "/opt/galaxy/lib/galaxy/web/framework/decorators.py", line 281, in decorator
rval = func(self, trans, *args, **kwargs)
File "/opt/galaxy/lib/galaxy/webapps/galaxy/api/tools.py", line 354, in create
vars = tool.handle_input(trans, incoming, history=target_history, use_cached_job=use_cached_job)
File "/opt/galaxy/lib/galaxy/tools/__init__.py", line 1337, in handle_input
execution_tracker = execute_job(trans, self, mapping_params, history=request_context.history, rerun_remap_job_id=rerun_remap_job_id, collection_info=collection_info, completed_jobs=completed_jobs)
File "/opt/galaxy/lib/galaxy/tools/execute.py", line 81, in execute
execution_tracker.ensure_implicit_collections_populated(history, mapping_params.param_template)
File "/opt/galaxy/lib/galaxy/tools/execute.py", line 260, in ensure_implicit_collections_populated
self.precreate_output_collections(history, params)
File "/opt/galaxy/lib/galaxy/tools/execute.py", line 278, in precreate_output_collections
effective_structure = self._mapped_output_structure(trans, output)
File "/opt/galaxy/lib/galaxy/tools/execute.py", line 244, in _mapped_output_structure
output_structure = tool_output_to_structure(self.sliced_input_collection_structure, tool_output, collections_manager)
File "/opt/galaxy/lib/galaxy/dataset_collections/structure.py", line 183, in tool_output_to_structure
tree = get_sliced_input_collection_structure(structured_like)
File "/opt/galaxy/lib/galaxy/tools/execute.py", line 214, in sliced_input_collection_structure
input_collection = self.example_params[input_name]
KeyError: 'input_paired'
127.0.0.1 - - [16/May/2018:23:42:05 +0100] "POST /api/tools HTTP/1.1" 500 - "http://127.0.0.1:8080/?tool_id=toolshed.g2.bx.psu.edu%2Frepos%2Fiuc%2Fsickle%2Fsickle%2F1.33.1&version=1.33.1&__identifer=eqdjaao7u85" "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0"
|
KeyError
|
def add_file(dataset, registry, output_path):
ext = None
compression_type = None
line_count = None
converted_path = None
stdout = None
link_data_only_str = dataset.get("link_data_only", "copy_files")
if link_data_only_str not in ["link_to_files", "copy_files"]:
raise UploadProblemException(
"Invalid setting '%s' for option link_data_only - upload request misconfigured"
% link_data_only_str
)
link_data_only = link_data_only_str == "link_to_files"
# run_as_real_user is estimated from galaxy config (external chmod indicated of inputs executed)
# If this is True we always purge supplied upload inputs so they are cleaned up and we reuse their
# paths during data conversions since this user already owns that path.
# Older in_place check for upload jobs created before 18.01, TODO remove in 19.XX. xref #5206
run_as_real_user = dataset.get("run_as_real_user", False) or dataset.get(
"in_place", False
)
# purge_source defaults to True unless this is an FTP import and
# ftp_upload_purge has been overridden to False in Galaxy's config.
# We set purge_source to False if:
# - the job does not have write access to the file, e.g. when running as the
# real user
# - the files are uploaded from external paths.
purge_source = (
dataset.get("purge_source", True)
and not run_as_real_user
and dataset.type not in ("server_dir", "path_paste")
)
# in_place is True unless we are running as a real user or importing external paths (i.e.
# this is a real upload and not a path paste or ftp import).
# in_place should always be False if running as real user because the uploaded file will
# be owned by Galaxy and not the user and it should be False for external paths so Galaxy doesn't
# modify files not controlled by Galaxy.
in_place = not run_as_real_user and dataset.type not in (
"server_dir",
"path_paste",
"ftp_import",
)
# Base on the check_upload_content Galaxy config option and on by default, this enables some
# security related checks on the uploaded content, but can prevent uploads from working in some cases.
check_content = dataset.get("check_content", True)
# auto_decompress is a request flag that can be swapped off to prevent Galaxy from automatically
# decompressing archive files before sniffing.
auto_decompress = dataset.get("auto_decompress", True)
try:
dataset.file_type
except AttributeError:
raise UploadProblemException(
"Unable to process uploaded file, missing file_type parameter."
)
if dataset.type == "url":
try:
dataset.path = sniff.stream_url_to_file(dataset.path)
except Exception as e:
raise UploadProblemException(
"Unable to fetch %s\n%s" % (dataset.path, str(e))
)
# See if we have an empty file
if not os.path.exists(dataset.path):
raise UploadProblemException(
"Uploaded temporary file (%s) does not exist." % dataset.path
)
if not os.path.getsize(dataset.path) > 0:
raise UploadProblemException("The uploaded file is empty")
# Does the first 1K contain a null?
is_binary = check_binary(dataset.path)
# Decompress if needed/desired and determine/validate filetype. If a keep-compressed datatype is explicitly selected
# or if autodetection is selected and the file sniffs as a keep-compressed datatype, it will not be decompressed.
if not link_data_only:
if is_zip(dataset.path) and not is_single_file_zip(dataset.path):
stdout = "ZIP file contained more than one file, only the first file was added to Galaxy."
try:
ext, converted_path, compression_type = sniff.handle_uploaded_dataset_file(
dataset.path,
registry,
ext=dataset.file_type,
tmp_prefix="data_id_%s_upload_" % dataset.dataset_id,
tmp_dir=output_adjacent_tmpdir(output_path),
in_place=in_place,
check_content=check_content,
is_binary=is_binary,
auto_decompress=auto_decompress,
uploaded_file_ext=os.path.splitext(dataset.name)[1].lower().lstrip("."),
convert_to_posix_lines=dataset.to_posix_lines,
convert_spaces_to_tabs=dataset.space_to_tab,
)
except sniff.InappropriateDatasetContentError as exc:
raise UploadProblemException(str(exc))
elif dataset.file_type == "auto":
# Link mode can't decompress anyway, so enable sniffing for keep-compressed datatypes even when auto_decompress
# is enabled
os.environ["GALAXY_SNIFFER_VALIDATE_MODE"] = "1"
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_binary=is_binary)
os.environ.pop("GALAXY_SNIFFER_VALIDATE_MODE")
else:
ext = dataset.file_type
# The converted path will be the same as the input path if no conversion was done (or in-place conversion is used)
converted_path = None if converted_path == dataset.path else converted_path
# Validate datasets where the filetype was explicitly set using the filetype's sniffer (if any)
if dataset.file_type != "auto":
datatype = registry.get_datatype_by_extension(dataset.file_type)
# Enable sniffer "validate mode" (prevents certain sniffers from disabling themselves)
os.environ["GALAXY_SNIFFER_VALIDATE_MODE"] = "1"
if hasattr(datatype, "sniff") and not datatype.sniff(dataset.path):
stdout = (
"Warning: The file 'Type' was set to '{ext}' but the file does not appear to be of that"
" type".format(ext=dataset.file_type)
)
os.environ.pop("GALAXY_SNIFFER_VALIDATE_MODE")
# Handle unsniffable binaries
if is_binary and ext == "binary":
upload_ext = os.path.splitext(dataset.name)[1].lower().lstrip(".")
if registry.is_extension_unsniffable_binary(upload_ext):
stdout = (
"Warning: The file's datatype cannot be determined from its contents and was guessed based on"
" its extension, to avoid this warning, manually set the file 'Type' to '{ext}' when uploading"
" this type of file".format(ext=upload_ext)
)
ext = upload_ext
else:
stdout = (
"The uploaded binary file format cannot be determined automatically, please set the file 'Type'"
" manually"
)
datatype = registry.get_datatype_by_extension(ext)
# Strip compression extension from name
if (
compression_type
and not getattr(datatype, "compressed", False)
and dataset.name.endswith("." + compression_type)
):
dataset.name = dataset.name[: -len("." + compression_type)]
# Move dataset
if link_data_only:
# Never alter a file that will not be copied to Galaxy's local file store.
if datatype.dataset_content_needs_grooming(dataset.path):
err_msg = (
"The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be "
+ "<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed."
)
raise UploadProblemException(err_msg)
if not link_data_only:
# Move the dataset to its "real" path. converted_path is a tempfile so we move it even if purge_source is False.
if purge_source or converted_path:
try:
shutil.move(converted_path or dataset.path, output_path)
except OSError as e:
# We may not have permission to remove the input
if e.errno != errno.EACCES:
raise
else:
shutil.copy(dataset.path, output_path)
# Write the job info
stdout = stdout or "uploaded %s file" % ext
info = dict(
type="dataset",
dataset_id=dataset.dataset_id,
ext=ext,
stdout=stdout,
name=dataset.name,
line_count=line_count,
)
if dataset.get("uuid", None) is not None:
info["uuid"] = dataset.get("uuid")
# FIXME: does this belong here? also not output-adjacent-tmpdir aware =/
if (
not link_data_only
and datatype
and datatype.dataset_content_needs_grooming(output_path)
):
# Groom the dataset content if necessary
datatype.groom_dataset_content(output_path)
return info
|
def add_file(dataset, registry, output_path):
ext = None
compression_type = None
line_count = None
converted_path = None
stdout = None
link_data_only_str = dataset.get("link_data_only", "copy_files")
if link_data_only_str not in ["link_to_files", "copy_files"]:
raise UploadProblemException(
"Invalid setting '%s' for option link_data_only - upload request misconfigured"
% link_data_only_str
)
link_data_only = link_data_only_str == "link_to_files"
# run_as_real_user is estimated from galaxy config (external chmod indicated of inputs executed)
# If this is True we always purge supplied upload inputs so they are cleaned up and we reuse their
# paths during data conversions since this user already owns that path.
# Older in_place check for upload jobs created before 18.01, TODO remove in 19.XX. xref #5206
run_as_real_user = dataset.get("run_as_real_user", False) or dataset.get(
"in_place", False
)
# purge_source defaults to True unless this is an FTP import and
# ftp_upload_purge has been overridden to False in Galaxy's config.
# We set purge_source to False if:
# - the job does not have write access to the file, e.g. when running as the
# real user
# - the files are uploaded from external paths.
purge_source = (
dataset.get("purge_source", True)
and not run_as_real_user
and dataset.type not in ("server_dir", "path_paste")
)
# in_place is True unless we are running as a real user or importing external paths (i.e.
# this is a real upload and not a path paste or ftp import).
# in_place should always be False if running as real user because the uploaded file will
# be owned by Galaxy and not the user and it should be False for external paths so Galaxy doesn't
# modify files not controlled by Galaxy.
in_place = not run_as_real_user and dataset.type not in (
"server_dir",
"path_paste",
"ftp_import",
)
# Base on the check_upload_content Galaxy config option and on by default, this enables some
# security related checks on the uploaded content, but can prevent uploads from working in some cases.
check_content = dataset.get("check_content", True)
# auto_decompress is a request flag that can be swapped off to prevent Galaxy from automatically
# decompressing archive files before sniffing.
auto_decompress = dataset.get("auto_decompress", True)
try:
dataset.file_type
except AttributeError:
raise UploadProblemException(
"Unable to process uploaded file, missing file_type parameter."
)
if dataset.type == "url":
try:
dataset.path = sniff.stream_url_to_file(dataset.path)
except Exception as e:
raise UploadProblemException(
"Unable to fetch %s\n%s" % (dataset.path, str(e))
)
# See if we have an empty file
if not os.path.exists(dataset.path):
raise UploadProblemException(
"Uploaded temporary file (%s) does not exist." % dataset.path
)
if not os.path.getsize(dataset.path) > 0:
raise UploadProblemException("The uploaded file is empty")
# Does the first 1K contain a null?
is_binary = check_binary(dataset.path)
# Decompress if needed/desired and determine/validate filetype. If a keep-compressed datatype is explicitly selected
# or if autodetection is selected and the file sniffs as a keep-compressed datatype, it will not be decompressed.
if not link_data_only:
if is_zip(dataset.path) and not is_single_file_zip(dataset.path):
stdout = "ZIP file contained more than one file, only the first file was added to Galaxy."
try:
ext, converted_path, compression_type = sniff.handle_uploaded_dataset_file(
dataset.path,
registry,
ext=dataset.file_type,
tmp_prefix="data_id_%s_upload_" % dataset.dataset_id,
tmp_dir=output_adjacent_tmpdir(output_path),
in_place=in_place,
check_content=check_content,
is_binary=is_binary,
auto_decompress=auto_decompress,
uploaded_file_ext=os.path.splitext(dataset.name)[1].lower().lstrip("."),
convert_to_posix_lines=dataset.to_posix_lines,
convert_spaces_to_tabs=dataset.space_to_tab,
)
except sniff.InappropriateDatasetContentError as exc:
raise UploadProblemException(str(exc))
elif dataset.file_type == "auto":
# Link mode can't decompress anyway, so enable sniffing for keep-compressed datatypes even when auto_decompress
# is enabled
os.environ["GALAXY_SNIFFER_VALIDATE_MODE"] = "1"
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_binary=is_binary)
os.environ.pop("GALAXY_SNIFFER_VALIDATE_MODE")
# The converted path will be the same as the input path if no conversion was done (or in-place conversion is used)
converted_path = None if converted_path == dataset.path else converted_path
# Validate datasets where the filetype was explicitly set using the filetype's sniffer (if any)
if dataset.file_type != "auto":
datatype = registry.get_datatype_by_extension(dataset.file_type)
# Enable sniffer "validate mode" (prevents certain sniffers from disabling themselves)
os.environ["GALAXY_SNIFFER_VALIDATE_MODE"] = "1"
if hasattr(datatype, "sniff") and not datatype.sniff(dataset.path):
stdout = (
"Warning: The file 'Type' was set to '{ext}' but the file does not appear to be of that"
" type".format(ext=dataset.file_type)
)
os.environ.pop("GALAXY_SNIFFER_VALIDATE_MODE")
# Handle unsniffable binaries
if is_binary and ext == "binary":
upload_ext = os.path.splitext(dataset.name)[1].lower().lstrip(".")
if registry.is_extension_unsniffable_binary(upload_ext):
stdout = (
"Warning: The file's datatype cannot be determined from its contents and was guessed based on"
" its extension, to avoid this warning, manually set the file 'Type' to '{ext}' when uploading"
" this type of file".format(ext=upload_ext)
)
ext = upload_ext
else:
stdout = (
"The uploaded binary file format cannot be determined automatically, please set the file 'Type'"
" manually"
)
datatype = registry.get_datatype_by_extension(ext)
# Strip compression extension from name
if (
compression_type
and not getattr(datatype, "compressed", False)
and dataset.name.endswith("." + compression_type)
):
dataset.name = dataset.name[: -len("." + compression_type)]
# Move dataset
if link_data_only:
# Never alter a file that will not be copied to Galaxy's local file store.
if datatype.dataset_content_needs_grooming(dataset.path):
err_msg = (
"The uploaded files need grooming, so change your <b>Copy data into Galaxy?</b> selection to be "
+ "<b>Copy files into Galaxy</b> instead of <b>Link to files without copying into Galaxy</b> so grooming can be performed."
)
raise UploadProblemException(err_msg)
if not link_data_only:
# Move the dataset to its "real" path. converted_path is a tempfile so we move it even if purge_source is False.
if purge_source or converted_path:
try:
shutil.move(converted_path or dataset.path, output_path)
except OSError as e:
# We may not have permission to remove the input
if e.errno != errno.EACCES:
raise
else:
shutil.copy(dataset.path, output_path)
# Write the job info
stdout = stdout or "uploaded %s file" % ext
info = dict(
type="dataset",
dataset_id=dataset.dataset_id,
ext=ext,
stdout=stdout,
name=dataset.name,
line_count=line_count,
)
if dataset.get("uuid", None) is not None:
info["uuid"] = dataset.get("uuid")
# FIXME: does this belong here? also not output-adjacent-tmpdir aware =/
if (
not link_data_only
and datatype
and datatype.dataset_content_needs_grooming(output_path)
):
# Groom the dataset content if necessary
datatype.groom_dataset_content(output_path)
return info
|
https://github.com/galaxyproject/galaxy/issues/5915
|
Fatal error: Exit code 1 ()
Traceback (most recent call last):
File "/Users/mvandenb/src/galaxy/tools/data_source/upload.py", line 321, in <module>
__main__()
File "/Users/mvandenb/src/galaxy/tools/data_source/upload.py", line 314, in __main__
metadata.append(add_file(dataset, registry, output_path))
File "/Users/mvandenb/src/galaxy/tools/data_source/upload.py", line 187, in add_file
if datatype.dataset_content_needs_grooming(dataset.path):
AttributeError: 'NoneType' object has no attribute 'dataset_content_needs_grooming'
|
AttributeError
|
def copy_sample_file(app, filename, dest_path=None):
"""
Copies a sample file at `filename` to `the dest_path`
directory and strips the '.sample' extensions from `filename`.
Returns the path to the copied file (with the .sample extension).
"""
if dest_path is None:
dest_path = os.path.abspath(app.config.tool_data_path)
sample_file_name = basic_util.strip_path(filename)
copied_file = sample_file_name.rsplit(".sample", 1)[0]
full_source_path = os.path.abspath(filename)
full_destination_path = os.path.join(dest_path, sample_file_name)
# Don't copy a file to itself - not sure how this happens, but sometimes it does...
if full_source_path != full_destination_path:
# It's ok to overwrite the .sample version of the file.
shutil.copy(full_source_path, full_destination_path)
# Only create the .loc file if it does not yet exist. We don't overwrite it in case it
# contains stuff proprietary to the local instance.
non_sample_path = os.path.join(dest_path, copied_file)
if not os.path.lexists(non_sample_path):
shutil.copy(full_source_path, os.path.join(dest_path, copied_file))
return non_sample_path
|
def copy_sample_file(app, filename, dest_path=None):
"""
Copies a sample file at `filename` to `the dest_path`
directory and strips the '.sample' extensions from `filename`.
"""
if dest_path is None:
dest_path = os.path.abspath(app.config.tool_data_path)
sample_file_name = basic_util.strip_path(filename)
copied_file = sample_file_name.rsplit(".sample", 1)[0]
full_source_path = os.path.abspath(filename)
full_destination_path = os.path.join(dest_path, sample_file_name)
# Don't copy a file to itself - not sure how this happens, but sometimes it does...
if full_source_path != full_destination_path:
# It's ok to overwrite the .sample version of the file.
shutil.copy(full_source_path, full_destination_path)
# Only create the .loc file if it does not yet exist. We don't overwrite it in case it
# contains stuff proprietary to the local instance.
if not os.path.lexists(os.path.join(dest_path, copied_file)):
shutil.copy(full_source_path, os.path.join(dest_path, copied_file))
|
https://github.com/galaxyproject/galaxy/issues/5648
|
galaxy.tools.data WARNING 2018-03-06 20:31:23,419 [p:121,w:1,m:0] [uWSGIWorker1Core2] Cannot find index file '/export/galaxy-central/database/files/tmp-toolshed-gmfcr9qbPdD/bowtie2_indices.loc' for tool data table 'bowtie2_indexes'
galaxy.tools.data DEBUG 2018-03-06 20:31:23,419 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loaded tool data table 'bowtie2_indexes' from file '../shed_tools/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/bowtie2/tool_data_table_conf.xml.sample'
galaxy.tools.data WARNING 2018-03-06 20:31:23,502 [p:121,w:1,m:0] [uWSGIWorker1Core2] Cannot find index file '/export/galaxy-central/database/files/tmp-toolshed-gmfcr9qbPdD/bowtie2_indices.loc' for tool data table 'bowtie2_indexes'
galaxy.tools.data DEBUG 2018-03-06 20:31:23,502 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loading another instance of data table 'bowtie2_indexes' from file '/shed_tools/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/bowtie2/tool_data_table_conf.xml.sample', attempting to merge content.
galaxy.tools.data DEBUG 2018-03-06 20:31:25,436 [p:121,w:1,m:0] [uWSGIWorker1Core2] Could not parse existing tool data table config, assume no existing elements: [Errno 2] No such file or directory: u'/galaxy-central/tool-data/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/tool_data_table_conf.xml'
galaxy.tools.data DEBUG 2018-03-06 20:31:25,441 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loaded 0 lines from '/galaxy-central/tool-data/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/bowtie2_indices.loc' for 'bowtie2_indexes'
galaxy.tools.data DEBUG 2018-03-06 20:31:25,442 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loading another instance of data table 'bowtie2_indexes' from file '/galaxy-central/tool-data/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/tool_data_table_conf.xml', attempting to merge content.
galaxy.web.framework.decorators ERROR 2018-03-06 20:31:25,532 [p:121,w:1,m:0] [uWSGIWorker1Core2] Uncaught exception in exposed API method:
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 281, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py", line 531, in install_repository_revision
payload)
File "lib/tool_shed/galaxy_install/install_manager.py", line 700, in install
install_options
File "lib/tool_shed/galaxy_install/install_manager.py", line 799, in __initiate_and_install_repositories
return self.install_repositories(tsr_ids, decoded_kwd, reinstalling=False)
File "lib/tool_shed/galaxy_install/install_manager.py", line 846, in install_repositories
tool_panel_section_mapping=tool_panel_section_mapping)
File "lib/tool_shed/galaxy_install/install_manager.py", line 896, in install_tool_shed_repository
tool_panel_section_mapping=tool_panel_section_mapping)
File "lib/tool_shed/galaxy_install/install_manager.py", line 555, in __handle_repository_contents
sample_files_copied)
File "lib/tool_shed/util/tool_util.py", line 164, in handle_missing_index_file
options.tool_data_table.handle_found_index_file(options.missing_index_file)
File "lib/galaxy/tools/data/__init__.py", line 422, in handle_found_index_file
self.extend_data_with(filename)
File "lib/galaxy/tools/data/__init__.py", line 488, in extend_data_with
self.data.extend(self.parse_file_fields(open(filename), errors=errors, here=here))
IOError: [Errno 2] No such file or directory: '/galaxy/data/location/bowtie2_indices.loc'
127.0.0.1 - - [06/Mar/2018:20:31:14 +0000] "POST /api/tool_shed_repositories/new/install_repository_revision?key=admin HTTP/1.1" 500 - "-" "python-requests/2.18.4"
|
IOError
|
def handle_missing_index_file(
app, tool_path, sample_files, repository_tools_tups, sample_files_copied
):
"""
Inspect each tool to see if it has any input parameters that are dynamically
generated select lists that depend on a .loc file. This method is not called
from the tool shed, but from Galaxy when a repository is being installed.
"""
for index, repository_tools_tup in enumerate(repository_tools_tups):
tup_path, guid, repository_tool = repository_tools_tup
params_with_missing_index_file = repository_tool.params_with_missing_index_file
for param in params_with_missing_index_file:
options = param.options
missing_file_name = basic_util.strip_path(options.missing_index_file)
if missing_file_name not in sample_files_copied:
# The repository must contain the required xxx.loc.sample file.
for sample_file in sample_files:
sample_file_name = basic_util.strip_path(sample_file)
if sample_file_name == "%s.sample" % missing_file_name:
target_path = copy_sample_file(
app, os.path.join(tool_path, sample_file)
)
if (
options.tool_data_table
and options.tool_data_table.missing_index_file
):
options.tool_data_table.handle_found_index_file(target_path)
sample_files_copied.append(target_path)
break
return repository_tools_tups, sample_files_copied
|
def handle_missing_index_file(
app, tool_path, sample_files, repository_tools_tups, sample_files_copied
):
"""
Inspect each tool to see if it has any input parameters that are dynamically
generated select lists that depend on a .loc file. This method is not called
from the tool shed, but from Galaxy when a repository is being installed.
"""
for index, repository_tools_tup in enumerate(repository_tools_tups):
tup_path, guid, repository_tool = repository_tools_tup
params_with_missing_index_file = repository_tool.params_with_missing_index_file
for param in params_with_missing_index_file:
options = param.options
missing_file_name = basic_util.strip_path(options.missing_index_file)
if missing_file_name not in sample_files_copied:
# The repository must contain the required xxx.loc.sample file.
for sample_file in sample_files:
sample_file_name = basic_util.strip_path(sample_file)
if sample_file_name == "%s.sample" % missing_file_name:
copy_sample_file(app, os.path.join(tool_path, sample_file))
if (
options.tool_data_table
and options.tool_data_table.missing_index_file
):
options.tool_data_table.handle_found_index_file(
options.missing_index_file
)
sample_files_copied.append(options.missing_index_file)
break
return repository_tools_tups, sample_files_copied
|
https://github.com/galaxyproject/galaxy/issues/5648
|
galaxy.tools.data WARNING 2018-03-06 20:31:23,419 [p:121,w:1,m:0] [uWSGIWorker1Core2] Cannot find index file '/export/galaxy-central/database/files/tmp-toolshed-gmfcr9qbPdD/bowtie2_indices.loc' for tool data table 'bowtie2_indexes'
galaxy.tools.data DEBUG 2018-03-06 20:31:23,419 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loaded tool data table 'bowtie2_indexes' from file '../shed_tools/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/bowtie2/tool_data_table_conf.xml.sample'
galaxy.tools.data WARNING 2018-03-06 20:31:23,502 [p:121,w:1,m:0] [uWSGIWorker1Core2] Cannot find index file '/export/galaxy-central/database/files/tmp-toolshed-gmfcr9qbPdD/bowtie2_indices.loc' for tool data table 'bowtie2_indexes'
galaxy.tools.data DEBUG 2018-03-06 20:31:23,502 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loading another instance of data table 'bowtie2_indexes' from file '/shed_tools/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/bowtie2/tool_data_table_conf.xml.sample', attempting to merge content.
galaxy.tools.data DEBUG 2018-03-06 20:31:25,436 [p:121,w:1,m:0] [uWSGIWorker1Core2] Could not parse existing tool data table config, assume no existing elements: [Errno 2] No such file or directory: u'/galaxy-central/tool-data/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/tool_data_table_conf.xml'
galaxy.tools.data DEBUG 2018-03-06 20:31:25,441 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loaded 0 lines from '/galaxy-central/tool-data/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/bowtie2_indices.loc' for 'bowtie2_indexes'
galaxy.tools.data DEBUG 2018-03-06 20:31:25,442 [p:121,w:1,m:0] [uWSGIWorker1Core2] Loading another instance of data table 'bowtie2_indexes' from file '/galaxy-central/tool-data/toolshed.g2.bx.psu.edu/repos/devteam/bowtie2/dc1639b66f12/tool_data_table_conf.xml', attempting to merge content.
galaxy.web.framework.decorators ERROR 2018-03-06 20:31:25,532 [p:121,w:1,m:0] [uWSGIWorker1Core2] Uncaught exception in exposed API method:
Traceback (most recent call last):
File "lib/galaxy/web/framework/decorators.py", line 281, in decorator
rval = func(self, trans, *args, **kwargs)
File "lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py", line 531, in install_repository_revision
payload)
File "lib/tool_shed/galaxy_install/install_manager.py", line 700, in install
install_options
File "lib/tool_shed/galaxy_install/install_manager.py", line 799, in __initiate_and_install_repositories
return self.install_repositories(tsr_ids, decoded_kwd, reinstalling=False)
File "lib/tool_shed/galaxy_install/install_manager.py", line 846, in install_repositories
tool_panel_section_mapping=tool_panel_section_mapping)
File "lib/tool_shed/galaxy_install/install_manager.py", line 896, in install_tool_shed_repository
tool_panel_section_mapping=tool_panel_section_mapping)
File "lib/tool_shed/galaxy_install/install_manager.py", line 555, in __handle_repository_contents
sample_files_copied)
File "lib/tool_shed/util/tool_util.py", line 164, in handle_missing_index_file
options.tool_data_table.handle_found_index_file(options.missing_index_file)
File "lib/galaxy/tools/data/__init__.py", line 422, in handle_found_index_file
self.extend_data_with(filename)
File "lib/galaxy/tools/data/__init__.py", line 488, in extend_data_with
self.data.extend(self.parse_file_fields(open(filename), errors=errors, here=here))
IOError: [Errno 2] No such file or directory: '/galaxy/data/location/bowtie2_indices.loc'
127.0.0.1 - - [06/Mar/2018:20:31:14 +0000] "POST /api/tool_shed_repositories/new/install_repository_revision?key=admin HTTP/1.1" 500 - "-" "python-requests/2.18.4"
|
IOError
|
def __link_file_check(self):
"""outputs_to_working_directory breaks library uploads where data is
linked. This method is a hack that solves that problem, but is
specific to the upload tool and relies on an injected job param. This
method should be removed ASAP and replaced with some properly generic
and stateful way of determining link-only datasets. -nate
"""
if self.tool:
job = self.get_job()
param_dict = job.get_param_values(self.app)
return (
self.tool.id == "upload1"
and param_dict.get("link_data_only", None) == "link_to_files"
)
else:
# The tool is unavailable, we try to move the outputs.
return False
|
def __link_file_check(self):
"""outputs_to_working_directory breaks library uploads where data is
linked. This method is a hack that solves that problem, but is
specific to the upload tool and relies on an injected job param. This
method should be removed ASAP and replaced with some properly generic
and stateful way of determining link-only datasets. -nate
"""
job = self.get_job()
param_dict = job.get_param_values(self.app)
return (
self.tool.id == "upload1"
and param_dict.get("link_data_only", None) == "link_to_files"
)
|
https://github.com/galaxyproject/galaxy/issues/5424
|
galaxy.jobs.handler WARNING 2018-01-31 15:36:15,704 (78691) Tool 'motifgen-fasta_1' removed from tool config, unable to recover job
galaxy.jobs DEBUG 2018-01-31 15:36:15,705 (78691) Working directory for job is: /tools/galaxy_ratschlab/database/job_working_directory/078/78691
Traceback (most recent call last):
File "/tools/galaxy_ratschlab/lib/galaxy/webapps/galaxy/buildapp.py", line 58, in paste_app_factory
app = galaxy.app.UniverseApplication(global_conf=global_conf, **kwargs)
File "/tools/galaxy_ratschlab/lib/galaxy/app.py", line 190, in __init__
self.job_manager.start()
File "/tools/galaxy_ratschlab/lib/galaxy/jobs/manager.py", line 33, in start
self.job_handler.start()
File "/tools/galaxy_ratschlab/lib/galaxy/jobs/handler.py", line 40, in start
self.job_queue.start()
File "/tools/galaxy_ratschlab/lib/galaxy/jobs/handler.py", line 85, in start
self.__check_jobs_at_startup()
File "/tools/galaxy_ratschlab/lib/galaxy/jobs/handler.py", line 140, in __check_jobs_at_startup
self.job_wrapper(job).fail('This tool was disabled before the job completed. Please contact your Galaxy administrator.')
File "/tools/galaxy_ratschlab/lib/galaxy/jobs/__init__.py", line 993, in fail
if outputs_to_working_directory and not self.__link_file_check():
File "/tools/galaxy_ratschlab/lib/galaxy/jobs/__init__.py", line 1747, in __link_file_check
param_dict = job.get_param_values(self.app)
File "/tools/galaxy_ratschlab/lib/galaxy/model/__init__.py", line 702, in get_param_values
param_dict = tool.params_from_strings(param_dict, app, ignore_errors=ignore_errors)
AttributeError: 'NoneType' object has no attribute 'params_from_strings'`
|
AttributeError
|
def _write_integrated_tool_panel_config_file(self):
"""
Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
use this file to manage the tool panel, we'll not use xml_to_string() since it doesn't write XML quite right.
"""
tracking_directory = self._integrated_tool_panel_tracking_directory
if not tracking_directory:
fd, filename = tempfile.mkstemp()
else:
if not os.path.exists(tracking_directory):
os.makedirs(tracking_directory)
name = "integrated_tool_panel_%.10f.xml" % time.time()
filename = os.path.join(tracking_directory, name)
open_file = open(filename, "w")
fd = open_file.fileno()
os.write(fd, '<?xml version="1.0"?>\n')
os.write(fd, "<toolbox>\n")
os.write(fd, " <!--\n ")
os.write(
fd,
"\n ".join([l for l in INTEGRATED_TOOL_PANEL_DESCRIPTION.split("\n") if l]),
)
os.write(fd, "\n -->\n")
for key, item_type, item in self._integrated_tool_panel.panel_items_iter():
if item:
if item_type == panel_item_types.TOOL:
os.write(fd, ' <tool id="%s" />\n' % item.id)
elif item_type == panel_item_types.WORKFLOW:
os.write(fd, ' <workflow id="%s" />\n' % item.id)
elif item_type == panel_item_types.LABEL:
label_id = item.id or ""
label_text = item.text or ""
label_version = item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
elif item_type == panel_item_types.SECTION:
section_id = item.id or ""
section_name = item.name or ""
section_version = item.version or ""
os.write(
fd,
' <section id="%s" name="%s" version="%s">\n'
% (escape(section_id), escape(section_name), section_version),
)
for (
section_key,
section_item_type,
section_item,
) in item.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
if section_item:
os.write(fd, ' <tool id="%s" />\n' % section_item.id)
elif section_item_type == panel_item_types.WORKFLOW:
if section_item:
os.write(
fd, ' <workflow id="%s" />\n' % section_item.id
)
elif section_item_type == panel_item_types.LABEL:
if section_item:
label_id = section_item.id or ""
label_text = section_item.text or ""
label_version = section_item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
os.write(fd, " </section>\n")
os.write(fd, "</toolbox>\n")
os.close(fd)
destination = os.path.abspath(self._integrated_tool_panel_config)
if tracking_directory:
open(filename + ".stack", "w").write("".join(traceback.format_stack()))
shutil.copy(filename, filename + ".copy")
filename = filename + ".copy"
shutil.move(filename, destination)
try:
os.chmod(destination, 0o644)
except OSError:
# That can happen if multiple threads are simultaneously moving/chmod'ing this file
# Should be harmless, though this race condition should be avoided.
pass
|
def _write_integrated_tool_panel_config_file(self):
"""
Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
use this file to manage the tool panel, we'll not use xml_to_string() since it doesn't write XML quite right.
"""
tracking_directory = self._integrated_tool_panel_tracking_directory
if not tracking_directory:
fd, filename = tempfile.mkstemp()
else:
if not os.path.exists(tracking_directory):
os.makedirs(tracking_directory)
name = "integrated_tool_panel_%.10f.xml" % time.time()
filename = os.path.join(tracking_directory, name)
open_file = open(filename, "w")
fd = open_file.fileno()
os.write(fd, '<?xml version="1.0"?>\n')
os.write(fd, "<toolbox>\n")
os.write(fd, " <!--\n ")
os.write(
fd,
"\n ".join([l for l in INTEGRATED_TOOL_PANEL_DESCRIPTION.split("\n") if l]),
)
os.write(fd, "\n -->\n")
for key, item_type, item in self._integrated_tool_panel.panel_items_iter():
if item:
if item_type == panel_item_types.TOOL:
os.write(fd, ' <tool id="%s" />\n' % item.id)
elif item_type == panel_item_types.WORKFLOW:
os.write(fd, ' <workflow id="%s" />\n' % item.id)
elif item_type == panel_item_types.LABEL:
label_id = item.id or ""
label_text = item.text or ""
label_version = item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
elif item_type == panel_item_types.SECTION:
section_id = item.id or ""
section_name = item.name or ""
section_version = item.version or ""
os.write(
fd,
' <section id="%s" name="%s" version="%s">\n'
% (escape(section_id), escape(section_name), section_version),
)
for (
section_key,
section_item_type,
section_item,
) in item.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
if section_item:
os.write(fd, ' <tool id="%s" />\n' % section_item.id)
elif section_item_type == panel_item_types.WORKFLOW:
if section_item:
os.write(
fd, ' <workflow id="%s" />\n' % section_item.id
)
elif section_item_type == panel_item_types.LABEL:
if section_item:
label_id = section_item.id or ""
label_text = section_item.text or ""
label_version = section_item.version or ""
os.write(
fd,
' <label id="%s" text="%s" version="%s" />\n'
% (label_id, label_text, label_version),
)
os.write(fd, " </section>\n")
os.write(fd, "</toolbox>\n")
os.close(fd)
destination = os.path.abspath(self._integrated_tool_panel_config)
if tracking_directory:
open(filename + ".stack", "w").write("".join(traceback.format_stack()))
shutil.copy(filename, filename + ".copy")
filename = filename + ".copy"
shutil.move(filename, destination)
os.chmod(self._integrated_tool_panel_config, 0o644)
|
https://github.com/galaxyproject/galaxy/issues/5031
|
Exception in thread ToolConfWatcher.thread:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "lib/galaxy/tools/toolbox/watcher.py", line 138, in check
self.reload_callback()
File "lib/galaxy/webapps/galaxy/config_watchers.py", line 24, in <lambda>
self.tool_config_watcher = get_tool_conf_watcher(reload_callback=lambda: reload_toolbox(self.app), tool_cache=self.app.tool_cache)
File "lib/galaxy/queue_worker.py", line 92, in reload_toolbox
_get_new_toolbox(app)
File "lib/galaxy/queue_worker.py", line 111, in _get_new_toolbox
new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
File "lib/galaxy/tools/__init__.py", line 226, in __init__
app=app,
File "lib/galaxy/tools/toolbox/base.py", line 1061, in __init__
super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
File "lib/galaxy/tools/toolbox/base.py", line 87, in __init__
self._save_integrated_tool_panel()
File "lib/galaxy/tools/toolbox/integrated_panel.py", line 46, in _save_integrated_tool_panel
self._write_integrated_tool_panel_config_file()
File "lib/galaxy/tools/toolbox/integrated_panel.py", line 106, in _write_integrated_tool_panel_config_file
os.chmod(self._integrated_tool_panel_config, 0o644)
OSError: [Errno 2] No such file or directory: '/data/users/mvandenb/gx/config/integrated_tool_panel.xml'
|
OSError
|
def guess_shed_config(self, app, default=None):
tool_ids = []
metadata = self.metadata or {}
for tool in metadata.get("tools", []):
tool_ids.append(tool.get("guid"))
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
name = shed_tool_conf_dict["config_filename"]
for elem in shed_tool_conf_dict["config_elems"]:
if elem.tag == "tool":
for sub_elem in elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
elif elem.tag == "section":
for tool_elem in elem.findall("tool"):
for sub_elem in tool_elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
if self.includes_datatypes or self.includes_data_managers:
# We need to search by file paths here, which is less desirable.
tool_shed = common_util.remove_protocol_and_port_from_tool_shed_url(
self.tool_shed
)
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
tool_path = shed_tool_conf_dict["tool_path"]
relative_path = os.path.join(
tool_path,
tool_shed,
"repos",
self.owner,
self.name,
self.installed_changeset_revision,
)
if os.path.exists(relative_path):
self.shed_config_filename = shed_tool_conf_dict["config_filename"]
return shed_tool_conf_dict
return default
|
def guess_shed_config(self, app, default=None):
tool_ids = []
metadata = self.metadata or {}
for tool in metadata.get("tools", []):
tool_ids.append(tool.get("guid"))
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
name = shed_tool_conf_dict["config_filename"]
for elem in shed_tool_conf_dict["config_elems"]:
if elem.tag == "tool":
for sub_elem in elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
elif elem.tag == "section":
for tool_elem in elem.findall("tool"):
for sub_elem in tool_elem.findall("id"):
tool_id = sub_elem.text.strip()
if tool_id in tool_ids:
self.shed_config_filename = name
return shed_tool_conf_dict
if self.includes_datatypes:
# We need to search by file paths here, which is less desirable.
tool_shed = common_util.remove_protocol_and_port_from_tool_shed_url(
self.tool_shed
)
for shed_tool_conf_dict in app.toolbox.dynamic_confs(
include_migrated_tool_conf=True
):
tool_path = shed_tool_conf_dict["tool_path"]
relative_path = os.path.join(
tool_path,
tool_shed,
"repos",
self.owner,
self.name,
self.installed_changeset_revision,
)
if os.path.exists(relative_path):
self.shed_config_filename = shed_tool_conf_dict["config_filename"]
return shed_tool_conf_dict
return default
|
https://github.com/galaxyproject/galaxy/issues/5031
|
Exception in thread ToolConfWatcher.thread:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "lib/galaxy/tools/toolbox/watcher.py", line 138, in check
self.reload_callback()
File "lib/galaxy/webapps/galaxy/config_watchers.py", line 24, in <lambda>
self.tool_config_watcher = get_tool_conf_watcher(reload_callback=lambda: reload_toolbox(self.app), tool_cache=self.app.tool_cache)
File "lib/galaxy/queue_worker.py", line 92, in reload_toolbox
_get_new_toolbox(app)
File "lib/galaxy/queue_worker.py", line 111, in _get_new_toolbox
new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
File "lib/galaxy/tools/__init__.py", line 226, in __init__
app=app,
File "lib/galaxy/tools/toolbox/base.py", line 1061, in __init__
super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
File "lib/galaxy/tools/toolbox/base.py", line 87, in __init__
self._save_integrated_tool_panel()
File "lib/galaxy/tools/toolbox/integrated_panel.py", line 46, in _save_integrated_tool_panel
self._write_integrated_tool_panel_config_file()
File "lib/galaxy/tools/toolbox/integrated_panel.py", line 106, in _write_integrated_tool_panel_config_file
os.chmod(self._integrated_tool_panel_config, 0o644)
OSError: [Errno 2] No such file or directory: '/data/users/mvandenb/gx/config/integrated_tool_panel.xml'
|
OSError
|
def to_xml_file(self, shed_tool_data_table_config, new_elems=None, remove_elems=None):
"""
Write the current in-memory version of the shed_tool_data_table_conf.xml file to disk.
remove_elems are removed before new_elems are added.
"""
if not (new_elems or remove_elems):
log.debug(
"ToolDataTableManager.to_xml_file called without any elements to add or remove."
)
return # no changes provided, no need to persist any changes
if not new_elems:
new_elems = []
if not remove_elems:
remove_elems = []
full_path = os.path.abspath(shed_tool_data_table_config)
# FIXME: we should lock changing this file by other threads / head nodes
try:
tree = util.parse_xml(full_path)
root = tree.getroot()
out_elems = [elem for elem in root]
except Exception as e:
out_elems = []
log.debug(
"Could not parse existing tool data table config, assume no existing elements: %s",
e,
)
for elem in remove_elems:
# handle multiple occurrences of remove elem in existing elems
while elem in out_elems:
remove_elems.remove(elem)
# add new elems
out_elems.extend(new_elems)
out_path_is_new = not os.path.exists(full_path)
with RenamedTemporaryFile(full_path) as out:
out.write('<?xml version="1.0"?>\n<tables>\n')
for elem in out_elems:
out.write(util.xml_to_string(elem, pretty=True))
out.write("</tables>\n")
os.chmod(full_path, 0o644)
if out_path_is_new:
self.tool_data_path_files.update_files()
|
def to_xml_file(self, shed_tool_data_table_config, new_elems=None, remove_elems=None):
"""
Write the current in-memory version of the shed_tool_data_table_conf.xml file to disk.
remove_elems are removed before new_elems are added.
"""
if not (new_elems or remove_elems):
log.debug(
"ToolDataTableManager.to_xml_file called without any elements to add or remove."
)
return # no changes provided, no need to persist any changes
if not new_elems:
new_elems = []
if not remove_elems:
remove_elems = []
full_path = os.path.abspath(shed_tool_data_table_config)
# FIXME: we should lock changing this file by other threads / head nodes
try:
tree = util.parse_xml(full_path)
root = tree.getroot()
out_elems = [elem for elem in root]
except Exception as e:
out_elems = []
log.debug(
"Could not parse existing tool data table config, assume no existing elements: %s",
e,
)
for elem in remove_elems:
# handle multiple occurrences of remove elem in existing elems
while elem in out_elems:
remove_elems.remove(elem)
# add new elems
out_elems.extend(new_elems)
out_path_is_new = not os.path.exists(full_path)
with open(full_path, "wb") as out:
out.write('<?xml version="1.0"?>\n<tables>\n')
for elem in out_elems:
out.write(util.xml_to_string(elem, pretty=True))
out.write("</tables>\n")
os.chmod(full_path, 0o644)
if out_path_is_new:
self.tool_data_path_files.update_files()
|
https://github.com/galaxyproject/galaxy/issues/5031
|
Exception in thread ToolConfWatcher.thread:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "lib/galaxy/tools/toolbox/watcher.py", line 138, in check
self.reload_callback()
File "lib/galaxy/webapps/galaxy/config_watchers.py", line 24, in <lambda>
self.tool_config_watcher = get_tool_conf_watcher(reload_callback=lambda: reload_toolbox(self.app), tool_cache=self.app.tool_cache)
File "lib/galaxy/queue_worker.py", line 92, in reload_toolbox
_get_new_toolbox(app)
File "lib/galaxy/queue_worker.py", line 111, in _get_new_toolbox
new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
File "lib/galaxy/tools/__init__.py", line 226, in __init__
app=app,
File "lib/galaxy/tools/toolbox/base.py", line 1061, in __init__
super(BaseGalaxyToolBox, self).__init__(config_filenames, tool_root_dir, app)
File "lib/galaxy/tools/toolbox/base.py", line 87, in __init__
self._save_integrated_tool_panel()
File "lib/galaxy/tools/toolbox/integrated_panel.py", line 46, in _save_integrated_tool_panel
self._write_integrated_tool_panel_config_file()
File "lib/galaxy/tools/toolbox/integrated_panel.py", line 106, in _write_integrated_tool_panel_config_file
os.chmod(self._integrated_tool_panel_config, 0o644)
OSError: [Errno 2] No such file or directory: '/data/users/mvandenb/gx/config/integrated_tool_panel.xml'
|
OSError
|
def pause(self, job=None, message=None):
if job is None:
job = self.get_job()
if message is None:
message = "Execution of this dataset's job is paused"
if job.state == job.states.NEW:
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset_assoc.dataset.dataset.state = (
dataset_assoc.dataset.dataset.states.PAUSED
)
dataset_assoc.dataset.info = message
self.sa_session.add(dataset_assoc.dataset)
log.debug("Pausing Job '%d', %s", job.id, message)
job.set_state(job.states.PAUSED)
self.sa_session.add(job)
|
def pause(self, job=None, message=None):
if job is None:
job = self.get_job()
if message is None:
message = "Execution of this dataset's job is paused"
if job.state == job.states.NEW:
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset_assoc.dataset.dataset.state = (
dataset_assoc.dataset.dataset.states.PAUSED
)
dataset_assoc.dataset.info = message
self.sa_session.add(dataset_assoc.dataset)
job.set_state(job.states.PAUSED)
self.sa_session.add(job)
|
https://github.com/galaxyproject/galaxy/issues/5222
|
galaxy.jobs.handler DEBUG 2017-12-15 09:04:34,777 (3332) Dispatching to slurm runner
galaxy.jobs DEBUG 2017-12-15 09:04:34,808 (3332) Persisting job destination (destination id: cluster16c40)
galaxy.jobs.runners DEBUG 2017-12-15 09:04:34,815 Job [3332] queued (37.893 ms)
galaxy.jobs.handler INFO 2017-12-15 09:04:34,822 (3332) Job dispatched
galaxy.jobs.runners ERROR 2017-12-15 09:04:34,874 (3332) Failure preparing job
Traceback (most recent call last):
File "/galaxy/lib/galaxy/jobs/runners/__init__.py", line 170, in prepare_job
job_wrapper.prepare()
File "/galaxy/lib/galaxy/jobs/__init__.py", line 878, in prepare
tool_evaluator.set_compute_environment(compute_environment, get_special=get_special)
File "/galaxy/lib/galaxy/tools/evaluation.py", line 73, in set_compute_environment
visit_input_values(self.tool.inputs, incoming, validate_inputs)
File "/galaxy/lib/galaxy/tools/parameters/__init__.py", line 106, in visit_input_values
visit_input_values(input.cases[values['__current_case__']].inputs, values, callback, new_name_prefix, label_prefix, parent_prefix=name_prefix, **payload)
File "/galaxy/lib/galaxy/tools/parameters/__init__.py", line 112, in visit_input_values
callback_helper(input, input_values, name_prefix, label_prefix, parent_prefix=parent_prefix, context=context)
File "/galaxy/lib/galaxy/tools/parameters/__init__.py", line 77, in callback_helper
new_value = callback(**args)
File "/galaxy/lib/galaxy/tools/evaluation.py", line 71, in validate_inputs
value = input.from_json(value, request_context, context)
File "/galaxy/lib/galaxy/tools/parameters/basic.py", line 1676, in from_json
raise ValueError("The previously selected dataset has entered an unusable state")
ValueError: The previously selected dataset has entered an unusable state
|
ValueError
|
def execute(
self,
tool,
trans,
incoming={},
return_job=False,
set_output_hid=True,
history=None,
job_params=None,
rerun_remap_job_id=None,
execution_cache=None,
dataset_collection_elements=None,
):
"""
Executes a tool, creating job and tool outputs, associating them, and
submitting the job to the job queue. If history is not specified, use
trans.history as destination for tool's output datasets.
"""
self._check_access(tool, trans)
app = trans.app
if execution_cache is None:
execution_cache = ToolExecutionCache(trans)
current_user_roles = execution_cache.current_user_roles
history, inp_data, inp_dataset_collections = self._collect_inputs(
tool, trans, incoming, history, current_user_roles
)
# Build name for output datasets based on tool name and input names
on_text = self._get_on_text(inp_data)
# format='input" previously would give you a random extension from
# the input extensions, now it should just give "input" as the output
# format.
input_ext = "data" if tool.profile < 16.04 else "input"
input_dbkey = incoming.get("dbkey", "?")
preserved_tags = {}
for name, data in reversed(inp_data.items()):
if not data:
data = NoneDataset(datatypes_registry=app.datatypes_registry)
continue
# Convert LDDA to an HDA.
if isinstance(data, LibraryDatasetDatasetAssociation):
data = data.to_history_dataset_association(None)
inp_data[name] = data
if tool.profile < 16.04:
input_ext = data.ext
if data.dbkey not in [None, "?"]:
input_dbkey = data.dbkey
identifier = getattr(data, "element_identifier", None)
if identifier is not None:
incoming["%s|__identifier__" % name] = identifier
for tag in [t for t in data.tags if t.user_tname == "name"]:
preserved_tags[tag.value] = tag
# Collect chromInfo dataset and add as parameters to incoming
(chrom_info, db_dataset) = app.genome_builds.get_chrom_info(
input_dbkey,
trans=trans,
custom_build_hack_get_len_from_fasta_conversion=tool.id
!= "CONVERTER_fasta_to_len",
)
if db_dataset:
inp_data.update({"chromInfo": db_dataset})
incoming["chromInfo"] = chrom_info
# Determine output dataset permission/roles list
existing_datasets = [inp for inp in inp_data.values() if inp]
if existing_datasets:
output_permissions = app.security_agent.guess_derived_permissions_for_datasets(
existing_datasets
)
else:
# No valid inputs, we will use history defaults
output_permissions = app.security_agent.history_get_default_permissions(history)
# Add the dbkey to the incoming parameters
incoming["dbkey"] = input_dbkey
# wrapped params are used by change_format action and by output.label; only perform this wrapping once, as needed
wrapped_params = self._wrapped_params(trans, tool, incoming, inp_data)
out_data = odict()
input_collections = dict((k, v[0][0]) for k, v in inp_dataset_collections.items())
output_collections = OutputCollections(
trans,
history,
tool=tool,
tool_action=self,
input_collections=input_collections,
dataset_collection_elements=dataset_collection_elements,
on_text=on_text,
incoming=incoming,
params=wrapped_params.params,
job_params=job_params,
)
# Keep track of parent / child relationships, we'll create all the
# datasets first, then create the associations
parent_to_child_pairs = []
child_dataset_names = set()
object_store_populator = ObjectStorePopulator(app)
def handle_output(name, output, hidden=None):
if output.parent:
parent_to_child_pairs.append((output.parent, name))
child_dataset_names.add(name)
# What is the following hack for? Need to document under what
# conditions can the following occur? (james@bx.psu.edu)
# HACK: the output data has already been created
# this happens i.e. as a result of the async controller
if name in incoming:
dataid = incoming[name]
data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(
dataid
)
assert data is not None
out_data[name] = data
else:
ext = determine_output_format(
output,
wrapped_params.params,
inp_data,
inp_dataset_collections,
input_ext,
)
data = app.model.HistoryDatasetAssociation(
extension=ext, create_dataset=True, flush=False
)
if hidden is None:
hidden = output.hidden
if (
not hidden and dataset_collection_elements is not None
): # Mapping over a collection - hide datasets
hidden = True
if hidden:
data.visible = False
if (
dataset_collection_elements is not None
and name in dataset_collection_elements
):
dataset_collection_elements[name].hda = data
trans.sa_session.add(data)
trans.app.security_agent.set_all_dataset_permissions(
data.dataset, output_permissions, new=True
)
for _, tag in preserved_tags.items():
data.tags.append(tag.copy())
# Must flush before setting object store id currently.
# TODO: optimize this.
trans.sa_session.flush()
object_store_populator.set_object_store_id(data)
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
# metadata source can be either a string referencing an input
# or an actual object to copy.
metadata_source = output.metadata_source
if metadata_source:
if isinstance(metadata_source, string_types):
metadata_source = inp_data.get(metadata_source)
if metadata_source is not None:
data.init_meta(copy_from=metadata_source)
else:
data.init_meta()
# Take dbkey from LAST input
data.dbkey = str(input_dbkey)
# Set state
data.blurb = "queued"
# Set output label
data.name = self.get_output_name(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Store output
out_data[name] = data
if output.actions:
# Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format
output_action_params = dict(out_data)
output_action_params.update(incoming)
output.actions.apply_action(data, output_action_params)
# Also set the default values of actions of type metadata
self.set_metadata_defaults(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Flush all datasets at once.
return data
for name, output in tool.outputs.items():
if not filter_output(output, incoming):
handle_output_timer = ExecutionTimer()
if output.collection:
collections_manager = app.dataset_collections_service
element_identifiers = []
known_outputs = output.known_outputs(
input_collections, collections_manager.type_registry
)
# Just to echo TODO elsewhere - this should be restructured to allow
# nested collections.
for output_part_def in known_outputs:
# Add elements to top-level collection, unless nested...
current_element_identifiers = element_identifiers
current_collection_type = output.structure.collection_type
for parent_id in output_part_def.parent_ids or []:
# TODO: replace following line with formal abstractions for doing this.
current_collection_type = ":".join(
current_collection_type.split(":")[1:]
)
name_to_index = dict(
(value["name"], index)
for (index, value) in enumerate(current_element_identifiers)
)
if parent_id not in name_to_index:
if parent_id not in current_element_identifiers:
index = len(current_element_identifiers)
current_element_identifiers.append(
dict(
name=parent_id,
collection_type=current_collection_type,
src="new_collection",
element_identifiers=[],
)
)
else:
index = name_to_index[parent_id]
current_element_identifiers = current_element_identifiers[
index
]["element_identifiers"]
effective_output_name = output_part_def.effective_output_name
element = handle_output(
effective_output_name, output_part_def.output_def, hidden=True
)
# TODO: this shouldn't exist in the top-level of the history at all
# but for now we are still working around that by hiding the contents
# there.
# Following hack causes dataset to no be added to history...
child_dataset_names.add(effective_output_name)
history.add_dataset(element, set_hid=set_output_hid, quota=False)
trans.sa_session.add(element)
trans.sa_session.flush()
current_element_identifiers.append(
{
"__object__": element,
"name": output_part_def.element_identifier,
}
)
log.info(element_identifiers)
if output.dynamic_structure:
assert not element_identifiers # known_outputs must have been empty
element_kwds = dict(
elements=collections_manager.ELEMENTS_UNINITIALIZED
)
else:
element_kwds = dict(element_identifiers=element_identifiers)
output_collections.create_collection(
output=output, name=name, tags=preserved_tags, **element_kwds
)
log.info(
"Handled collection output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
else:
handle_output(name, output)
log.info(
"Handled output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
add_datasets_timer = ExecutionTimer()
# Add all the top-level (non-child) datasets to the history unless otherwise specified
datasets_to_persist = []
for name in out_data.keys():
if (
name not in child_dataset_names and name not in incoming
): # don't add children; or already existing datasets, i.e. async created
data = out_data[name]
datasets_to_persist.append(data)
# Set HID and add to history.
# This is brand new and certainly empty so don't worry about quota.
# TOOL OPTIMIZATION NOTE - from above loop to the job create below 99%+
# of execution time happens within in history.add_datasets.
history.add_datasets(
trans.sa_session,
datasets_to_persist,
set_hid=set_output_hid,
quota=False,
flush=False,
)
# Add all the children to their parents
for parent_name, child_name in parent_to_child_pairs:
parent_dataset = out_data[parent_name]
child_dataset = out_data[child_name]
parent_dataset.children.append(child_dataset)
log.info("Added output datasets to history %s" % add_datasets_timer)
job_setup_timer = ExecutionTimer()
# Create the job object
job, galaxy_session = self._new_job_for_session(trans, tool, history)
self._record_inputs(
trans,
tool,
job,
incoming,
inp_data,
inp_dataset_collections,
current_user_roles,
)
self._record_outputs(job, out_data, output_collections)
job.object_store_id = object_store_populator.object_store_id
if job_params:
job.params = dumps(job_params)
job.set_handler(tool.get_job_handler(job_params))
trans.sa_session.add(job)
# Now that we have a job id, we can remap any outputs if this is a rerun and the user chose to continue dependent jobs
# This functionality requires tracking jobs in the database.
if app.config.track_jobs_in_database and rerun_remap_job_id is not None:
self._remap_job_on_rerun(
trans=trans,
galaxy_session=galaxy_session,
rerun_remap_job_id=rerun_remap_job_id,
current_job=job,
out_data=out_data,
)
log.info(
"Setup for job %s complete, ready to flush %s"
% (job.log_str(), job_setup_timer)
)
job_flush_timer = ExecutionTimer()
trans.sa_session.flush()
log.info("Flushed transaction for job %s %s" % (job.log_str(), job_flush_timer))
# Some tools are not really executable, but jobs are still created for them ( for record keeping ).
# Examples include tools that redirect to other applications ( epigraph ). These special tools must
# include something that can be retrieved from the params ( e.g., REDIRECT_URL ) to keep the job
# from being queued.
if "REDIRECT_URL" in incoming:
# Get the dataset - there should only be 1
for name in inp_data.keys():
dataset = inp_data[name]
redirect_url = tool.parse_redirect_url(dataset, incoming)
# GALAXY_URL should be include in the tool params to enable the external application
# to send back to the current Galaxy instance
GALAXY_URL = incoming.get("GALAXY_URL", None)
assert GALAXY_URL is not None, "GALAXY_URL parameter missing in tool config."
redirect_url += "&GALAXY_URL=%s" % GALAXY_URL
# Job should not be queued, so set state to ok
job.set_state(app.model.Job.states.OK)
job.info = "Redirected to: %s" % redirect_url
trans.sa_session.add(job)
trans.sa_session.flush()
trans.response.send_redirect(
url_for(
controller="tool_runner", action="redirect", redirect_url=redirect_url
)
)
else:
# Put the job in the queue if tracking in memory
app.job_manager.job_queue.put(job.id, job.tool_id)
trans.log_event(
"Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id
)
return job, out_data
|
def execute(
self,
tool,
trans,
incoming={},
return_job=False,
set_output_hid=True,
history=None,
job_params=None,
rerun_remap_job_id=None,
execution_cache=None,
dataset_collection_elements=None,
):
"""
Executes a tool, creating job and tool outputs, associating them, and
submitting the job to the job queue. If history is not specified, use
trans.history as destination for tool's output datasets.
"""
self._check_access(tool, trans)
app = trans.app
if execution_cache is None:
execution_cache = ToolExecutionCache(trans)
current_user_roles = execution_cache.current_user_roles
history, inp_data, inp_dataset_collections = self._collect_inputs(
tool, trans, incoming, history, current_user_roles
)
# Build name for output datasets based on tool name and input names
on_text = self._get_on_text(inp_data)
# format='input" previously would give you a random extension from
# the input extensions, now it should just give "input" as the output
# format.
input_ext = "data" if tool.profile < 16.04 else "input"
input_dbkey = incoming.get("dbkey", "?")
preserved_tags = {}
for name, data in reversed(inp_data.items()):
if not data:
data = NoneDataset(datatypes_registry=app.datatypes_registry)
continue
# Convert LDDA to an HDA.
if isinstance(data, LibraryDatasetDatasetAssociation):
data = data.to_history_dataset_association(None)
inp_data[name] = data
if tool.profile < 16.04:
input_ext = data.ext
if data.dbkey not in [None, "?"]:
input_dbkey = data.dbkey
identifier = getattr(data, "element_identifier", None)
if identifier is not None:
incoming["%s|__identifier__" % name] = identifier
for tag in [t for t in data.tags if t.user_tname == "name"]:
preserved_tags[tag.value] = tag
# Collect chromInfo dataset and add as parameters to incoming
(chrom_info, db_dataset) = app.genome_builds.get_chrom_info(
input_dbkey,
trans=trans,
custom_build_hack_get_len_from_fasta_conversion=tool.id
!= "CONVERTER_fasta_to_len",
)
if db_dataset:
inp_data.update({"chromInfo": db_dataset})
incoming["chromInfo"] = chrom_info
# Determine output dataset permission/roles list
existing_datasets = [inp for inp in inp_data.values() if inp]
if existing_datasets:
output_permissions = app.security_agent.guess_derived_permissions_for_datasets(
existing_datasets
)
else:
# No valid inputs, we will use history defaults
output_permissions = app.security_agent.history_get_default_permissions(history)
# Add the dbkey to the incoming parameters
incoming["dbkey"] = input_dbkey
# wrapped params are used by change_format action and by output.label; only perform this wrapping once, as needed
wrapped_params = self._wrapped_params(trans, tool, incoming, inp_data)
out_data = odict()
input_collections = dict((k, v[0][0]) for k, v in inp_dataset_collections.items())
output_collections = OutputCollections(
trans,
history,
tool=tool,
tool_action=self,
input_collections=input_collections,
dataset_collection_elements=dataset_collection_elements,
on_text=on_text,
incoming=incoming,
params=wrapped_params.params,
job_params=job_params,
)
# Keep track of parent / child relationships, we'll create all the
# datasets first, then create the associations
parent_to_child_pairs = []
child_dataset_names = set()
object_store_populator = ObjectStorePopulator(app)
def handle_output(name, output, hidden=None):
if output.parent:
parent_to_child_pairs.append((output.parent, name))
child_dataset_names.add(name)
# What is the following hack for? Need to document under what
# conditions can the following occur? (james@bx.psu.edu)
# HACK: the output data has already been created
# this happens i.e. as a result of the async controller
if name in incoming:
dataid = incoming[name]
data = trans.sa_session.query(app.model.HistoryDatasetAssociation).get(
dataid
)
assert data is not None
out_data[name] = data
else:
ext = determine_output_format(
output,
wrapped_params.params,
inp_data,
inp_dataset_collections,
input_ext,
)
data = app.model.HistoryDatasetAssociation(
extension=ext, create_dataset=True, flush=False
)
if hidden is None:
hidden = output.hidden
if (
not hidden and dataset_collection_elements is not None
): # Mapping over a collection - hide datasets
hidden = True
if hidden:
data.visible = False
if (
dataset_collection_elements is not None
and name in dataset_collection_elements
):
dataset_collection_elements[name].hda = data
trans.sa_session.add(data)
trans.app.security_agent.set_all_dataset_permissions(
data.dataset, output_permissions, new=True
)
for _, tag in preserved_tags.items():
data.tags.append(tag.copy())
# Must flush before setting object store id currently.
# TODO: optimize this.
trans.sa_session.flush()
object_store_populator.set_object_store_id(data)
# This may not be neccesary with the new parent/child associations
data.designation = name
# Copy metadata from one of the inputs if requested.
# metadata source can be either a string referencing an input
# or an actual object to copy.
metadata_source = output.metadata_source
if metadata_source:
if isinstance(metadata_source, string_types):
metadata_source = inp_data.get(metadata_source)
if metadata_source is not None:
data.init_meta(copy_from=metadata_source)
else:
data.init_meta()
# Take dbkey from LAST input
data.dbkey = str(input_dbkey)
# Set state
data.blurb = "queued"
# Set output label
data.name = self.get_output_name(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Store output
out_data[name] = data
if output.actions:
# Apply pre-job tool-output-dataset actions; e.g. setting metadata, changing format
output_action_params = dict(out_data)
output_action_params.update(incoming)
output.actions.apply_action(data, output_action_params)
# Also set the default values of actions of type metadata
self.set_metadata_defaults(
output,
data,
tool,
on_text,
trans,
incoming,
history,
wrapped_params.params,
job_params,
)
# Flush all datasets at once.
return data
for name, output in tool.outputs.items():
if not filter_output(output, incoming):
handle_output_timer = ExecutionTimer()
if output.collection:
collections_manager = app.dataset_collections_service
element_identifiers = []
known_outputs = output.known_outputs(
input_collections, collections_manager.type_registry
)
# Just to echo TODO elsewhere - this should be restructured to allow
# nested collections.
for output_part_def in known_outputs:
# Add elements to top-level collection, unless nested...
current_element_identifiers = element_identifiers
current_collection_type = output.structure.collection_type
for parent_id in output_part_def.parent_ids or []:
# TODO: replace following line with formal abstractions for doing this.
current_collection_type = ":".join(
current_collection_type.split(":")[1:]
)
name_to_index = dict(
(value["name"], index)
for (index, value) in enumerate(current_element_identifiers)
)
if parent_id not in name_to_index:
if parent_id not in current_element_identifiers:
index = len(current_element_identifiers)
current_element_identifiers.append(
dict(
name=parent_id,
collection_type=current_collection_type,
src="new_collection",
element_identifiers=[],
)
)
else:
index = name_to_index[parent_id]
current_element_identifiers = current_element_identifiers[
index
]["element_identifiers"]
effective_output_name = output_part_def.effective_output_name
element = handle_output(
effective_output_name, output_part_def.output_def, hidden=True
)
# TODO: this shouldn't exist in the top-level of the history at all
# but for now we are still working around that by hiding the contents
# there.
# Following hack causes dataset to no be added to history...
child_dataset_names.add(effective_output_name)
history.add_dataset(element, set_hid=set_output_hid, quota=False)
trans.sa_session.add(element)
trans.sa_session.flush()
current_element_identifiers.append(
{
"__object__": element,
"name": output_part_def.element_identifier,
}
)
log.info(element_identifiers)
if output.dynamic_structure:
assert not element_identifiers # known_outputs must have been empty
element_kwds = dict(
elements=collections_manager.ELEMENTS_UNINITIALIZED
)
else:
element_kwds = dict(element_identifiers=element_identifiers)
output_collections.create_collection(
output=output, name=name, tags=preserved_tags, **element_kwds
)
log.info(
"Handled collection output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
else:
handle_output(name, output)
log.info(
"Handled output named %s for tool %s %s"
% (name, tool.id, handle_output_timer)
)
add_datasets_timer = ExecutionTimer()
# Add all the top-level (non-child) datasets to the history unless otherwise specified
datasets_to_persist = []
for name in out_data.keys():
if (
name not in child_dataset_names and name not in incoming
): # don't add children; or already existing datasets, i.e. async created
data = out_data[name]
datasets_to_persist.append(data)
# Set HID and add to history.
# This is brand new and certainly empty so don't worry about quota.
# TOOL OPTIMIZATION NOTE - from above loop to the job create below 99%+
# of execution time happens within in history.add_datasets.
history.add_datasets(
trans.sa_session,
datasets_to_persist,
set_hid=set_output_hid,
quota=False,
flush=False,
)
# Add all the children to their parents
for parent_name, child_name in parent_to_child_pairs:
parent_dataset = out_data[parent_name]
child_dataset = out_data[child_name]
parent_dataset.children.append(child_dataset)
log.info("Added output datasets to history %s" % add_datasets_timer)
job_setup_timer = ExecutionTimer()
# Create the job object
job, galaxy_session = self._new_job_for_session(trans, tool, history)
self._record_inputs(
trans,
tool,
job,
incoming,
inp_data,
inp_dataset_collections,
current_user_roles,
)
self._record_outputs(job, out_data, output_collections)
job.object_store_id = object_store_populator.object_store_id
if job_params:
job.params = dumps(job_params)
job.set_handler(tool.get_job_handler(job_params))
trans.sa_session.add(job)
# Now that we have a job id, we can remap any outputs if this is a rerun and the user chose to continue dependent jobs
# This functionality requires tracking jobs in the database.
if app.config.track_jobs_in_database and rerun_remap_job_id is not None:
try:
old_job = trans.sa_session.query(app.model.Job).get(rerun_remap_job_id)
assert old_job is not None, "(%s/%s): Old job id is invalid" % (
rerun_remap_job_id,
job.id,
)
assert old_job.tool_id == job.tool_id, (
"(%s/%s): Old tool id (%s) does not match rerun tool id (%s)"
% (old_job.id, job.id, old_job.tool_id, job.tool_id)
)
if trans.user is not None:
assert old_job.user_id == trans.user.id, (
"(%s/%s): Old user id (%s) does not match rerun user id (%s)"
% (old_job.id, job.id, old_job.user_id, trans.user.id)
)
elif (
trans.user is None and type(galaxy_session) == trans.model.GalaxySession
):
assert old_job.session_id == galaxy_session.id, (
"(%s/%s): Old session id (%s) does not match rerun session id (%s)"
% (old_job.id, job.id, old_job.session_id, galaxy_session.id)
)
else:
raise Exception(
"(%s/%s): Remapping via the API is not (yet) supported"
% (old_job.id, job.id)
)
# Duplicate PJAs before remap.
for pjaa in old_job.post_job_actions:
job.add_post_job_action(pjaa.post_job_action)
for jtod in old_job.output_datasets:
for job_to_remap, jtid in [
(jtid.job, jtid) for jtid in jtod.dataset.dependent_jobs
]:
if (
trans.user is not None and job_to_remap.user_id == trans.user.id
) or (
trans.user is None
and job_to_remap.session_id == galaxy_session.id
):
if job_to_remap.state == job_to_remap.states.PAUSED:
job_to_remap.state = job_to_remap.states.NEW
for hda in [
dep_jtod.dataset
for dep_jtod in job_to_remap.output_datasets
]:
if hda.state == hda.states.PAUSED:
hda.state = hda.states.NEW
hda.info = None
input_values = dict(
[
(p.name, json.loads(p.value))
for p in job_to_remap.parameters
]
)
update_param(
jtid.name, input_values, str(out_data[jtod.name].id)
)
for p in job_to_remap.parameters:
p.value = json.dumps(input_values[p.name])
jtid.dataset = out_data[jtod.name]
jtid.dataset.hid = jtod.dataset.hid
log.info(
"Job %s input HDA %s remapped to new HDA %s"
% (job_to_remap.id, jtod.dataset.id, jtid.dataset.id)
)
trans.sa_session.add(job_to_remap)
trans.sa_session.add(jtid)
jtod.dataset.visible = False
trans.sa_session.add(jtod)
except Exception:
log.exception("Cannot remap rerun dependencies.")
log.info(
"Setup for job %s complete, ready to flush %s"
% (job.log_str(), job_setup_timer)
)
job_flush_timer = ExecutionTimer()
trans.sa_session.flush()
log.info("Flushed transaction for job %s %s" % (job.log_str(), job_flush_timer))
# Some tools are not really executable, but jobs are still created for them ( for record keeping ).
# Examples include tools that redirect to other applications ( epigraph ). These special tools must
# include something that can be retrieved from the params ( e.g., REDIRECT_URL ) to keep the job
# from being queued.
if "REDIRECT_URL" in incoming:
# Get the dataset - there should only be 1
for name in inp_data.keys():
dataset = inp_data[name]
redirect_url = tool.parse_redirect_url(dataset, incoming)
# GALAXY_URL should be include in the tool params to enable the external application
# to send back to the current Galaxy instance
GALAXY_URL = incoming.get("GALAXY_URL", None)
assert GALAXY_URL is not None, "GALAXY_URL parameter missing in tool config."
redirect_url += "&GALAXY_URL=%s" % GALAXY_URL
# Job should not be queued, so set state to ok
job.set_state(app.model.Job.states.OK)
job.info = "Redirected to: %s" % redirect_url
trans.sa_session.add(job)
trans.sa_session.flush()
trans.response.send_redirect(
url_for(
controller="tool_runner", action="redirect", redirect_url=redirect_url
)
)
else:
# Put the job in the queue if tracking in memory
app.job_manager.job_queue.put(job.id, job.tool_id)
trans.log_event(
"Added job to the job queue, id: %s" % str(job.id), tool_id=job.tool_id
)
return job, out_data
|
https://github.com/galaxyproject/galaxy/issues/5222
|
galaxy.jobs.handler DEBUG 2017-12-15 09:04:34,777 (3332) Dispatching to slurm runner
galaxy.jobs DEBUG 2017-12-15 09:04:34,808 (3332) Persisting job destination (destination id: cluster16c40)
galaxy.jobs.runners DEBUG 2017-12-15 09:04:34,815 Job [3332] queued (37.893 ms)
galaxy.jobs.handler INFO 2017-12-15 09:04:34,822 (3332) Job dispatched
galaxy.jobs.runners ERROR 2017-12-15 09:04:34,874 (3332) Failure preparing job
Traceback (most recent call last):
File "/galaxy/lib/galaxy/jobs/runners/__init__.py", line 170, in prepare_job
job_wrapper.prepare()
File "/galaxy/lib/galaxy/jobs/__init__.py", line 878, in prepare
tool_evaluator.set_compute_environment(compute_environment, get_special=get_special)
File "/galaxy/lib/galaxy/tools/evaluation.py", line 73, in set_compute_environment
visit_input_values(self.tool.inputs, incoming, validate_inputs)
File "/galaxy/lib/galaxy/tools/parameters/__init__.py", line 106, in visit_input_values
visit_input_values(input.cases[values['__current_case__']].inputs, values, callback, new_name_prefix, label_prefix, parent_prefix=name_prefix, **payload)
File "/galaxy/lib/galaxy/tools/parameters/__init__.py", line 112, in visit_input_values
callback_helper(input, input_values, name_prefix, label_prefix, parent_prefix=parent_prefix, context=context)
File "/galaxy/lib/galaxy/tools/parameters/__init__.py", line 77, in callback_helper
new_value = callback(**args)
File "/galaxy/lib/galaxy/tools/evaluation.py", line 71, in validate_inputs
value = input.from_json(value, request_context, context)
File "/galaxy/lib/galaxy/tools/parameters/basic.py", line 1676, in from_json
raise ValueError("The previously selected dataset has entered an unusable state")
ValueError: The previously selected dataset has entered an unusable state
|
ValueError
|
def shutdown(self):
self.watchers.shutdown()
self.workflow_scheduling_manager.shutdown()
self.job_manager.shutdown()
self.object_store.shutdown()
if self.heartbeat:
self.heartbeat.shutdown()
self.update_repository_manager.shutdown()
try:
self.control_worker.shutdown()
except AttributeError:
# There is no control_worker
pass
|
def shutdown(self):
self.workflow_scheduling_manager.shutdown()
self.job_manager.shutdown()
self.object_store.shutdown()
if self.heartbeat:
self.heartbeat.shutdown()
self.update_repository_manager.shutdown()
try:
self.control_worker.shutdown()
except AttributeError:
# There is no control_worker
pass
|
https://github.com/galaxyproject/galaxy/issues/4738
|
galaxy.tools.deps.installable WARNING 2017-10-03 12:25:28,961 Conda not installed and auto-installation disabled.
galaxy.tools.deps.installable WARNING 2017-10-03 12:25:29,016 Conda not installed and auto-installation disabled.
galaxy.datatypes.registry DEBUG 2017-10-03 12:25:29,034 Loaded external metadata tool: __SET_METADATA__
galaxy.tools.special_tools DEBUG 2017-10-03 12:25:29,048 Loaded history import tool: __IMPORT_HISTORY__
galaxy.tools.special_tools DEBUG 2017-10-03 12:25:29,057 Loaded history export tool: __EXPORT_HISTORY__
galaxy.queue_worker INFO 2017-10-03 12:25:29,060 Queuing async task rebuild_toolbox_search_index for main.
galaxy.queue_worker DEBUG 2017-10-03 12:25:29,074 Toolbox reload (324.466 ms)
ok
Check the count of repositories in the database named filtering_1470 and owned by user1. ... ok
----------------------------------------------------------------------
XML: /galaxy/xunit-7193.xml
----------------------------------------------------------------------
Ran 579 tests in 922.170s
OK
test_driver INFO 2017-10-03 12:25:30,298 Shutting down
test_driver INFO 2017-10-03 12:25:30,299 Shutting down embedded tool_shed web server
test_driver INFO 2017-10-03 12:25:30,302 Embedded web server tool_shed stopped
test_driver INFO 2017-10-03 12:25:30,303 Stopping application tool_shed
test_driver INFO 2017-10-03 12:25:30,303 Application tool_shed stopped.
test_driver INFO 2017-10-03 12:25:30,303 Shutting down embedded galaxy web server
test_driver INFO 2017-10-03 12:25:30,324 Embedded web server galaxy stopped
test_driver INFO 2017-10-03 12:25:30,328 Stopping application galaxy
galaxy.jobs.handler INFO 2017-10-03 12:25:30,328 sending stop signal to worker thread
galaxy.jobs.handler INFO 2017-10-03 12:25:30,328 job handler queue stopped
galaxy.jobs.runners INFO 2017-10-03 12:25:30,328 TaskRunner: Sending stop signal to 2 worker threads
galaxy.jobs.runners INFO 2017-10-03 12:25:30,328 LocalRunner: Sending stop signal to 5 worker threads
galaxy.jobs.handler INFO 2017-10-03 12:25:30,329 sending stop signal to worker thread
galaxy.jobs.handler INFO 2017-10-03 12:25:30,330 job handler stop queue stopped
test_driver INFO 2017-10-03 12:25:30,331 Application galaxy stopped.
tool_shed.galaxy_install.update_repository_manager INFO 2017-10-03 12:25:30,359 Update repository manager restarter shutting down...
galaxy.queue_worker DEBUG 2017-10-03 12:25:31,083 Executing toolbox reload on 'main'
galaxy.tools.toolbox.base INFO 2017-10-03 12:25:31,098 Parsing the tool configuration /galaxy/test/functional/tools/upload_tool_conf.xml
galaxy.tools.toolbox.base INFO 2017-10-03 12:25:31,099 Parsing the tool configuration /tmp/tmpDL4NYD/test_shed_tool_conf.xml
galaxy.tools.toolbox.base ERROR 2017-10-03 12:25:31,099 Error loading tools defined in config /tmp/tmpDL4NYD/test_shed_tool_conf.xml
Traceback (most recent call last):
File "/galaxy/lib/galaxy/tools/toolbox/base.py", line 114, in _init_tools_from_configs
self._init_tools_from_config(config_filename)
File "/galaxy/lib/galaxy/tools/toolbox/base.py", line 148, in _init_tools_from_config
tool_conf_source = get_toolbox_parser(config_filename)
File "/galaxy/lib/galaxy/tools/toolbox/parser.py", line 152, in get_toolbox_parser
return XmlToolConfSource(config_filename)
File "/galaxy/lib/galaxy/tools/toolbox/parser.py", line 40, in __init__
tree = parse_xml(config_filename)
File "/galaxy/lib/galaxy/util/__init__.py", line 217, in parse_xml
root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 647, in parse
source = open(source, "rb")
IOError: [Errno 2] No such file or directory: '/tmp/tmpDL4NYD/test_shed_tool_conf.xml'
galaxy.tools.toolbox.base INFO 2017-10-03 12:25:31,100 Parsing the tool configuration /tmp/tmpDL4NYD/test_migrated_tool_conf.xml
galaxy.tools.toolbox.base ERROR 2017-10-03 12:25:31,101 Error loading tools defined in config /tmp/tmpDL4NYD/test_migrated_tool_conf.xml
Traceback (most recent call last):
File "/galaxy/lib/galaxy/tools/toolbox/base.py", line 114, in _init_tools_from_configs
self._init_tools_from_config(config_filename)
File "/galaxy/lib/galaxy/tools/toolbox/base.py", line 148, in _init_tools_from_config
tool_conf_source = get_toolbox_parser(config_filename)
File "/galaxy/lib/galaxy/tools/toolbox/parser.py", line 152, in get_toolbox_parser
return XmlToolConfSource(config_filename)
File "/galaxy/lib/galaxy/tools/toolbox/parser.py", line 40, in __init__
tree = parse_xml(config_filename)
File "/galaxy/lib/galaxy/util/__init__.py", line 217, in parse_xml
root = tree.parse(fname, parser=ElementTree.XMLParser(target=DoctypeSafeCallbackTarget()))
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 647, in parse
source = open(source, "rb")
IOError: [Errno 2] No such file or directory: '/tmp/tmpDL4NYD/test_migrated_tool_conf.xml'
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,101 Reading tools from config files finshed (2.666 ms)
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,101 Loading section: Get Data
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,101 Loading section: test_1000
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,101 Loading section: test_1010
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,101 Loading section: test_1020
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: test_1030
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: column_maker
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: emboss_5_0050
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: freebayes
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: test_1060
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: test_1070
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: convert_chars
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: new_column_maker
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,102 Loading section: new_convert_chars
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading section: freebayes_1090
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading section: filtering_1090
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading section: bwa_1090
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading section: Test 0120
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading section: test_1410
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading section: repair
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading section: Filtering
galaxy.tools.toolbox.base DEBUG 2017-10-03 12:25:31,103 Loading tool panel finished (2.456 ms)
galaxy.tools.deps WARNING 2017-10-03 12:25:31,104 Path '/tmp/tmpDL4NYD/tmpfxt0uQ/database/tool_dependencies3eUpIz' does not exist, ignoring
galaxy.tools.deps WARNING 2017-10-03 12:25:31,105 Path '/tmp/tmpDL4NYD/tmpfxt0uQ/database/tool_dependencies3eUpIz' is not directory, ignoring
galaxy.tools.deps DEBUG 2017-10-03 12:25:31,105 Unable to find config file './dependency_resolvers_conf.xml'
Exception in thread ToolConfWatcher.thread:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/galaxy/lib/galaxy/tools/toolbox/watcher.py", line 123, in check
self.reload_callback()
File "/galaxy/lib/galaxy/webapps/galaxy/config_watchers.py", line 24, in <lambda>
self.tool_config_watcher = get_tool_conf_watcher(reload_callback=lambda: reload_toolbox(self.app), tool_cache=self.app.tool_cache)
File "/galaxy/lib/galaxy/queue_worker.py", line 92, in reload_toolbox
_get_new_toolbox(app)
File "/galaxy/lib/galaxy/queue_worker.py", line 111, in _get_new_toolbox
new_toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
File "/galaxy/lib/galaxy/tools/__init__.py", line 226, in __init__
app=app,
File "/galaxy/lib/galaxy/tools/toolbox/base.py", line 1059, in __init__
self._init_dependency_manager()
File "/galaxy/lib/galaxy/tools/toolbox/base.py", line 1072, in _init_dependency_manager
self.dependency_manager = build_dependency_manager(self.app.config)
File "/galaxy/lib/galaxy/tools/deps/__init__.py", line 41, in build_dependency_manager
dependency_manager = DependencyManager(**dependency_manager_kwds)
File "/galaxy/lib/galaxy/tools/deps/__init__.py", line 85, in __init__
self.dependency_resolvers = self.__build_dependency_resolvers(conf_file)
File "/galaxy/lib/galaxy/tools/deps/__init__.py", line 194, in __build_dependency_resolvers
return self.__default_dependency_resolvers()
File "/galaxy/lib/galaxy/tools/deps/__init__.py", line 202, in __default_dependency_resolvers
CondaDependencyResolver(self),
File "/galaxy/lib/galaxy/tools/deps/resolvers/conda.py", line 138, in __init__
self.disabled = not galaxy.tools.deps.installable.ensure_installed(conda_context, install_conda, self.auto_init)
File "/galaxy/lib/galaxy/tools/deps/installable.py", line 68, in ensure_installed
os.mkdir(parent_path)
OSError: [Errno 2] No such file or directory: '/tmp/tmpDL4NYD/tmpfxt0uQ/database/tool_dependencies3eUpIz'
galaxy.jobs.handler INFO 2017-10-03 12:25:31,143 sending stop signal to worker thread
galaxy.jobs.handler INFO 2017-10-03 12:25:31,143 job handler queue stopped
galaxy.jobs.runners INFO 2017-10-03 12:25:31,144 TaskRunner: Sending stop signal to 2 worker threads
galaxy.jobs.runners INFO 2017-10-03 12:25:31,144 LocalRunner: Sending stop signal to 5 worker threads
galaxy.jobs.handler INFO 2017-10-03 12:25:31,144 sending stop signal to worker thread
galaxy.jobs.handler INFO 2017-10-03 12:25:31,145 job handler stop queue stopped
Restoring 1 unacknowledged message(s).
Exception in thread ToolConfWatcher.thread (most likely raised during interpreter shutdown):
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
File "/usr/lib/python2.7/threading.py", line 763, in run
File "/galaxy/lib/galaxy/tools/toolbox/watcher.py", line 101, in check
<type 'exceptions.TypeError'>: 'NoneType' object is not callable
Build timed out (after 75 minutes). Marking the build as aborted.
Build was aborted
Recording test results
Started calculate disk usage of build
Finished Calculation of disk usage of build in 0 seconds
Started calculate disk usage of workspace
Finished Calculation of disk usage of workspace in 0 seconds
Adding one-line test results to commit status...
Setting status of 62bac4f0aa9f50a79761b9fff5942e478878b895 to FAILURE with url https://jenkins.galaxyproject.org/job/docker-toolshed/7193/ and message: 'Build finished. 579 tests run, 0 skipped, 0 failed.'
Using context: toolshed test
Finished: ABORTED
|
IOError
|
def create(
self,
trans,
parent,
name,
collection_type,
element_identifiers=None,
elements=None,
implicit_collection_info=None,
trusted_identifiers=None,
hide_source_items=False,
tags=None,
):
"""
PRECONDITION: security checks on ability to add to parent
occurred during load.
"""
# Trust embedded, newly created objects created by tool subsystem.
if trusted_identifiers is None:
trusted_identifiers = implicit_collection_info is not None
if element_identifiers and not trusted_identifiers:
validate_input_element_identifiers(element_identifiers)
dataset_collection = self.create_dataset_collection(
trans=trans,
collection_type=collection_type,
element_identifiers=element_identifiers,
elements=elements,
hide_source_items=hide_source_items,
)
if isinstance(parent, model.History):
dataset_collection_instance = self.model.HistoryDatasetCollectionAssociation(
collection=dataset_collection,
name=name,
)
if implicit_collection_info:
for input_name, input_collection in implicit_collection_info[
"implicit_inputs"
]:
dataset_collection_instance.add_implicit_input_collection(
input_name, input_collection
)
for output_dataset in implicit_collection_info.get("outputs"):
if output_dataset not in trans.sa_session:
output_dataset = trans.sa_session.query(type(output_dataset)).get(
output_dataset.id
)
if isinstance(output_dataset, model.HistoryDatasetAssociation):
output_dataset.hidden_beneath_collection_instance = (
dataset_collection_instance
)
elif isinstance(
output_dataset, model.HistoryDatasetCollectionAssociation
):
dataset_collection_instance.add_implicit_input_collection(
input_name, input_collection
)
else:
# dataset collection, don't need to do anything...
pass
trans.sa_session.add(output_dataset)
dataset_collection_instance.implicit_output_name = implicit_collection_info[
"implicit_output_name"
]
log.debug(
"Created collection with %d elements"
% (len(dataset_collection_instance.collection.elements))
)
# Handle setting hid
parent.add_dataset_collection(dataset_collection_instance)
elif isinstance(parent, model.LibraryFolder):
dataset_collection_instance = self.model.LibraryDatasetCollectionAssociation(
collection=dataset_collection,
folder=parent,
name=name,
)
else:
message = (
"Internal logic error - create called with unknown parent type %s"
% type(parent)
)
log.exception(message)
raise MessageException(message)
tags = tags or {}
if implicit_collection_info:
for k, v in implicit_collection_info.get("implicit_inputs", []):
for tag in [t for t in v.tags if t.user_tname == "name"]:
tags[tag.value] = tag
for _, tag in tags.items():
dataset_collection_instance.tags.append(
tag.copy(cls=model.HistoryDatasetCollectionTagAssociation)
)
return self.__persist(dataset_collection_instance)
|
def create(
self,
trans,
parent,
name,
collection_type,
element_identifiers=None,
elements=None,
implicit_collection_info=None,
trusted_identifiers=None,
hide_source_items=False,
tags=None,
):
"""
PRECONDITION: security checks on ability to add to parent
occurred during load.
"""
# Trust embedded, newly created objects created by tool subsystem.
if trusted_identifiers is None:
trusted_identifiers = implicit_collection_info is not None
if element_identifiers and not trusted_identifiers:
validate_input_element_identifiers(element_identifiers)
dataset_collection = self.create_dataset_collection(
trans=trans,
collection_type=collection_type,
element_identifiers=element_identifiers,
elements=elements,
hide_source_items=hide_source_items,
)
if isinstance(parent, model.History):
dataset_collection_instance = self.model.HistoryDatasetCollectionAssociation(
collection=dataset_collection,
name=name,
)
if implicit_collection_info:
for input_name, input_collection in implicit_collection_info[
"implicit_inputs"
]:
dataset_collection_instance.add_implicit_input_collection(
input_name, input_collection
)
for output_dataset in implicit_collection_info.get("outputs"):
if output_dataset not in trans.sa_session:
output_dataset = trans.sa_session.query(type(output_dataset)).get(
output_dataset.id
)
if isinstance(output_dataset, model.HistoryDatasetAssociation):
output_dataset.hidden_beneath_collection_instance = (
dataset_collection_instance
)
elif isinstance(
output_dataset, model.HistoryDatasetCollectionAssociation
):
dataset_collection_instance.add_implicit_input_collection(
input_name, input_collection
)
else:
# dataset collection, don't need to do anything...
pass
trans.sa_session.add(output_dataset)
dataset_collection_instance.implicit_output_name = implicit_collection_info[
"implicit_output_name"
]
log.debug(
"Created collection with %d elements"
% (len(dataset_collection_instance.collection.elements))
)
# Handle setting hid
parent.add_dataset_collection(dataset_collection_instance)
elif isinstance(parent, model.LibraryFolder):
dataset_collection_instance = self.model.LibraryDatasetCollectionAssociation(
collection=dataset_collection,
folder=parent,
name=name,
)
else:
message = (
"Internal logic error - create called with unknown parent type %s"
% type(parent)
)
log.exception(message)
raise MessageException(message)
tags = tags or {}
if implicit_collection_info:
for k, v in implicit_collection_info.get("implicit_inputs", []):
for tag in [t for t in v.tags if t.user_tname == "name"]:
tags[tag.value] = tag
for _, tag in tags.items():
dataset_collection_instance.tags.append(tag.copy())
return self.__persist(dataset_collection_instance)
|
https://github.com/galaxyproject/galaxy/issues/4975
|
galaxy.tools.actions INFO 2017-11-10 17:17:20,507 Verified access to datasets for Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] (39.007 ms)
galaxy.tools.actions INFO 2017-11-10 17:17:20,511 Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] complete, ready to flush (67.715 ms)
galaxy.tools.actions INFO 2017-11-10 17:17:20,608 Flushed transaction for job Job[id=2195981,tool_id=toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] (96.495 ms)
galaxy.tools.execute DEBUG 2017-11-10 17:17:20,608 Tool [toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] created job [2195981] (631.589 ms)
galaxy.tools.execute DEBUG 2017-11-10 17:17:20,629 Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3 request: (1092.059 ms)
galaxy.managers.collections DEBUG 2017-11-10 17:17:20,662 Created collection with 2 elements
galaxy.managers.collections DEBUG 2017-11-10 17:17:20,812 Created collection with 2 elements
galaxy.workflow.run DEBUG 2017-11-10 17:17:21,068 Workflow step 137193 of invocation 29732 invoked (1794.676 ms)
galaxy.managers.collections DEBUG 2017-11-10 17:17:21,479 Created collection with 0 elements
galaxy.tools ERROR 2017-11-10 17:17:21,510 Exception caught while attempting tool execution:
Traceback (most recent call last):
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/__init__.py", line 1311, in handle_single_execution
job, out_data = self.execute(trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id, mapping_over_collection=mapping_over_collection, execution_cache=execution_cache)
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/__init__.py", line 1391, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/actions/__init__.py", line 410, in execute
**element_kwds
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/actions/__init__.py", line 735, in create_collection
**element_kwds
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/managers/collections.py", line 107, in create
dataset_collection_instance.tags.append(tag.copy())
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/collections.py", line 1073, in append
item = __set(self, item, _sa_initiator)
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/collections.py", line 1045, in __set
item = executor.fire_append_event(item, _sa_initiator)
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/collections.py", line 717, in fire_append_event
item, initiator)
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py", line 941, in fire_append_event
initiator or self._append_token or self._init_append_token())
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py", line 1178, in emit_backref_from_collection_append_event
child_impl = child_state.manager[key].impl
KeyError: 'dataset_collections'
|
KeyError
|
def copy(self, cls=None):
if cls:
new_ta = cls()
else:
new_ta = type(self)()
new_ta.tag_id = self.tag_id
new_ta.user_tname = self.user_tname
new_ta.value = self.value
new_ta.user_value = self.user_value
return new_ta
|
def copy(self):
new_ta = type(self)()
new_ta.tag_id = self.tag_id
new_ta.user_tname = self.user_tname
new_ta.value = self.value
new_ta.user_value = self.user_value
return new_ta
|
https://github.com/galaxyproject/galaxy/issues/4975
|
galaxy.tools.actions INFO 2017-11-10 17:17:20,507 Verified access to datasets for Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] (39.007 ms)
galaxy.tools.actions INFO 2017-11-10 17:17:20,511 Setup for job Job[unflushed,tool_id=toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] complete, ready to flush (67.715 ms)
galaxy.tools.actions INFO 2017-11-10 17:17:20,608 Flushed transaction for job Job[id=2195981,tool_id=toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] (96.495 ms)
galaxy.tools.execute DEBUG 2017-11-10 17:17:20,608 Tool [toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3] created job [2195981] (631.589 ms)
galaxy.tools.execute DEBUG 2017-11-10 17:17:20,629 Executed 2 job(s) for tool toolshed.g2.bx.psu.edu/repos/lparsons/htseq_count/htseq_count/0.6.1galaxy3 request: (1092.059 ms)
galaxy.managers.collections DEBUG 2017-11-10 17:17:20,662 Created collection with 2 elements
galaxy.managers.collections DEBUG 2017-11-10 17:17:20,812 Created collection with 2 elements
galaxy.workflow.run DEBUG 2017-11-10 17:17:21,068 Workflow step 137193 of invocation 29732 invoked (1794.676 ms)
galaxy.managers.collections DEBUG 2017-11-10 17:17:21,479 Created collection with 0 elements
galaxy.tools ERROR 2017-11-10 17:17:21,510 Exception caught while attempting tool execution:
Traceback (most recent call last):
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/__init__.py", line 1311, in handle_single_execution
job, out_data = self.execute(trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id, mapping_over_collection=mapping_over_collection, execution_cache=execution_cache)
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/__init__.py", line 1391, in execute
return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/actions/__init__.py", line 410, in execute
**element_kwds
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/tools/actions/__init__.py", line 735, in create_collection
**element_kwds
File "/usr/local/galaxy/galaxy-dist/lib/galaxy/managers/collections.py", line 107, in create
dataset_collection_instance.tags.append(tag.copy())
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/collections.py", line 1073, in append
item = __set(self, item, _sa_initiator)
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/collections.py", line 1045, in __set
item = executor.fire_append_event(item, _sa_initiator)
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/collections.py", line 717, in fire_append_event
item, initiator)
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py", line 941, in fire_append_event
initiator or self._append_token or self._init_append_token())
File "/usr/local/galaxy/galaxy-dist/.venv/lib/python2.7/site-packages/sqlalchemy/orm/attributes.py", line 1178, in emit_backref_from_collection_append_event
child_impl = child_state.manager[key].impl
KeyError: 'dataset_collections'
|
KeyError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.