after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _configure_node_from_config(self, config):
    """
    Startup elements in the node as specified in the provided node configuration.

    Runs as a Twisted inlineCallbacks-style generator: each ``yield`` awaits a
    WAMP call on the node controller session.

    :param config: Node configuration with optional ``"controller"`` and
        ``"workers"`` sections.
    :type config: dict
    :raises ValueError: if a native worker type has no matching
        ``_configure_native_worker_*`` method on this class.
    :raises Exception: if a worker's ``"type"`` is neither a known native
        worker type nor ``"guest"``.
    """
    self.log.info("Configuring node from local configuration ...")
    # get controller configuration subpart
    controller = config.get("controller", {})
    # start Manhole (debugging shell) in the node controller, if configured
    if "manhole" in controller:
        yield self._controller.call(
            "crossbar.start_manhole", controller["manhole"], options=CallOptions()
        )
        self.log.debug("controller: manhole started")
    # startup all workers
    workers = config.get("workers", [])
    if len(workers):
        self.log.info("Starting {nworkers} workers ...", nworkers=len(workers))
    else:
        self.log.info("No workers configured!")
    for worker in workers:
        # worker ID: taken from config when present (popped so it is not
        # passed on as part of the worker config), else auto-generated
        if "id" in worker:
            worker_id = worker.pop("id")
        else:
            worker_id = "worker-{:03d}".format(self._worker_no)
            self._worker_no += 1
        # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
        worker_type = worker["type"]
        # native worker processes setup
        if worker_type in self._native_workers:
            # set logname depending on native worker type
            worker_logname = '{} "{}"'.format(
                self._native_workers[worker_type]["logname"], worker_id
            )
            # any worker specific options
            worker_options = worker.get("options", {})
            # now actually start the (native) worker ..
            # NOTE: native workers are started with only their "options"
            # subsection, unlike guest workers below
            yield self._controller.call(
                "crossbar.start_worker",
                worker_id,
                worker_type,
                worker_options,
                options=CallOptions(),
            )
            # setup native worker generic stuff: dispatch to the per-type
            # configuration method, e.g. _configure_native_worker_router
            method_name = "_configure_native_worker_{}".format(
                worker_type.replace("-", "_")
            )
            try:
                config_fn = getattr(self, method_name)
            except AttributeError:
                raise ValueError(
                    "A native worker of type '{}' is configured but "
                    "there is no method '{}' on {}".format(
                        worker_type, method_name, type(self)
                    )
                )
            yield config_fn(worker_logname, worker_id, worker)
        # guest worker processes setup
        elif worker_type == "guest":
            # now actually start the (guest) worker ..
            # FIXME: start_worker() takes the whole configuration item for guest workers, whereas native workers
            # only take the options (which is part of the whole config item for the worker)
            yield self._controller.call(
                "crossbar.start_worker",
                worker_id,
                worker_type,
                worker,
                options=CallOptions(),
            )
        else:
            raise Exception(
                'logic error: unexpected worker_type="{}"'.format(worker_type)
            )
    self.log.info("Local node configuration applied successfully!")
|
def _configure_node_from_config(self, config):
    """
    Startup elements in the node as specified in the provided node configuration.

    :param config: Node configuration with optional ``"controller"`` and
        ``"workers"`` sections.
    :type config: dict
    :raises ValueError: if a native worker type has no matching
        ``_configure_native_worker_*`` method on this class.
    :raises Exception: if a worker's ``"type"`` is neither a known native
        worker type nor ``"guest"``.
    """
    self.log.info("Configuring node from local configuration ...")
    # get controller configuration subpart
    controller = config.get("controller", {})
    # start Manhole in node controller
    if "manhole" in controller:
        yield self._controller.call(
            "crossbar.start_manhole", controller["manhole"], options=CallOptions()
        )
        self.log.debug("controller: manhole started")
    # startup all workers
    workers = config.get("workers", [])
    if len(workers):
        self.log.info("Starting {nworkers} workers ...", nworkers=len(workers))
    else:
        self.log.info("No workers configured!")
    for worker in workers:
        # worker ID: from config if present, else auto-generated
        if "id" in worker:
            worker_id = worker.pop("id")
        else:
            worker_id = "worker-{:03d}".format(self._worker_no)
            self._worker_no += 1
        # worker type: either a native worker ('router', 'container', ..), or a guest worker ('guest')
        worker_type = worker["type"]
        # BUGFIX: previously this asserted worker_type in self._native_workers,
        # which crashed the node with an AssertionError whenever a 'guest'
        # worker was configured. Handle native, guest and unknown types
        # explicitly instead of validating config with assert.
        if worker_type in self._native_workers:
            # set logname depending on native worker type
            worker_logname = "{} '{}'".format(
                self._native_workers[worker_type]["logname"], worker_id
            )
            # any worker specific options
            worker_options = worker.get("options", {})
            # now actually start the (native) worker; native workers are
            # started with only their "options" subsection
            yield self._controller.call(
                "crossbar.start_worker",
                worker_id,
                worker_type,
                worker_options,
                options=CallOptions(),
            )
            # setup native worker generic stuff: dispatch to the per-type
            # configuration method, e.g. _configure_native_worker_router
            method_name = "_configure_native_worker_{}".format(
                worker_type.replace("-", "_")
            )
            try:
                config_fn = getattr(self, method_name)
            except AttributeError:
                raise ValueError(
                    "A native worker of type '{}' is configured but "
                    "there is no method '{}' on {}".format(
                        worker_type, method_name, type(self)
                    )
                )
            yield config_fn(worker_logname, worker_id, worker)
        elif worker_type == "guest":
            # guest workers are started with the whole configuration item
            # (not just the "options" subsection as for native workers)
            yield self._controller.call(
                "crossbar.start_worker",
                worker_id,
                worker_type,
                worker,
                options=CallOptions(),
            )
        else:
            raise Exception(
                'logic error: unexpected worker_type="{}"'.format(worker_type)
            )
    self.log.info("Local node configuration applied successfully!")
|
https://github.com/crossbario/crossbar/issues/1179
|
2017-09-05T14:52:34+0200 [Controller 15960] Starting 2 workers ...
2017-09-05T14:52:34+0200 [Controller 15960] Router worker "worker-001" starting ..
2017-09-05T14:52:34+0200 [Router 15969] Started Router worker "worker-001" [crossbar.worker.router.RouterWorkerSession / CPython-EPollReactor]
2017-09-05T14:52:34+0200 [Router 15969] Router worker "worker-001" session 3303279962187294 initializing ..
2017-09-05T14:52:34+0200 [Router 15969] Registered 35 procedures
2017-09-05T14:52:34+0200 [Router 15969] Router worker "worker-001" session ready
2017-09-05T14:52:34+0200 [Controller 15960] Router worker "worker-001" process 15969 started
2017-09-05T14:52:34+0200 [Router 15969] RouterServiceSession ready [configured on_ready fired]
2017-09-05T14:52:34+0200 [Router 15969] Realm 'realm1' started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': realm 'realm-001' (named 'realm1') started
2017-09-05T14:52:34+0200 [Router 15969] role role-001 on realm realm-001 started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': role 'role-001' (named 'authenticator') started on realm 'realm-001'
2017-09-05T14:52:34+0200 [Router 15969] role role-002 on realm realm-001 started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': role 'role-002' (named 'public') started on realm 'realm-001'
2017-09-05T14:52:34+0200 [Router 15969] started component: labgrid.remote.authenticator.AuthenticatorSession id=1175882440106437
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': component 'component-001' started
2017-09-05T14:52:34+0200 [Router 15969] Site starting on 20408
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': transport 'transport-001' started
2017-09-05T14:52:34+0200 [Controller 15960] Could not startup node: Traceback (most recent call last):
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1442, in gotResult
_inlineCallbacks(r, g, deferred)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/python/failure.py", line 393, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
--- <exception caught here> ---
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/crossbar/controller/node.py", line 597, in start
yield self._startup()
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks
result = g.send(result)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/crossbar/controller/node.py", line 656, in _configure_node_from_config
assert worker_type in self._native_workers
builtins.AssertionError:
|
builtins.AssertionError
|
def stop_component(self, component_id, details=None):
    """
    Stop a component currently running within this container.

    Closes the component's transport protocol, then waits (via the
    inlineCallbacks-style ``yield``) for the component's ``_stopped``
    deferred to fire before assembling the result.

    :param component_id: The ID of the component to stop.
    :type component_id: int
    :param details: Caller details, or ``None`` when invoked internally
        rather than via a WAMP call.
    :type details: instance of :class:`autobahn.wamp.types.CallDetails`
    :returns: Stop information.
    :rtype: dict
    :raises ApplicationError: ``crossbar.error.no_such_object`` if no
        component with the given ID is running in this container.
    """
    self.log.debug(
        "{klass}.stop_component({component_id}, {details})",
        klass=self.__class__.__name__,
        component_id=component_id,
        details=details,
    )
    if component_id not in self.components:
        raise ApplicationError(
            "crossbar.error.no_such_object",
            "no component with ID {} running in this container".format(component_id),
        )
    component = self.components[component_id]
    try:
        component.proto.close()
    # bare except is deliberate here: log whatever went wrong while
    # closing, then re-raise to the caller
    except:
        self.log.failure(
            "failed to close protocol on component '{component_id}': {log_failure}",
            component_id=component_id,
        )
        raise
    else:
        # essentially just waiting for "on_component_stop"
        yield component._stopped
    # details may be None (internal invocation), so every caller attribute
    # is accessed behind an "if details" guard
    stopped = {
        "component_id": component_id,
        "uptime": (datetime.utcnow() - component.started).total_seconds(),
        "caller": {
            "session": details.caller if details else None,
            "authid": details.caller_authid if details else None,
            "authrole": details.caller_authrole if details else None,
        },
    }
    # the component.proto above normally already cleaned it up
    if component_id in self.components:
        del self.components[component_id]
    # FIXME: this is getting autobahn.wamp.exception.TransportLost
    # (intentionally disabled dead code until the TransportLost issue is fixed)
    if False:
        self.publish(
            "{}.on_component_stopped".format(self._uri_prefix),
            stopped,
            options=PublishOptions(exclude=details.caller),
        )
    returnValue(stopped)
|
def stop_component(self, component_id, details=None):
    """
    Stop a component currently running within this container.

    :param component_id: The ID of the component to stop.
    :type component_id: int
    :param details: Caller details, or ``None`` when invoked internally
        rather than via a WAMP call.
    :type details: instance of :class:`autobahn.wamp.types.CallDetails`
    :returns: Stop information.
    :rtype: dict
    :raises ApplicationError: ``crossbar.error.no_such_object`` if no
        component with the given ID is running in this container.
    """
    self.log.debug(
        "{klass}.stop_component({component_id}, {details})",
        klass=self.__class__.__name__,
        component_id=component_id,
        details=details,
    )
    if component_id not in self.components:
        raise ApplicationError(
            "crossbar.error.no_such_object",
            "no component with ID {} running in this container".format(component_id),
        )
    component = self.components[component_id]
    try:
        component.proto.close()
    # bare except is deliberate: log whatever went wrong while closing,
    # then re-raise to the caller
    except:
        self.log.failure(
            "failed to close protocol on component '{component_id}': {log_failure}",
            component_id=component_id,
        )
        raise
    else:
        # essentially just waiting for "on_component_stop"
        yield component._stopped
    # BUGFIX: details defaults to None (internal invocation), so guard every
    # attribute access instead of dereferencing details.caller unconditionally
    stopped = {
        "component_id": component_id,
        "uptime": (datetime.utcnow() - component.started).total_seconds(),
        "caller": {
            "session": details.caller if details else None,
            "authid": details.caller_authid if details else None,
            "authrole": details.caller_authrole if details else None,
        },
    }
    # the component.proto above normally already cleaned it up
    if component_id in self.components:
        del self.components[component_id]
    # FIXME: this is getting autobahn.wamp.exception.TransportLost
    if False:
        self.publish(
            "{}.on_component_stopped".format(self._uri_prefix),
            stopped,
            options=PublishOptions(exclude=details.caller),
        )
    returnValue(stopped)
|
https://github.com/crossbario/crossbar/issues/1179
|
2017-09-05T14:52:34+0200 [Controller 15960] Starting 2 workers ...
2017-09-05T14:52:34+0200 [Controller 15960] Router worker "worker-001" starting ..
2017-09-05T14:52:34+0200 [Router 15969] Started Router worker "worker-001" [crossbar.worker.router.RouterWorkerSession / CPython-EPollReactor]
2017-09-05T14:52:34+0200 [Router 15969] Router worker "worker-001" session 3303279962187294 initializing ..
2017-09-05T14:52:34+0200 [Router 15969] Registered 35 procedures
2017-09-05T14:52:34+0200 [Router 15969] Router worker "worker-001" session ready
2017-09-05T14:52:34+0200 [Controller 15960] Router worker "worker-001" process 15969 started
2017-09-05T14:52:34+0200 [Router 15969] RouterServiceSession ready [configured on_ready fired]
2017-09-05T14:52:34+0200 [Router 15969] Realm 'realm1' started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': realm 'realm-001' (named 'realm1') started
2017-09-05T14:52:34+0200 [Router 15969] role role-001 on realm realm-001 started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': role 'role-001' (named 'authenticator') started on realm 'realm-001'
2017-09-05T14:52:34+0200 [Router 15969] role role-002 on realm realm-001 started
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': role 'role-002' (named 'public') started on realm 'realm-001'
2017-09-05T14:52:34+0200 [Router 15969] started component: labgrid.remote.authenticator.AuthenticatorSession id=1175882440106437
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': component 'component-001' started
2017-09-05T14:52:34+0200 [Router 15969] Site starting on 20408
2017-09-05T14:52:34+0200 [Controller 15960] Router 'worker-001': transport 'transport-001' started
2017-09-05T14:52:34+0200 [Controller 15960] Could not startup node: Traceback (most recent call last):
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1442, in gotResult
_inlineCallbacks(r, g, deferred)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1384, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/python/failure.py", line 393, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
--- <exception caught here> ---
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/crossbar/controller/node.py", line 597, in start
yield self._startup()
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks
result = g.send(result)
File "/home/phoenix/.virtualenvs/labgrid/lib/python3.6/site-packages/crossbar/controller/node.py", line 656, in _configure_node_from_config
assert worker_type in self._native_workers
builtins.AssertionError:
|
builtins.AssertionError
|
def authorize(self, session, uri, action, options):
    """
    Authorize a session connected under this role to perform the given
    action on the given URI, by calling the configured dynamic authorizer
    over WAMP.

    :param session: The WAMP session that requests the action.
    :type session: Instance of :class:`autobahn.wamp.protocol.ApplicationSession`
    :param uri: The URI on which to perform the action.
    :type uri: str
    :param action: The action to be performed.
    :type action: str
    :param options: Call/publish options forwarded to the authorizer as
        its 5th argument.
    :return: bool -- Flag indicating whether session is authorized or not.
    """
    session_details = getattr(session, "_session_details", None)
    if session_details is None:
        # this happens for "embedded" sessions -- perhaps we
        # should have a better way to detect this -- also
        # session._transport should be a RouterApplicationSession
        details = {
            "session": session._session_id,
            "authid": session._authid,
            "authrole": session._authrole,
            "authmethod": session._authmethod,
            "authprovider": session._authprovider,
            "authextra": session._authextra,
            "transport": {
                "type": "stdio",  # or maybe "embedded"?
            },
        }
    else:
        # a plain dict is built here (rather than passing the
        # SessionDetails object) so the payload is WAMP-serializable
        details = {
            "session": session_details.session,
            "authid": session_details.authid,
            "authrole": session_details.authrole,
            "authmethod": session_details.authmethod,
            "authprovider": session_details.authprovider,
            "authextra": session_details.authextra,
            "transport": session._transport._transport_info,
        }
    self.log.debug(
        "CrossbarRouterRoleDynamicAuth.authorize {uri} {action} {details}",
        uri=uri,
        action=action,
        details=details,
    )
    d = self._session.call(self._authorizer, details, uri, action, options)
    # we could do backwards-compatibility for clients that didn't
    # yet add the 5th "options" argument to their authorizers like
    # so:
    def maybe_call_old_way(result):
        # retry legacy 4-argument authorizers when the error message
        # indicates an arity mismatch
        if isinstance(result, Failure):
            if isinstance(result.value, ApplicationError):
                if "takes exactly 4 arguments" in str(result.value):
                    self.log.warn(
                        "legacy authorizer '{auth}'; should take 5 arguments. Calling with 4.",
                        auth=self._authorizer,
                    )
                    return self._session.call(
                        self._authorizer, session_details, uri, action
                    )
        return result
    d.addBoth(maybe_call_old_way)
    def sanity_check(authorization):
        """
        Ensure the return-value we got from the user-supplied method makes sense
        """
        if isinstance(authorization, dict):
            # only these keys are understood by the router
            for key in authorization.keys():
                if key not in ["allow", "cache", "disclose"]:
                    return Failure(
                        ValueError(
                            "Authorizer returned unknown key '{key}'".format(
                                key=key,
                            )
                        )
                    )
            # must have "allow"
            if "allow" not in authorization:
                return Failure(
                    ValueError("Authorizer must have 'allow' in returned dict")
                )
            # all values must be bools
            for key, value in authorization.items():
                if not isinstance(value, bool):
                    return Failure(
                        ValueError("Authorizer must have bool for '{}'".format(key))
                    )
            return authorization
        elif isinstance(authorization, bool):
            return authorization
        return Failure(
            ValueError(
                "Authorizer returned unknown type '{name}'".format(
                    name=type(authorization).__name__,
                )
            )
        )
    d.addCallback(sanity_check)
    return d
|
def authorize(self, session, uri, action, options):
    """
    Authorize a session connected under this role to perform the given
    action on the given URI, by calling the configured dynamic authorizer
    over WAMP.

    :param session: The WAMP session that requests the action.
    :type session: Instance of :class:`autobahn.wamp.protocol.ApplicationSession`
    :param uri: The URI on which to perform the action.
    :type uri: str
    :param action: The action to be performed.
    :type action: str
    :param options: Call/publish options forwarded to the authorizer as
        its 5th argument.
    :return: bool -- Flag indicating whether session is authorized or not.
    """
    session_details = getattr(session, "_session_details", None)
    if session_details is None:
        # this happens for "embedded" sessions -- perhaps we
        # should have a better way to detect this -- also
        # session._transport should be a RouterApplicationSession
        details = {
            "session": session._session_id,
            "authid": session._authid,
            "authrole": session._authrole,
            "authmethod": session._authmethod,
            "authprovider": session._authprovider,
            "authextra": session._authextra,
            "transport": {
                "type": "stdio",  # or maybe "embedded"?
            },
        }
    else:
        # BUGFIX: previously the SessionDetails object itself was passed to
        # session.call(), which is not WAMP-serializable and blew up with
        # SerializationError in the transport. Build a plain dict instead.
        details = {
            "session": session_details.session,
            "authid": session_details.authid,
            "authrole": session_details.authrole,
            "authmethod": session_details.authmethod,
            "authprovider": session_details.authprovider,
            "authextra": session_details.authextra,
            "transport": session._transport._transport_info,
        }
    self.log.debug(
        "CrossbarRouterRoleDynamicAuth.authorize {uri} {action} {details}",
        uri=uri,
        action=action,
        details=details,
    )
    d = self._session.call(self._authorizer, details, uri, action, options)
    # we could do backwards-compatibility for clients that didn't
    # yet add the 5th "options" argument to their authorizers like
    # so:
    def maybe_call_old_way(result):
        # retry legacy 4-argument authorizers when the error message
        # indicates an arity mismatch
        if isinstance(result, Failure):
            if isinstance(result.value, ApplicationError):
                if "takes exactly 4 arguments" in str(result.value):
                    self.log.warn(
                        "legacy authorizer '{auth}'; should take 5 arguments. Calling with 4.",
                        auth=self._authorizer,
                    )
                    return self._session.call(
                        self._authorizer, details, uri, action
                    )
        return result
    d.addBoth(maybe_call_old_way)
    def sanity_check(authorization):
        """
        Ensure the return-value we got from the user-supplied method makes sense
        """
        if isinstance(authorization, dict):
            # only these keys are understood by the router
            for key in authorization.keys():
                if key not in ["allow", "cache", "disclose"]:
                    return Failure(
                        ValueError(
                            "Authorizer returned unknown key '{key}'".format(
                                key=key,
                            )
                        )
                    )
            # must have "allow"
            if "allow" not in authorization:
                return Failure(
                    ValueError("Authorizer must have 'allow' in returned dict")
                )
            # all values must be bools
            for key, value in authorization.items():
                if not isinstance(value, bool):
                    return Failure(
                        ValueError("Authorizer must have bool for '{}'".format(key))
                    )
            return authorization
        elif isinstance(authorization, bool):
            return authorization
        return Failure(
            ValueError(
                "Authorizer returned unknown type '{name}'".format(
                    name=type(authorization).__name__,
                )
            )
        )
    d.addCallback(sanity_check)
    return d
|
https://github.com/crossbario/crossbar/issues/1437
|
2018-11-11T20:19:23+0100 [Router 9015] Unhandled error in Deferred:
2018-11-11T20:19:23+0100 [Router 9015]
Traceback (most recent call last):
File ".../lib/python3.7/site-packages/crossbar/router/router.py", line 278, in process
self._dealer.processCall(session, msg)
File ".../lib/python3.7/site-packages/crossbar/router/dealer.py", line 742, in processCall
txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
File ".../lib/python3.7/site-packages/txaio/tx.py", line 506, in add_callbacks
future.addCallbacks(callback, errback)
File ".../lib/python3.7/site-packages/twisted/internet/defer.py", line 311, in addCallbacks
self._runCallbacks()
--- <exception caught here> ---
File ".../lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File ".../lib/python3.7/site-packages/crossbar/router/dealer.py", line 721, in on_authorize_success
self._call(session, call, registration, authorization)
File ".../lib/python3.7/site-packages/crossbar/router/dealer.py", line 923, in _call
self._router.send(callee, invocation)
File ".../lib/python3.7/site-packages/crossbar/router/router.py", line 240, in send
session._transport.send(msg)
File ".../lib/python3.7/site-packages/autobahn/twisted/rawsocket.py", line 140, in send
raise SerializationError("WampRawSocketProtocol: unable to serialize WAMP application payload ({0})".format(e))
autobahn.wamp.exception.SerializationError: WampRawSocketProtocol: unable to serialize WAMP application payload (cannot serialize unknown object: <autobahn.wamp.types.SessionDetails object at 0x7f2e5fe018b8>)
|
autobahn.wamp.exception.SerializationError
|
def start_router_transport(self, id, config, details=None):
    """
    Start a transport on this router worker.

    Dispatches on ``config["type"]`` to build the matching Twisted
    protocol factory, then opens a listening port for it.

    :param id: The ID of the transport to start.
        NOTE(review): parameter name shadows the ``id`` builtin; kept for
        interface compatibility.
    :type id: str
    :param config: The transport configuration.
    :type config: dict
    :param details: Caller details (unused here).
    :returns: Deferred that fires once the listening port is up.
    :raises ApplicationError: ``crossbar.error.already_running`` for a
        duplicate transport ID, ``crossbar.error.invalid_configuration``
        for a bad config, ``crossbar.error.cannot_listen`` if the endpoint
        cannot be bound.
    """
    self.log.debug("{name}.start_router_transport", name=self.__class__.__name__)
    # prohibit starting a transport twice
    #
    if id in self.transports:
        emsg = "Could not start transport: a transport with ID '{}' is already running (or starting)".format(
            id
        )
        self.log.error(emsg)
        raise ApplicationError("crossbar.error.already_running", emsg)
    # check configuration
    #
    try:
        checkconfig.check_router_transport(config)
    except Exception as e:
        emsg = "Invalid router transport configuration: {}".format(e)
        self.log.error(emsg)
        raise ApplicationError("crossbar.error.invalid_configuration", emsg)
    else:
        self.log.debug("Starting {ttype}-transport on router.", ttype=config["type"])
    # standalone WAMP-RawSocket transport
    #
    if config["type"] == "rawsocket":
        transport_factory = WampRawSocketServerFactory(
            self._router_session_factory, config
        )
        transport_factory.noisy = False
    # standalone WAMP-WebSocket transport
    #
    elif config["type"] == "websocket":
        transport_factory = WampWebSocketServerFactory(
            self._router_session_factory,
            self.config.extra.cbdir,
            config,
            self._templates,
        )
        transport_factory.noisy = False
    # Flash-policy file server pseudo transport
    #
    elif config["type"] == "flashpolicy":
        transport_factory = FlashPolicyFactory(
            config.get("allowed_domain", None), config.get("allowed_ports", None)
        )
    # WebSocket testee pseudo transport
    #
    elif config["type"] == "websocket.testee":
        transport_factory = WebSocketTesteeServerFactory(config, self._templates)
    # Stream testee pseudo transport
    #
    elif config["type"] == "stream.testee":
        transport_factory = StreamTesteeServerFactory()
    # MQTT legacy adapter transport
    #
    elif config["type"] == "mqtt":
        transport_factory = WampMQTTServerFactory(
            self._router_session_factory, config, self._reactor
        )
        transport_factory.noisy = False
    # Twisted Web based transport
    #
    elif config["type"] == "web":
        # TLS presence is determined here from the transport's own endpoint
        # and passed down, since the web sub-config may have no endpoint
        transport_factory = self._create_web_factory(
            config,
            is_secure="tls" in config["endpoint"],
        )
    # Universal transport
    #
    elif config["type"] == "universal":
        if "web" in config:
            web_factory = self._create_web_factory(
                config["web"],
                is_secure=("tls" in config["endpoint"]),
            )
        else:
            web_factory = None
        if "rawsocket" in config:
            rawsocket_factory = WampRawSocketServerFactory(
                self._router_session_factory, config["rawsocket"]
            )
            rawsocket_factory.noisy = False
        else:
            rawsocket_factory = None
        if "mqtt" in config:
            mqtt_factory = WampMQTTServerFactory(
                self._router_session_factory, config["mqtt"], self._reactor
            )
            mqtt_factory.noisy = False
        else:
            mqtt_factory = None
        if "websocket" in config:
            websocket_factory_map = {}
            # one websocket factory per first URL path component
            for websocket_url_first_component, websocket_config in config[
                "websocket"
            ].items():
                websocket_transport_factory = WampWebSocketServerFactory(
                    self._router_session_factory,
                    self.config.extra.cbdir,
                    websocket_config,
                    self._templates,
                )
                websocket_transport_factory.noisy = False
                websocket_factory_map[websocket_url_first_component] = (
                    websocket_transport_factory
                )
                self.log.debug(
                    "hooked up websocket factory on request URI {request_uri}",
                    request_uri=websocket_url_first_component,
                )
        else:
            websocket_factory_map = None
        transport_factory = UniSocketServerFactory(
            web_factory, websocket_factory_map, rawsocket_factory, mqtt_factory
        )
    # Unknown transport type
    #
    else:
        # should not arrive here, since we did check_transport() in the beginning
        raise Exception("logic error")
    # create transport endpoint / listening port from transport factory
    #
    d = create_listening_port_from_config(
        config["endpoint"],
        self.config.extra.cbdir,
        transport_factory,
        self._reactor,
        self.log,
    )
    def ok(port):
        # record the running transport so duplicate starts are rejected
        self.transports[id] = RouterTransport(id, config, transport_factory, port)
        self.log.debug("Router transport '{id}'' started and listening", id=id)
        return
    def fail(err):
        emsg = "Cannot listen on transport endpoint: {log_failure}"
        self.log.error(emsg, log_failure=err)
        raise ApplicationError("crossbar.error.cannot_listen", emsg)
    d.addCallbacks(ok, fail)
    return d
|
def start_router_transport(self, id, config, details=None):
    """
    Start a transport on this router worker.

    Dispatches on ``config["type"]`` to build the matching Twisted
    protocol factory, then opens a listening port for it.

    :param id: The ID of the transport to start.
        NOTE(review): parameter name shadows the ``id`` builtin; kept for
        interface compatibility.
    :type id: str
    :param config: The transport configuration.
    :type config: dict
    :param details: Caller details (unused here).
    :returns: Deferred that fires once the listening port is up.
    :raises ApplicationError: ``crossbar.error.already_running`` for a
        duplicate transport ID, ``crossbar.error.invalid_configuration``
        for a bad config, ``crossbar.error.cannot_listen`` if the endpoint
        cannot be bound.
    """
    self.log.debug("{name}.start_router_transport", name=self.__class__.__name__)
    # prohibit starting a transport twice
    #
    if id in self.transports:
        emsg = "Could not start transport: a transport with ID '{}' is already running (or starting)".format(
            id
        )
        self.log.error(emsg)
        raise ApplicationError("crossbar.error.already_running", emsg)
    # check configuration
    #
    try:
        checkconfig.check_router_transport(config)
    except Exception as e:
        emsg = "Invalid router transport configuration: {}".format(e)
        self.log.error(emsg)
        raise ApplicationError("crossbar.error.invalid_configuration", emsg)
    else:
        self.log.debug("Starting {ttype}-transport on router.", ttype=config["type"])
    # standalone WAMP-RawSocket transport
    #
    if config["type"] == "rawsocket":
        transport_factory = WampRawSocketServerFactory(
            self._router_session_factory, config
        )
        transport_factory.noisy = False
    # standalone WAMP-WebSocket transport
    #
    elif config["type"] == "websocket":
        transport_factory = WampWebSocketServerFactory(
            self._router_session_factory,
            self.config.extra.cbdir,
            config,
            self._templates,
        )
        transport_factory.noisy = False
    # Flash-policy file server pseudo transport
    #
    elif config["type"] == "flashpolicy":
        transport_factory = FlashPolicyFactory(
            config.get("allowed_domain", None), config.get("allowed_ports", None)
        )
    # WebSocket testee pseudo transport
    #
    elif config["type"] == "websocket.testee":
        transport_factory = WebSocketTesteeServerFactory(config, self._templates)
    # Stream testee pseudo transport
    #
    elif config["type"] == "stream.testee":
        transport_factory = StreamTesteeServerFactory()
    # MQTT legacy adapter transport
    #
    elif config["type"] == "mqtt":
        transport_factory = WampMQTTServerFactory(
            self._router_session_factory, config, self._reactor
        )
        transport_factory.noisy = False
    # Twisted Web based transport
    #
    elif config["type"] == "web":
        transport_factory = self._create_web_factory(config)
    # Universal transport
    #
    elif config["type"] == "universal":
        if "web" in config:
            # NOTE(review): config["web"] has no "endpoint" key of its own
            # (the endpoint belongs to the universal transport), which is
            # known to make _create_web_factory's HSTS/TLS check raise
            # KeyError 'endpoint' (crossbar issue #1080) — confirm against
            # the _create_web_factory implementation in use
            web_factory = self._create_web_factory(config["web"])
        else:
            web_factory = None
        if "rawsocket" in config:
            rawsocket_factory = WampRawSocketServerFactory(
                self._router_session_factory, config["rawsocket"]
            )
            rawsocket_factory.noisy = False
        else:
            rawsocket_factory = None
        if "mqtt" in config:
            mqtt_factory = WampMQTTServerFactory(
                self._router_session_factory, config["mqtt"], self._reactor
            )
            mqtt_factory.noisy = False
        else:
            mqtt_factory = None
        if "websocket" in config:
            websocket_factory_map = {}
            # one websocket factory per first URL path component
            for websocket_url_first_component, websocket_config in config[
                "websocket"
            ].items():
                websocket_transport_factory = WampWebSocketServerFactory(
                    self._router_session_factory,
                    self.config.extra.cbdir,
                    websocket_config,
                    self._templates,
                )
                websocket_transport_factory.noisy = False
                websocket_factory_map[websocket_url_first_component] = (
                    websocket_transport_factory
                )
                self.log.debug(
                    "hooked up websocket factory on request URI {request_uri}",
                    request_uri=websocket_url_first_component,
                )
        else:
            websocket_factory_map = None
        transport_factory = UniSocketServerFactory(
            web_factory, websocket_factory_map, rawsocket_factory, mqtt_factory
        )
    # Unknown transport type
    #
    else:
        # should not arrive here, since we did check_transport() in the beginning
        raise Exception("logic error")
    # create transport endpoint / listening port from transport factory
    #
    d = create_listening_port_from_config(
        config["endpoint"],
        self.config.extra.cbdir,
        transport_factory,
        self._reactor,
        self.log,
    )
    def ok(port):
        # record the running transport so duplicate starts are rejected
        self.transports[id] = RouterTransport(id, config, transport_factory, port)
        self.log.debug("Router transport '{id}'' started and listening", id=id)
        return
    def fail(err):
        emsg = "Cannot listen on transport endpoint: {log_failure}"
        self.log.error(emsg, log_failure=err)
        raise ApplicationError("crossbar.error.cannot_listen", emsg)
    d.addCallbacks(ok, fail)
    return d
|
https://github.com/crossbario/crossbar/issues/1080
|
2017-05-15T17:29:32+0000 [Router 1609] KeyError: 'endpoint': Traceback (most recent call last):
File "/home/ubuntu/crossbar-pypy2-1/site-packages/autobahn/twisted/websocket.py", line 162, in _onMessage
self.onMessage(payload, isBinary)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/autobahn/wamp/websocket.py", line 95, in onMessage
self._session.onMessage(msg)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/autobahn/wamp/protocol.py", line 892, in onMessage
on_reply = txaio.as_future(endpoint.fn, *invoke_args, **invoke_kwargs)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/txaio/tx.py", line 417, in as_future
return maybeDeferred(fun, *args, **kwargs)
--- <exception caught here> ---
File "/home/ubuntu/crossbar-pypy2-1/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/crossbar/worker/router.py", line 869, in start_router_transport
web_factory = self._create_web_factory(config['web'])
File "/home/ubuntu/crossbar-pypy2-1/site-packages/crossbar/worker/router.py", line 962, in _create_web_factory
if 'tls' in config['endpoint']:
exceptions.KeyError: 'endpoint'
|
exceptions.KeyError
|
def _create_web_factory(self, config, is_secure):
    """
    Build a Twisted Web ``Site`` factory from a web transport configuration.

    :param config: Web transport configuration ("paths" and "options").
    :type config: dict
    :param is_secure: Whether the enclosing transport runs over TLS
        (controls whether HSTS may be enabled).
    :type is_secure: bool
    :returns: Configured ``Site`` instance.
    """
    opts = config.get("options", {})

    # root resource: either the configured "/" path, or a 404 page
    if "/" in config["paths"]:
        root = self._create_resource(config["paths"]["/"], nested=False)
    else:
        root = Resource404(self._templates, b"")

    # hook up all remaining (non-root) configured paths under the root
    self._add_paths(root, config.get("paths", {}))

    # the actual transport factory
    site = Site(root, timeout=opts.get("client_timeout", None))
    site.noisy = False

    # inject _LessNoisyHTTPChannel so web clients timing out (which
    # happens all the time) do not produce info-level log entries
    def channel_protocol_factory():
        return _GenericHTTPChannelProtocol(_LessNoisyHTTPChannel())
    site.protocol = channel_protocol_factory

    # web access logging: disabled unless explicitly requested
    if not opts.get("access_log", False):
        site.log = lambda _: None

    # traceback rendering in error pages
    site.displayTracebacks = opts.get("display_tracebacks", False)

    # HSTS: only meaningful over TLS
    if opts.get("hsts", False):
        if not is_secure:
            self.log.warn(
                "Warning: HSTS requested, but running on non-TLS - skipping HSTS"
            )
        else:
            max_age = int(opts.get("hsts_max_age", 31536000))
            site.requestFactory = createHSTSRequestFactory(
                site.requestFactory, max_age
            )

    return site
|
def _create_web_factory(self, config):
    """
    Create a Twisted Web site (transport factory) from a Web transport
    configuration.

    :param config: Web transport configuration; must contain "paths"
        (dict mapping URL path -> resource config), may contain "options"
        and "endpoint".
    :returns: A configured ``Site`` transport factory.
    """
    options = config.get("options", {})
    # create Twisted Web root resource
    if "/" in config["paths"]:
        root_config = config["paths"]["/"]
        root = self._create_resource(root_config, nested=False)
    else:
        # no explicit root configured -> serve a 404 page at "/"
        root = Resource404(self._templates, b"")
    # create Twisted Web resources on all non-root paths configured
    self._add_paths(root, config.get("paths", {}))
    # create the actual transport factory
    transport_factory = Site(
        root,
        timeout=options.get("client_timeout", None),
    )
    transport_factory.noisy = False
    # we override this factory so that we can inject
    # _LessNoisyHTTPChannel to avoid info-level logging on timing
    # out web clients (which happens all the time).
    def channel_protocol_factory():
        return _GenericHTTPChannelProtocol(_LessNoisyHTTPChannel())
    transport_factory.protocol = channel_protocol_factory
    # Web access logging (disabled by default)
    if not options.get("access_log", False):
        transport_factory.log = lambda _: None
    # Traceback rendering (disabled by default)
    transport_factory.displayTracebacks = options.get("display_tracebacks", False)
    # HSTS
    if options.get("hsts", False):
        # BUG FIX: a web sub-config may have no "endpoint" key at all
        # (it used to raise KeyError here, issue #1080) -- use .get()
        # and treat a missing endpoint as non-TLS
        if "tls" in config.get("endpoint", {}):
            # default max-age is one year (in seconds)
            hsts_max_age = int(options.get("hsts_max_age", 31536000))
            transport_factory.requestFactory = createHSTSRequestFactory(
                transport_factory.requestFactory, hsts_max_age
            )
        else:
            self.log.warn(
                "Warning: HSTS requested, but running on non-TLS - skipping HSTS"
            )
    return transport_factory
|
https://github.com/crossbario/crossbar/issues/1080
|
2017-05-15T17:29:32+0000 [Router 1609] KeyError: 'endpoint': Traceback (most recent call last):
File "/home/ubuntu/crossbar-pypy2-1/site-packages/autobahn/twisted/websocket.py", line 162, in _onMessage
self.onMessage(payload, isBinary)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/autobahn/wamp/websocket.py", line 95, in onMessage
self._session.onMessage(msg)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/autobahn/wamp/protocol.py", line 892, in onMessage
on_reply = txaio.as_future(endpoint.fn, *invoke_args, **invoke_kwargs)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/txaio/tx.py", line 417, in as_future
return maybeDeferred(fun, *args, **kwargs)
--- <exception caught here> ---
File "/home/ubuntu/crossbar-pypy2-1/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File "/home/ubuntu/crossbar-pypy2-1/site-packages/crossbar/worker/router.py", line 869, in start_router_transport
web_factory = self._create_web_factory(config['web'])
File "/home/ubuntu/crossbar-pypy2-1/site-packages/crossbar/worker/router.py", line 962, in _create_web_factory
if 'tls' in config['endpoint']:
exceptions.KeyError: 'endpoint'
|
exceptions.KeyError
|
def detach(self, session):
    """
    Implements :func:`crossbar.router.interfaces.IDealer.detach`

    Tears down all dealer state for a (disconnecting) session: any
    in-flight invocations routed to that callee are answered with a
    CANCELED error sent to the respective caller, and all of the
    session's registrations are dropped (publishing WAMP meta events
    where appropriate).

    :param session: The (router) session being detached.
    :raises Exception: If the session is not currently attached.
    """
    if session in self._session_to_registrations:
        # send out Errors for any in-flight calls we have
        outstanding = self._callee_to_invocations.get(session, [])
        for invoke in outstanding:
            self.log.debug(
                "Cancelling in-flight INVOKE with id={request} on session {session}",
                request=invoke.call.request,
                session=session._session_id,
            )
            reply = message.Error(
                message.Call.MESSAGE_TYPE,
                invoke.call.request,
                ApplicationError.CANCELED,
                ["callee disconnected from in-flight request"],
            )
            # send this directly to the caller's session
            # (it is possible the caller was disconnected and thus
            # _transport is None before we get here though)
            if invoke.caller._transport:
                invoke.caller._transport.send(reply)
        # drop every registration this session holds
        for registration in self._session_to_registrations[session]:
            was_registered, was_last_callee = self._registration_map.drop_observer(
                session, registration
            )
            # if this was the last callee, the registration itself goes away
            if was_registered and was_last_callee:
                self._registration_map.delete_observation(registration)
            # publish WAMP meta events
            #
            if self._router._realm:
                service_session = self._router._realm.session
                # never publish meta events for the "wamp." namespace itself
                if service_session and not registration.uri.startswith("wamp."):
                    if was_registered:
                        service_session.publish(
                            "wamp.registration.on_unregister",
                            session._session_id,
                            registration.id,
                        )
                    if was_last_callee:
                        service_session.publish(
                            "wamp.registration.on_delete",
                            session._session_id,
                            registration.id,
                        )
        del self._session_to_registrations[session]
    else:
        raise Exception("session with ID {} not attached".format(session._session_id))
|
def detach(self, session):
    """
    Implements :func:`crossbar.router.interfaces.IDealer.detach`

    Tears down all dealer state for a (disconnecting) session: any
    in-flight invocations routed to that callee are answered with a
    CANCELED error sent to the respective caller, and all of the
    session's registrations are dropped (publishing WAMP meta events
    where appropriate).

    :param session: The (router) session being detached.
    :raises Exception: If the session is not currently attached.
    """
    if session in self._session_to_registrations:
        # send out Errors for any in-flight calls we have
        outstanding = self._callee_to_invocations.get(session, [])
        for invoke in outstanding:
            self.log.debug(
                "Cancelling in-flight INVOKE with id={request} on session {session}",
                request=invoke.call.request,
                session=session._session_id,
            )
            reply = message.Error(
                message.Call.MESSAGE_TYPE,
                invoke.call.request,
                ApplicationError.CANCELED,
                ["callee disconnected from in-flight request"],
            )
            # send this directly to the caller's session
            # BUG FIX: the caller may itself have disconnected already,
            # in which case its _transport is None and sending would
            # crash with AttributeError (issue #980) -- guard it
            if invoke.caller._transport:
                invoke.caller._transport.send(reply)
        # drop every registration this session holds
        for registration in self._session_to_registrations[session]:
            was_registered, was_last_callee = self._registration_map.drop_observer(
                session, registration
            )
            if was_registered and was_last_callee:
                self._registration_map.delete_observation(registration)
            # publish WAMP meta events
            #
            if self._router._realm:
                service_session = self._router._realm.session
                # never publish meta events for the "wamp." namespace itself
                if service_session and not registration.uri.startswith("wamp."):
                    if was_registered:
                        service_session.publish(
                            "wamp.registration.on_unregister",
                            session._session_id,
                            registration.id,
                        )
                    if was_last_callee:
                        service_session.publish(
                            "wamp.registration.on_delete",
                            session._session_id,
                            registration.id,
                        )
        del self._session_to_registrations[session]
    else:
        raise Exception("session with ID {} not attached".format(session._session_id))
|
https://github.com/crossbario/crossbar/issues/980
|
3/2/2017 8:26:29 PM2017-03-02T20:26:29+0000 [Router 13] Unhandled error in Deferred:
3/2/2017 8:26:29 PM2017-03-02T20:26:29+0000 [Router 13]
3/2/2017 8:26:29 PMTraceback (most recent call last):
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/router.py", line 192, in process
3/2/2017 8:26:29 PM self._dealer.processCall(session, msg)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/dealer.py", line 464, in processCall
3/2/2017 8:26:29 PM txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/txaio/tx.py", line 490, in add_callbacks
3/2/2017 8:26:29 PM future.addCallbacks(callback, errback)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 310, in addCallbacks
3/2/2017 8:26:29 PM self._runCallbacks()
3/2/2017 8:26:29 PM--- <exception caught here> ---
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
3/2/2017 8:26:29 PM current.result = callback(current.result, *args, **kw)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/dealer.py", line 447, in on_authorize_success
3/2/2017 8:26:29 PM self._call(session, call, registration, authorization)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/dealer.py", line 627, in _call
3/2/2017 8:26:29 PM self._router.send(callee, invocation)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/router.py", line 163, in send
3/2/2017 8:26:29 PM session._transport.send(msg)
3/2/2017 8:26:29 PMbuiltins.AttributeError: 'NoneType' object has no attribute 'send'
|
builtins.AttributeError
|
def onClose(self, wasClean):
    """
    Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onClose`

    Clears the transport reference and, when a session was established,
    fires the leave callback, detaches from the router and wipes all
    session-scoped state.
    """
    self._transport = None

    # nothing more to do unless a session had actually been established
    if not self._session_id:
        return

    # fire callback and close the transport
    try:
        self.onLeave(types.CloseDetails())
    except Exception:
        self.log.failure("Exception raised in onLeave callback")

    try:
        self._router.detach(self)
    except Exception as err:
        # surface the failure (a failed detach leaks router state), but
        # don't let it stop the rest of the teardown
        self.log.error(
            "Failed to detach session '{}': {}".format(self._session_id, err)
        )
        self.log.debug("{tb}".format(tb=Failure().getTraceback()))

    # wipe all session-scoped state
    for attr in (
        "_session_id",
        "_pending_session_id",
        "_authid",
        "_authrole",
        "_authmethod",
        "_authprovider",
    ):
        setattr(self, attr, None)
|
def onClose(self, wasClean):
    """
    Implements :func:`autobahn.wamp.interfaces.ITransportHandler.onClose`

    Clears the transport reference and, when a session was established,
    fires the leave callback, detaches from the router and wipes all
    session-scoped state.
    """
    self._transport = None
    if self._session_id:
        # fire callback and close the transport
        try:
            self.onLeave(types.CloseDetails())
        except Exception:
            self.log.failure("Exception raised in onLeave callback")
        try:
            self._router.detach(self)
        except Exception as e:
            # BUG FIX: do not swallow this silently -- a failed detach
            # leaves stale registrations whose transport is gone, so later
            # calls routed to them crash with "'NoneType' object has no
            # attribute 'send'" (issue #980); at least log the problem
            self.log.error(
                "Failed to detach session '{}': {}".format(self._session_id, e)
            )
        self._session_id = None
        self._pending_session_id = None
        self._authid = None
        self._authrole = None
        self._authmethod = None
        self._authprovider = None
|
https://github.com/crossbario/crossbar/issues/980
|
3/2/2017 8:26:29 PM2017-03-02T20:26:29+0000 [Router 13] Unhandled error in Deferred:
3/2/2017 8:26:29 PM2017-03-02T20:26:29+0000 [Router 13]
3/2/2017 8:26:29 PMTraceback (most recent call last):
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/router.py", line 192, in process
3/2/2017 8:26:29 PM self._dealer.processCall(session, msg)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/dealer.py", line 464, in processCall
3/2/2017 8:26:29 PM txaio.add_callbacks(d, on_authorize_success, on_authorize_error)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/txaio/tx.py", line 490, in add_callbacks
3/2/2017 8:26:29 PM future.addCallbacks(callback, errback)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 310, in addCallbacks
3/2/2017 8:26:29 PM self._runCallbacks()
3/2/2017 8:26:29 PM--- <exception caught here> ---
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 653, in _runCallbacks
3/2/2017 8:26:29 PM current.result = callback(current.result, *args, **kw)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/dealer.py", line 447, in on_authorize_success
3/2/2017 8:26:29 PM self._call(session, call, registration, authorization)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/dealer.py", line 627, in _call
3/2/2017 8:26:29 PM self._router.send(callee, invocation)
3/2/2017 8:26:29 PM File "/usr/local/lib/python3.6/site-packages/crossbar/router/router.py", line 163, in send
3/2/2017 8:26:29 PM session._transport.send(msg)
3/2/2017 8:26:29 PMbuiltins.AttributeError: 'NoneType' object has no attribute 'send'
|
builtins.AttributeError
|
def make_stderr_observer(
    levels=(LogLevel.warn, LogLevel.error, LogLevel.critical),
    show_source=False,
    format="standard",
    colour=False,
    _file=None,
    _categories=None,
):
    """
    Create an observer which prints logs to L{sys.stderr}.

    :param levels: Log levels that are printed; everything else is dropped.
    :param show_source: If True, append the emitting namespace to the system column.
    :param format: One of "standard", "syslogd" or "none".
    :param colour: If True, colour output red (errors only go to stderr).
    :param _file: Output file override (testing hook); defaults to sys.__stderr__.
    :param _categories: Log-category table override (testing hook).
    """
    if _file is None:
        _file = sys.__stderr__
    if _categories is None:
        from crossbar._log_categories import log_categories as _categories

    _formats = {
        "standard": STANDARD_FORMAT,
        "syslogd": SYSLOGD_FORMAT,
        "none": NONE_FORMAT,
    }

    @provider(ILogObserver)
    def StandardErrorObserver(event):
        """Render a single log event to the stderr stream."""
        # drop events below the configured levels
        if event["log_level"] not in levels:
            return

        # build the "system" column (who emitted this)
        logSystem = event.get("log_system", "-")
        if logSystem == "-":
            logSystem = "{:<10} {:>6}".format("Controller", os.getpid())
        if show_source and event.get("log_namespace") is not None:
            logSystem += " " + event.get("cb_namespace", event.get("log_namespace", ""))

        # a known log category overrides the event's own format string
        if event.get("log_category"):
            category_format = _categories.get(event["log_category"])
            if category_format:
                event = event.copy()
                event["log_format"] = category_format

        eventText = formatEvent(event) if event.get("log_format", None) is not None else ""
        if "log_failure" in event:
            # This is a traceback. Print it.
            eventText = eventText + event["log_failure"].getTraceback()

        assert format in _formats
        FORMAT_STRING = _formats[format]

        timestamp = formatTime(event["log_time"])
        if colour:
            # Errors are always red.
            eventString = FORMAT_STRING.format(
                startcolour=Fore.RED,
                time=timestamp,
                system=logSystem,
                endcolour=Fore.RESET,
                text=eventText,
            )
        else:
            eventString = strip_ansi(
                FORMAT_STRING.format(
                    startcolour="",
                    time=timestamp,
                    system=logSystem,
                    endcolour="",
                    text=eventText,
                )
            )
        print(eventString, file=_file)

    return StandardErrorObserver
|
def make_stderr_observer(
    levels=(LogLevel.warn, LogLevel.error, LogLevel.critical),
    show_source=False,
    format="standard",
    colour=False,
    _file=None,
    _categories=None,
):
    """
    Create an observer which prints logs to L{sys.stderr}.

    :param levels: Log levels that are printed; everything else is dropped.
    :param show_source: If True, append the emitting namespace to the system column.
    :param format: One of "standard", "syslogd" or "none".
    :param colour: If True, colour output red (errors only go to stderr).
    :param _file: Output file override (testing hook); defaults to sys.__stderr__.
    :param _categories: Log-category table override (testing hook).
    """
    if _file is None:
        _file = sys.__stderr__
    if _categories is None:
        from crossbar._log_categories import log_categories as _categories
    @provider(ILogObserver)
    def StandardErrorObserver(event):
        if event["log_level"] not in levels:
            return
        if event.get("log_system", "-") == "-":
            logSystem = "{:<10} {:>6}".format("Controller", os.getpid())
        else:
            logSystem = event["log_system"]
        if show_source and event.get("log_namespace") is not None:
            logSystem += " " + event.get("cb_namespace", event.get("log_namespace", ""))
        if event.get("log_category"):
            format_string = _categories.get(event["log_category"])
            if format_string:
                event = event.copy()
                event["log_format"] = format_string
        if event.get("log_format", None) is not None:
            eventText = formatEvent(event)
        else:
            eventText = ""
        if "log_failure" in event:
            # This is a traceback. Print it.
            eventText = eventText + event["log_failure"].getTraceback()
        if format == "standard":
            FORMAT_STRING = STANDARD_FORMAT
        elif format == "syslogd":
            FORMAT_STRING = SYSLOGD_FORMAT
        elif format == "none":
            FORMAT_STRING = NONE_FORMAT
        else:
            assert False
        if colour:
            # Errors are always red.
            fore = Fore.RED
            eventString = FORMAT_STRING.format(
                startcolour=fore,
                time=formatTime(event["log_time"]),
                system=logSystem,
                endcolour=Fore.RESET,
                # BUG FIX: use the already-computed eventText (which carries
                # category formatting and failure tracebacks) instead of
                # re-running formatEvent(event), which dropped tracebacks
                # entirely (issue #702)
                text=eventText,
            )
        else:
            eventString = strip_ansi(
                FORMAT_STRING.format(
                    startcolour="",
                    time=formatTime(event["log_time"]),
                    system=logSystem,
                    endcolour="",
                    # BUG FIX: same as above -- emit eventText, not a second
                    # formatEvent() pass that loses the traceback
                    text=eventText,
                )
            )
        print(eventString, file=_file)
    return StandardErrorObserver
|
https://github.com/crossbario/crossbar/issues/702
|
Unhandled Error
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 1439, in dataReceived
finishCallback(data[contentLength:])
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 1704, in _finishRequestBody
self.allContentReceived()
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 1767, in allContentReceived
req.requestReceived(command, path, version)
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 768, in requestReceived
self.process()
--- <exception caught here> ---
File "/usr/local/lib/python2.7/dist-packages/twisted/web/server.py", line 183, in process
self.render(resrc)
File "/usr/local/lib/python2.7/dist-packages/twisted/web/server.py", line 234, in render
body = resrc.render(self)
File "/usr/local/lib/python2.7/dist-packages/twisted/web/resource.py", line 250, in render
return m(request)
File "/usr/local/lib/python2.7/dist-packages/crossbar/router/longpoll.py", line 397, in render_POST
self.log.debug("WampLongPoll: creating new session ..")
exceptions.AttributeError: WampLongPollResourceOpen instance has no attribute 'log'
|
exceptions.AttributeError
|
def StandardErrorObserver(event):
    """
    Render a single log event to the configured stderr stream.

    NOTE(review): this function closes over ``levels``, ``show_source``,
    ``format``, ``colour``, ``_file`` and ``_categories`` from an enclosing
    factory (``make_stderr_observer``) -- it is not callable standalone.
    """
    # drop events below the configured levels
    if event["log_level"] not in levels:
        return
    # build the "system" column (who emitted this event)
    if event.get("log_system", "-") == "-":
        logSystem = "{:<10} {:>6}".format("Controller", os.getpid())
    else:
        logSystem = event["log_system"]
    if show_source and event.get("log_namespace") is not None:
        logSystem += " " + event.get("cb_namespace", event.get("log_namespace", ""))
    # a known log category overrides the event's own format string
    if event.get("log_category"):
        format_string = _categories.get(event["log_category"])
        if format_string:
            event = event.copy()
            event["log_format"] = format_string
    if event.get("log_format", None) is not None:
        eventText = formatEvent(event)
    else:
        eventText = ""
    if "log_failure" in event:
        # This is a traceback. Print it.
        eventText = eventText + event["log_failure"].getTraceback()
    # select the output line template
    if format == "standard":
        FORMAT_STRING = STANDARD_FORMAT
    elif format == "syslogd":
        FORMAT_STRING = SYSLOGD_FORMAT
    elif format == "none":
        FORMAT_STRING = NONE_FORMAT
    else:
        assert False
    if colour:
        # Errors are always red.
        fore = Fore.RED
        eventString = FORMAT_STRING.format(
            startcolour=fore,
            time=formatTime(event["log_time"]),
            system=logSystem,
            endcolour=Fore.RESET,
            text=eventText,
        )
    else:
        # strip any ANSI escapes that leaked into the event text
        eventString = strip_ansi(
            FORMAT_STRING.format(
                startcolour="",
                time=formatTime(event["log_time"]),
                system=logSystem,
                endcolour="",
                text=eventText,
            )
        )
    print(eventString, file=_file)
|
def StandardErrorObserver(event):
    """
    Render a single log event to the configured stderr stream.

    NOTE(review): this function closes over ``levels``, ``show_source``,
    ``format``, ``colour``, ``_file`` and ``_categories`` from an enclosing
    factory (``make_stderr_observer``) -- it is not callable standalone.
    """
    if event["log_level"] not in levels:
        return
    if event.get("log_system", "-") == "-":
        logSystem = "{:<10} {:>6}".format("Controller", os.getpid())
    else:
        logSystem = event["log_system"]
    if show_source and event.get("log_namespace") is not None:
        logSystem += " " + event.get("cb_namespace", event.get("log_namespace", ""))
    if event.get("log_category"):
        format_string = _categories.get(event["log_category"])
        if format_string:
            event = event.copy()
            event["log_format"] = format_string
    if event.get("log_format", None) is not None:
        eventText = formatEvent(event)
    else:
        eventText = ""
    if "log_failure" in event:
        # This is a traceback. Print it.
        eventText = eventText + event["log_failure"].getTraceback()
    if format == "standard":
        FORMAT_STRING = STANDARD_FORMAT
    elif format == "syslogd":
        FORMAT_STRING = SYSLOGD_FORMAT
    elif format == "none":
        FORMAT_STRING = NONE_FORMAT
    else:
        assert False
    if colour:
        # Errors are always red.
        fore = Fore.RED
        eventString = FORMAT_STRING.format(
            startcolour=fore,
            time=formatTime(event["log_time"]),
            system=logSystem,
            endcolour=Fore.RESET,
            # BUG FIX: emit the already-computed eventText (carries category
            # formatting and failure tracebacks) instead of re-running
            # formatEvent(event), which dropped tracebacks (issue #702)
            text=eventText,
        )
    else:
        eventString = strip_ansi(
            FORMAT_STRING.format(
                startcolour="",
                time=formatTime(event["log_time"]),
                system=logSystem,
                endcolour="",
                # BUG FIX: same as above -- use eventText, not a second
                # formatEvent() pass
                text=eventText,
            )
        )
    print(eventString, file=_file)
|
https://github.com/crossbario/crossbar/issues/702
|
Unhandled Error
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 1439, in dataReceived
finishCallback(data[contentLength:])
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 1704, in _finishRequestBody
self.allContentReceived()
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 1767, in allContentReceived
req.requestReceived(command, path, version)
File "/usr/local/lib/python2.7/dist-packages/twisted/web/http.py", line 768, in requestReceived
self.process()
--- <exception caught here> ---
File "/usr/local/lib/python2.7/dist-packages/twisted/web/server.py", line 183, in process
self.render(resrc)
File "/usr/local/lib/python2.7/dist-packages/twisted/web/server.py", line 234, in render
body = resrc.render(self)
File "/usr/local/lib/python2.7/dist-packages/twisted/web/resource.py", line 250, in render
return m(request)
File "/usr/local/lib/python2.7/dist-packages/crossbar/router/longpoll.py", line 397, in render_POST
self.log.debug("WampLongPoll: creating new session ..")
exceptions.AttributeError: WampLongPollResourceOpen instance has no attribute 'log'
|
exceptions.AttributeError
|
def create_native_worker_client_factory(router_session_factory, on_ready, on_exit):
    """
    Build the WAMP-WebSocket-over-STDIO client transport factory the node
    controller uses to talk to spawned (native) workers.

    Note the inversion of roles: the node controller runs the *client*
    factory while the native worker runs the server factory -- a little
    non-intuitive, but just how Twisted composes STDIO transports.

    :param router_session_factory: Router session factory to attach to.
    :type router_session_factory: obj
    :returns: The configured client transport factory.
    """
    client_factory = NativeWorkerClientFactory(
        router_session_factory, "ws://localhost", debug=False
    )

    # on_ready is resolved in crossbar/controller/process.py:on_worker_ready around 175
    # after crossbar.node.<ID>.on_worker_ready is published to (in the controller session)
    # that happens in crossbar/worker/worker.py:publish_ready which itself happens when
    # the native worker joins the realm (part of onJoin)
    client_factory._on_ready = on_ready
    client_factory._on_exit = on_exit

    # we need to increase the opening handshake timeout in particular, since starting up a worker
    # on PyPy will take a little (due to JITting)
    client_factory.setProtocolOptions(
        failByDrop=False, openHandshakeTimeout=60, closeHandshakeTimeout=5
    )

    return client_factory
|
def create_native_worker_client_factory(router_session_factory, on_ready, on_exit):
    """
    Create a transport factory for talking to native workers.

    The node controller talks WAMP-WebSocket-over-STDIO with spawned (native) workers.
    The node controller runs a client transport factory, and the native worker
    runs a server transport factory. This is a little non-intuitive, but just the
    way that Twisted works when using STDIO transports.

    :param router_session_factory: Router session factory to attach to.
    :type router_session_factory: obj
    :returns: The configured client transport factory.
    """
    factory = NativeWorkerClientFactory(
        router_session_factory, "ws://localhost", debug=False
    )
    # we need to increase the opening handshake timeout in particular, since starting up a worker
    # on PyPy (or on slow hardware) will take a while due to JITting / import time.
    # BUG FIX: 30s proved too short in practice and made worker startup fail
    # with "process ended with exit code 1" on slow hosts (issue #305) -- use 60s
    factory.setProtocolOptions(
        failByDrop=False, openHandshakeTimeout=60, closeHandshakeTimeout=5
    )
    # on_ready is resolved in crossbar/controller/process.py:on_worker_ready around 175
    # after crossbar.node.<ID>.on_worker_ready is published to (in the controller session)
    # that happens in crossbar/worker/worker.py:publish_ready which itself happens when
    # the native worker joins the realm (part of onJoin)
    factory._on_ready = on_ready
    factory._on_exit = on_exit
    return factory
|
https://github.com/crossbario/crossbar/issues/305
|
$ crossbar init
$ crossbar start
2015-04-14 19:05:22+0000 [Controller 8740] Log opened.
2015-04-14 19:05:22+0000 [Controller 8740] ==================== Crossbar.io ====================
2015-04-14 19:05:22+0000 [Controller 8740] Crossbar.io 0.10.4 starting
2015-04-14 19:05:22+0000 [Controller 8740] Running on CPython using EPollReactor reactor
2015-04-14 19:05:22+0000 [Controller 8740] Starting from node directory /home/pi/cbtest/.crossbar
2015-04-14 19:05:25+0000 [Controller 8740] Starting from local configuration '/home/pi/cbtest/.crossbar/config.json'
2015-04-14 19:05:25+0000 [Controller 8740] Warning, could not set process title (setproctitle not installed)
2015-04-14 19:05:25+0000 [Controller 8740] Router created for realm 'crossbar'
2015-04-14 19:05:26+0000 [Controller 8740] No WAMPlets detected in enviroment.
2015-04-14 19:05:26+0000 [Controller 8740] Starting Router with ID 'worker1' ..
2015-04-14 19:05:26+0000 [Controller 8740] Entering reactor event loop ...
2015-04-14 19:05:28+0000 [Router 8749] /usr/local/lib/python2.7/dist-packages/zope.deprecation-4.1.2-py2.7.egg/zope/__init__.py:3: UserWarning: Module twisted was already imported from /usr/local/lib/python2.7/dist-packages/Twisted-15.1.0-py2.7-linux-armv6l.egg/twisted/__init__.pyc, but /usr/local/lib/python2.7/dist-packages/autobahn-0.10.3-py2.7.egg is being added to sys.path
2015-04-14 19:05:30+0000 [Router 8749] Log opened.
2015-04-14 19:05:30+0000 [Router 8749] Warning: could not set worker process title (setproctitle not installed)
2015-04-14 19:05:52+0000 [Router 8749] Running under CPython using EPollReactor reactor
2015-04-14 19:05:58+0000 [Controller 8740] Worker 8749: Process connection gone (A process has ended with a probable error condition: process ended with exit code 1.)
2015-04-14 19:05:58+0000 [Controller 8740] ERROR: failed to start native worker - A process has ended with a probable error condition: process ended with exit code 1.
2015-04-14 19:05:58+0000 [Controller 8740] Traceback (most recent call last):
2015-04-14 19:05:58+0000 [Controller 8740] File "/usr/local/lib/python2.7/dist-packages/crossbar-0.10.4-py2.7.egg/crossbar/controller/node.py", line 183, in run_node_config
2015-04-14 19:05:58+0000 [Controller 8740] yield self._run_node_config(config)
2015-04-14 19:05:58+0000 [Controller 8740] File "/usr/local/lib/python2.7/dist-packages/Twisted-15.1.0-py2.7-linux-armv6l.egg/twisted/internet/defer.py", line 1105, in _inlineCallbacks
2015-04-14 19:05:58+0000 [Controller 8740] result = result.throwExceptionIntoGenerator(g)
2015-04-14 19:05:58+0000 [Controller 8740] File "/usr/local/lib/python2.7/dist-packages/Twisted-15.1.0-py2.7-linux-armv6l.egg/twisted/python/failure.py", line 389, in throwExceptionIntoGenerator
2015-04-14 19:05:58+0000 [Controller 8740] return g.throw(self.type, self.value, self.tb)
2015-04-14 19:05:58+0000 [Controller 8740] File "/usr/local/lib/python2.7/dist-packages/crossbar-0.10.4-py2.7.egg/crossbar/controller/node.py", line 249, in _run_node_config
2015-04-14 19:05:58+0000 [Controller 8740] yield self._controller.start_router(worker_id, worker_options, details=call_details)
2015-04-14 19:05:58+0000 [Controller 8740] File "/usr/local/lib/python2.7/dist-packages/Twisted-15.1.0-py2.7-linux-armv6l.egg/twisted/internet/defer.py", line 578, in _runCallbacks
2015-04-14 19:05:58+0000 [Controller 8740] current.result = callback(current.result, *args, **kw)
2015-04-14 19:05:58+0000 [Controller 8740] File "/usr/local/lib/python2.7/dist-packages/crossbar-0.10.4-py2.7.egg/crossbar/controller/process.py", line 540, in on_ready_error
2015-04-14 19:05:58+0000 [Controller 8740] raise ApplicationError("crossbar.error.cannot_start", emsg, worker.getlog())
2015-04-14 19:05:58+0000 [Controller 8740] ApplicationError: ApplicationError('crossbar.error.cannot_start', args = ('ERROR: failed to start native worker - A process has ended with a probable error condition: process ended with exit code 1.', []), kwargs = {})
2015-04-14 19:05:58+0000 [Controller 8740] Main loop terminated.
|
ApplicationError
|
def wrapcloseterm(self, widget):
    """A child terminal has closed, so this container must die

    :param widget: the Terminal that has just closed
    """
    dbg("Paned::wrapcloseterm: Called on %s" % widget)
    if self.closeterm(widget):
        # At this point we only have one child, which is the surviving term
        sibling = self.children[0]
        first_term_sibling = sibling
        cur_tabnum = None
        focus_sibling = True
        if self.get_toplevel().is_child_notebook():
            # we live inside a notebook tab -- figure out whether the
            # closing terminal was that tab's last-active one
            notebook = self.get_toplevel().get_children()[0]
            cur_tabnum = notebook.get_current_page()
            tabnum = notebook.page_num_descendant(self)
            nth_page = notebook.get_nth_page(tabnum)
            exiting_term_was_last_active = (
                notebook.last_active_term[nth_page] == widget.uuid
            )
            if exiting_term_was_last_active:
                # promote the first remaining descendant terminal to
                # be the tab's last-active terminal
                first_term_sibling = enumerate_descendants(self)[1][0]
                notebook.set_last_active_term(first_term_sibling.uuid)
                notebook.clean_last_active_term()
                self.get_toplevel().last_active_term = None
            # only steal focus when our tab is the visible one
            if cur_tabnum != tabnum:
                focus_sibling = False
        elif self.get_toplevel().last_active_term != widget.uuid:
            focus_sibling = False
        # reparent the surviving child into our own parent, then remove
        # ourselves (this pane collapses away)
        self.remove(sibling)
        metadata = None
        parent = self.get_parent()
        metadata = parent.get_child_metadata(self)
        dbg("metadata obtained for %s: %s" % (self, metadata))
        parent.remove(self)
        self.cnxids.remove_all()
        parent.add(sibling, metadata)
        if cur_tabnum:
            notebook.set_current_page(cur_tabnum)
        if focus_sibling:
            first_term_sibling.grab_focus()
        elif not sibling.get_toplevel().is_child_notebook():
            # the last-active terminal may already be gone; log instead
            # of crashing in that case
            try:
                Terminator().find_terminal_by_uuid(
                    sibling.get_toplevel().last_active_term.urn
                ).grab_focus()
            except AttributeError:
                dbg(
                    "cannot find terminal with uuid: %s"
                    % sibling.get_toplevel().last_active_term.urn
                )
    else:
        dbg("Paned::wrapcloseterm: self.closeterm failed")
|
def wrapcloseterm(self, widget):
    """A child terminal has closed, so this container must die

    :param widget: the Terminal that has just closed
    """
    dbg("Paned::wrapcloseterm: Called on %s" % widget)
    if self.closeterm(widget):
        # At this point we only have one child, which is the surviving term
        sibling = self.children[0]
        first_term_sibling = sibling
        cur_tabnum = None
        focus_sibling = True
        if self.get_toplevel().is_child_notebook():
            # we live inside a notebook tab -- figure out whether the
            # closing terminal was that tab's last-active one
            notebook = self.get_toplevel().get_children()[0]
            cur_tabnum = notebook.get_current_page()
            tabnum = notebook.page_num_descendant(self)
            nth_page = notebook.get_nth_page(tabnum)
            exiting_term_was_last_active = (
                notebook.last_active_term[nth_page] == widget.uuid
            )
            if exiting_term_was_last_active:
                first_term_sibling = enumerate_descendants(self)[1][0]
                notebook.set_last_active_term(first_term_sibling.uuid)
                notebook.clean_last_active_term()
                self.get_toplevel().last_active_term = None
            # only steal focus when our tab is the visible one
            if cur_tabnum != tabnum:
                focus_sibling = False
        elif self.get_toplevel().last_active_term != widget.uuid:
            focus_sibling = False
        # reparent the surviving child into our own parent, then remove
        # ourselves (this pane collapses away)
        self.remove(sibling)
        metadata = None
        parent = self.get_parent()
        metadata = parent.get_child_metadata(self)
        dbg("metadata obtained for %s: %s" % (self, metadata))
        parent.remove(self)
        self.cnxids.remove_all()
        parent.add(sibling, metadata)
        if cur_tabnum:
            notebook.set_current_page(cur_tabnum)
        if focus_sibling:
            first_term_sibling.grab_focus()
        elif not sibling.get_toplevel().is_child_notebook():
            # BUG FIX: find_terminal_by_uuid() can return None (the
            # last-active terminal may already be gone), which crashed
            # here with "'NoneType' object has no attribute 'grab_focus'"
            # (issue #68) -- log instead of crashing
            try:
                Terminator().find_terminal_by_uuid(
                    sibling.get_toplevel().last_active_term.urn
                ).grab_focus()
            except AttributeError:
                dbg(
                    "cannot find terminal with uuid: %s"
                    % sibling.get_toplevel().last_active_term.urn
                )
    else:
        dbg("Paned::wrapcloseterm: self.closeterm failed")
|
https://github.com/gnome-terminator/terminator/issues/68
|
Truncated backtrace:
paned.py:311:wrapcloseterm:AttributeError: 'NoneType' object has no attribute 'grab_focus'
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/terminatorlib/paned.py", line 311, in wrapcloseterm
Terminator().find_terminal_by_uuid(sibling.get_toplevel().last_active_term.urn).grab_focus()
AttributeError: 'NoneType' object has no attribute 'grab_focus'
Local variables in innermost frame:
self: <paned.HPaned object at 0x7f85187c0ec0 (terminatorlib+paned+HPaned at 0x559802bf6b10)>
widget: <terminal.Terminal object at 0x7f8528779840 (terminatorlib+terminal+Terminal at 0x559802f18140)>
sibling: <terminal.Terminal object at 0x7f85187c4100 (terminatorlib+terminal+Terminal at 0x5598031ed160)>
first_term_sibling: <terminal.Terminal object at 0x7f85187c4100 (terminatorlib+terminal+Terminal at 0x5598031ed160)>
cur_tabnum: None
focus_sibling: False
metadata: None
parent: <window.Window object at 0x7f852875f3c0 (terminatorlib+window+Window at 0x559802d90500)>
|
AttributeError
|
def get_default_cwd():
    """Pick a sensible default working directory.

    Returns the process's current directory, falling back to the
    filesystem root when that directory no longer exists (e.g. it was
    deleted underneath us).
    """
    # FileNotFoundError is a subclass of OSError, so catching OSError
    # covers the "cwd was deleted" case
    try:
        return os.getcwd()
    except OSError:
        err("unable to set current working directory, does not exist")
        return "/"
|
def get_default_cwd():
    """Determine a reasonable default cwd.

    Returns the process's current directory; when it is unusable, falls
    back to the user's home directory, then to the filesystem root.
    """
    # BUG FIX: os.getcwd() itself raises FileNotFoundError when the
    # process's working directory has been deleted (issue #58), so the
    # existence check below never got a chance to run -- guard the call
    try:
        cwd = os.getcwd()
    except OSError:
        cwd = None
    if cwd is None or not os.path.isdir(cwd):
        # fall back to the user's home directory from the passwd database,
        # then to the filesystem root
        try:
            cwd = pwd.getpwuid(os.getuid())[5]
        except KeyError:
            cwd = "/"
    return cwd
|
https://github.com/gnome-terminator/terminator/issues/58
|
Traceback (most recent call last):
File "/usr/local/bin/terminator", line 87, in <module>
dbus_service = ipc.DBusService()
File "/usr/local/lib/python3.7/dist-packages/terminatorlib/ipc.py", line 42, in __init__
self.prepare_attributes()
File "/usr/local/lib/python3.7/dist-packages/terminatorlib/ipc.py", line 56, in prepare_attributes
"Couldn't get DBus name %s: Name exists" % BUS_NAME)
dbus.exceptions.DBusException: Couldn't get DBus name net.tenshu.Terminator21a9d5db22c73a993ff0b42f64b396873: Name exists
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/bin/terminator", line 108, in <module>
ipc.new_window_cmdline(optionslist)
File "/usr/local/lib/python3.7/dist-packages/terminatorlib/ipc.py", line 194, in _exec
func(proxy, *args, **argd)
File "/usr/local/lib/python3.7/dist-packages/terminatorlib/ipc.py", line 200, in new_window_cmdline
session.new_window_cmdline(options)
File "/usr/lib/python3/dist-packages/dbus/proxies.py", line 72, in __call__
return self._proxy_method(*args, **keywords)
File "/usr/lib/python3/dist-packages/dbus/proxies.py", line 147, in __call__
**keywords)
File "/usr/lib/python3/dist-packages/dbus/connection.py", line 653, in call_blocking
message, timeout)
dbus.exceptions.DBusException: org.freedesktop.DBus.Python.FileNotFoundError: Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/dbus/service.py", line 711, in _message_cb
retval = candidate_method(self, *args, **keywords)
File "/usr/lib/python3/dist-packages/terminatorlib/ipc.py", line 71, in new_window_cmdline
self.terminator.create_layout(oldopts.layout)
File "/usr/lib/python3/dist-packages/terminatorlib/terminator.py", line 310, in create_layout
window, terminal = self.new_window()
File "/usr/lib/python3/dist-packages/terminatorlib/terminator.py", line 233, in new_window
terminal = maker.make('Terminal')
File "/usr/lib/python3/dist-packages/terminatorlib/factory.py", line 93, in make
output = func(**kwargs)
File "/usr/lib/python3/dist-packages/terminatorlib/factory.py", line 105, in make_terminal
return(terminal.Terminal())
File "/usr/lib/python3/dist-packages/terminatorlib/terminal.py", line 141, in __init__
self.cwd = get_default_cwd()
File "/usr/lib/python3/dist-packages/terminatorlib/cwd.py", line 28, in get_default_cwd
cwd = os.getcwd()
FileNotFoundError:
|
FileNotFoundError
|
def __init__(self):
    """Class initialiser.

    Assembles one terminal widget: registers it with the Terminator
    singleton, creates and configures the Vte widget, titlebar,
    terminal box and searchbar, connects signal handlers, exports
    terminal-related environment variables, and applies the current
    configuration.  Statement order matters: the VTE widget must exist
    before URL matches are registered and before packing.
    """
    GObject.GObject.__init__(self)

    self.terminator = Terminator()
    self.terminator.register_terminal(self)

    # FIXME: Surely these should happen in Terminator::register_terminal()?
    self.connect("enumerate", self.terminator.do_enumerate)
    self.connect("focus-in", self.terminator.focus_changed)
    self.connect("focus-out", self.terminator.focus_left)

    self.matches = {}          # regex match tags registered on the VTE widget
    self.cnxids = Signalman()  # bookkeeping for signal connection ids
    self.config = Config()

    self.cwd = get_default_cwd()
    self.origcwd = self.terminator.origcwd
    self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)

    self.pending_on_vte_size_allocate = False

    self.vte = Vte.Terminal()
    self.vte._draw_data = None
    # Transparency support needs both set_opacity and is_composited on the VTE.
    if not hasattr(self.vte, "set_opacity") or not hasattr(self.vte, "is_composited"):
        self.composite_support = False
    else:
        self.composite_support = True
    dbg("composite_support: %s" % self.composite_support)
    self.vte.show()

    self.default_encoding = self.vte.get_encoding()
    # REGEX_FLAGS is a module-level constant -- presumably selected to match
    # whichever regex API (Vte.Regex or GLib.Regex) is available; TODO confirm.
    self.regex_flags = REGEX_FLAGS
    self.update_url_matches()

    self.terminalbox = self.create_terminalbox()

    self.titlebar = Titlebar(self)
    self.titlebar.connect_icon(self.on_group_button_press)
    self.titlebar.connect("edit-done", self.on_edit_done)
    self.connect("title-change", self.titlebar.set_terminal_title)
    self.titlebar.connect("create-group", self.really_create_group)
    self.titlebar.show_all()

    self.searchbar = Searchbar()
    self.searchbar.connect("end-search", self.on_search_done)

    self.show()
    self.pack_start(self.titlebar, False, True, 0)
    self.pack_start(self.terminalbox, True, True, 0)
    self.pack_end(self.searchbar, True, True, 0)

    self.connect_signals()

    # Export terminal type and proxy settings for child processes.
    os.putenv("TERM", self.config["term"])
    os.putenv("COLORTERM", self.config["colorterm"])

    env_proxy = os.getenv("http_proxy")
    if not env_proxy:
        if self.config["http_proxy"] and self.config["http_proxy"] != "":
            os.putenv("http_proxy", self.config["http_proxy"])

    self.reconfigure()
    self.vte.set_size(80, 24)
|
def __init__(self):
    """Class initialiser.

    Assembles one terminal widget: registers it with the Terminator
    singleton, creates and configures the Vte widget, titlebar,
    terminal box and searchbar, connects signal handlers, exports
    terminal-related environment variables, and applies the current
    configuration.
    """
    GObject.GObject.__init__(self)

    self.terminator = Terminator()
    self.terminator.register_terminal(self)

    # FIXME: Surely these should happen in Terminator::register_terminal()?
    self.connect("enumerate", self.terminator.do_enumerate)
    self.connect("focus-in", self.terminator.focus_changed)
    self.connect("focus-out", self.terminator.focus_left)

    self.matches = {}          # regex match tags registered on the VTE widget
    self.cnxids = Signalman()  # bookkeeping for signal connection ids
    self.config = Config()

    self.cwd = get_default_cwd()
    self.origcwd = self.terminator.origcwd
    self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)

    self.pending_on_vte_size_allocate = False

    self.vte = Vte.Terminal()
    self.vte._draw_data = None
    # Transparency support needs both set_opacity and is_composited on the VTE.
    if not hasattr(self.vte, "set_opacity") or not hasattr(self.vte, "is_composited"):
        self.composite_support = False
    else:
        self.composite_support = True
    dbg("composite_support: %s" % self.composite_support)
    self.vte.show()

    self.default_encoding = self.vte.get_encoding()
    # NOTE(review): Vte.REGEX_FLAGS_DEFAULT assumes a VTE new enough to
    # expose the modern Regex API; older gi bindings raise AttributeError
    # here / in update_url_matches() -- verify the minimum VTE version.
    self.regex_flags = Vte.REGEX_FLAGS_DEFAULT | PCRE2_MULTILINE
    self.update_url_matches()

    self.terminalbox = self.create_terminalbox()

    self.titlebar = Titlebar(self)
    self.titlebar.connect_icon(self.on_group_button_press)
    self.titlebar.connect("edit-done", self.on_edit_done)
    self.connect("title-change", self.titlebar.set_terminal_title)
    self.titlebar.connect("create-group", self.really_create_group)
    self.titlebar.show_all()

    self.searchbar = Searchbar()
    self.searchbar.connect("end-search", self.on_search_done)

    self.show()
    self.pack_start(self.titlebar, False, True, 0)
    self.pack_start(self.terminalbox, True, True, 0)
    self.pack_end(self.searchbar, True, True, 0)

    self.connect_signals()

    # Export terminal type and proxy settings for child processes.
    os.putenv("TERM", self.config["term"])
    os.putenv("COLORTERM", self.config["colorterm"])

    env_proxy = os.getenv("http_proxy")
    if not env_proxy:
        if self.config["http_proxy"] and self.config["http_proxy"] != "":
            os.putenv("http_proxy", self.config["http_proxy"])

    self.reconfigure()
    self.vte.set_size(80, 24)
|
https://github.com/gnome-terminator/terminator/issues/10
|
$ terminator
Traceback (most recent call last):
File "/usr/bin/terminator", line 122, in <module>
TERMINATOR.create_layout(OPTIONS.layout)
File "/usr/share/terminator/terminatorlib/terminator.py", line 311, in create_layout
window, terminal = self.new_window()
File "/usr/share/terminator/terminatorlib/terminator.py", line 234, in new_window
terminal = maker.make('Terminal')
File "/usr/share/terminator/terminatorlib/factory.py", line 94, in make
output = func(**kwargs)
File "/usr/share/terminator/terminatorlib/factory.py", line 106, in make_terminal
return(terminal.Terminal())
File "/usr/share/terminator/terminatorlib/terminal.py", line 147, in __init__
self.update_url_matches()
File "/usr/share/terminator/terminatorlib/terminal.py", line 273, in update_url_matches
reg = Vte.Regex.new_for_match(re, len(re), self.regex_flags)
File "/usr/lib/python2.7/dist-packages/gi/module.py", line 139, in __getattr__
self.__name__, name))
AttributeError: 'gi.repository.Vte' object has no attribute 'Regex'
|
AttributeError
|
def _add_regex(self, name, re):
    """Compile *re* and register it as a clickable match on the VTE widget.

    Uses the modern Vte.Regex API when REGEX_MODERN is set, otherwise
    falls back to the legacy GLib.Regex / match_add_gregex path.  The
    resulting match tag is stored under *name* in self.matches.
    """
    if REGEX_MODERN:
        compiled = Vte.Regex.new_for_match(re, len(re), self.regex_flags)
        tag = self.vte.match_add_regex(compiled, 0)
    else:
        compiled = GLib.Regex.new(re, self.regex_flags, 0)
        tag = self.vte.match_add_gregex(compiled, 0)
    self.matches[name] = tag
    self.vte.match_set_cursor_name(tag, "pointer")
|
def _add_regex(self, name, re):
    """Compile *re* as a Vte regex, register it as a clickable match, and
    remember its match tag under *name* in self.matches."""
    compiled = Vte.Regex.new_for_match(re, len(re), self.regex_flags)
    tag = self.vte.match_add_regex(compiled, 0)
    self.matches[name] = tag
    self.vte.match_set_cursor_name(tag, "pointer")
|
https://github.com/gnome-terminator/terminator/issues/10
|
$ terminator
Traceback (most recent call last):
File "/usr/bin/terminator", line 122, in <module>
TERMINATOR.create_layout(OPTIONS.layout)
File "/usr/share/terminator/terminatorlib/terminator.py", line 311, in create_layout
window, terminal = self.new_window()
File "/usr/share/terminator/terminatorlib/terminator.py", line 234, in new_window
terminal = maker.make('Terminal')
File "/usr/share/terminator/terminatorlib/factory.py", line 94, in make
output = func(**kwargs)
File "/usr/share/terminator/terminatorlib/factory.py", line 106, in make_terminal
return(terminal.Terminal())
File "/usr/share/terminator/terminatorlib/terminal.py", line 147, in __init__
self.update_url_matches()
File "/usr/share/terminator/terminatorlib/terminal.py", line 273, in update_url_matches
reg = Vte.Regex.new_for_match(re, len(re), self.regex_flags)
File "/usr/lib/python2.7/dist-packages/gi/module.py", line 139, in __getattr__
self.__name__, name))
AttributeError: 'gi.repository.Vte' object has no attribute 'Regex'
|
AttributeError
|
def can_init():
    """Return True when pwnlib may take over the terminal.

    That is the case only when stdout is a real TTY and we are not
    running on Windows, not in ``python -i``, and not inside any REPL
    (fancy ones like IPython/bpython/dreampie/Jupyter, or the plain
    interactive interpreter).  Iff this returns `True`, a call to
    :meth:`init` will let ``pwnlib`` manage the terminal.
    """
    # Cheap platform / TTY / `python -i` guards first.
    if sys.platform == "win32" or not sys.stdout.isatty() or sys.flags.interactive:
        return False

    # Fancy REPLs betray themselves via their imported modules.
    loaded = sys.modules.keys()
    if any(repl in loaded for repl in ("IPython", "bpython", "dreampielib", "jupyter_client._version")):
        return False

    # The plain python REPL appears as a stack frame whose co_filename is
    # '<stdin>'.  Raise and catch to populate sys.exc_info so the call
    # stack can be walked.
    try:
        raise BaseException
    except BaseException:
        frame = sys.exc_info()[2].tb_frame
    while frame is not None:
        if frame.f_code.co_filename == "<stdin>":
            return False
        frame = frame.f_back
    return True
|
def can_init():
    """Return True when pwnlib may take over the terminal.

    That is the case only when stdout is a real TTY and we are not
    running on Windows, not in ``python -i``, and not inside any REPL
    (IPython, bpython, dreampie, or the plain interactive interpreter).
    Iff this returns `True`, a call to :meth:`init` will let ``pwnlib``
    manage the terminal.
    """
    # Cheap platform / TTY / `python -i` guards first.
    if sys.platform == "win32" or not sys.stdout.isatty() or sys.flags.interactive:
        return False

    # Fancy REPLs betray themselves via their imported modules.
    loaded = sys.modules.keys()
    if any(repl in loaded for repl in ("IPython", "bpython", "dreampielib")):
        return False

    # The plain python REPL appears as a stack frame whose co_filename is
    # '<stdin>'.  Raise and catch to populate sys.exc_info so the call
    # stack can be walked.
    try:
        raise BaseException
    except BaseException:
        frame = sys.exc_info()[2].tb_frame
    while frame is not None:
        if frame.f_code.co_filename == "<stdin>":
            return False
        frame = frame.f_back
    return True
|
https://github.com/Gallopsled/pwntools/issues/826
|
In 1: from pwn import *
Out 1:
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-1-d810d1e70089> in <module>()
----> 1 from pwn import *
/usr/local/lib/python2.7/dist-packages/pwn/__init__.py in <module>()
1 # Promote useful stuff to toplevel
----> 2 from .toplevel import *
3
4 pwnlib.args.initialize()
5 pwnlib.log.install_default_handler()
/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py in <module>()
18 from pprint import pprint
19
---> 20 import pwnlib
21 from pwnlib import *
22 from pwnlib.asm import *
/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py in <module>()
38
39 for module in __all__:
---> 40 importlib.import_module('.%s' % module, 'pwnlib')
/usr/lib/python2.7/importlib/__init__.pyc in import_module(name, package)
35 level += 1
36 name = _resolve_name(name[level:], package, level)
---> 37 __import__(name)
38 return sys.modules[name]
/usr/local/lib/python2.7/dist-packages/pwnlib/args.py in <module>()
46 import sys
47
---> 48 from . import term
49 from .context import context
50
/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py in <module>()
1 import sys
2
----> 3 from . import completer
4 from . import key
5 from . import keymap
/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py in <module>()
2 import re
3
----> 4 from . import readline
5
6
/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py in <module>()
2 from . import keymap as km
3 from . import term
----> 4 from . import text
5
6 cursor = text.reverse
/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.pyc in <module>()
113
114 tether = sys.modules[__name__]
--> 115 sys.modules[__name__] = Module()
/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.pyc in __init__(self)
24 self.__file__ = __file__
25 self.__name__ = __name__
---> 26 self.num_colors = termcap.get('colors', default = 8)
27 self.has_bright = self.num_colors >= 16
28 self.has_gray = self.has_bright
/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.pyc in get(cap, *args, **kwargs)
23 s = cache.get(cap)
24 if not s:
---> 25 s = curses.tigetstr(cap)
26 if s == None:
27 s = curses.tigetnum(cap)
error: must call (at least) setupterm() first
In 2: import curses
In 3: curses.setupterm()
Out 2:
---------------------------------------------------------------------------
UnsupportedOperation Traceback (most recent call last)
<ipython-input-5-82805162dd9f> in <module>()
----> 1 curses.setupterm()
/usr/local/lib/python2.7/dist-packages/ipykernel/iostream.pyc in fileno(self)
304
305 def fileno(self):
--> 306 raise UnsupportedOperation("IOStream has no fileno.")
307
308 def write(self, string):
UnsupportedOperation: IOStream has no fileno.
print (curses.initscr(), curses.endwin())
(<_curses.curses window object at 0x7fef5c891a98>, None)
In 4: from pwnlib import *
Out 3:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-2-5eb1d9485f4c> in <module>()
----> 1 from pwnlib import *
/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py in <module>()
38
39 for module in __all__:
---> 40 importlib.import_module('.%s' % module, 'pwnlib')
/usr/lib/python2.7/importlib/__init__.pyc in import_module(name, package)
35 level += 1
36 name = _resolve_name(name[level:], package, level)
---> 37 __import__(name)
38 return sys.modules[name]
/usr/local/lib/python2.7/dist-packages/pwnlib/args.py in <module>()
46 import sys
47
---> 48 from . import term
49 from .context import context
50
/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py in <module>()
1 import sys
2
----> 3 from . import completer
4 from . import key
5 from . import keymap
/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py in <module>()
2 import re
3
----> 4 from . import readline
5
6
/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py in <module>()
----> 1 from . import keyconsts as kc
2 from . import keymap as km
3 from . import term
4 from . import text
5
ImportError: cannot import name keyconsts
```
|
ImportError
|
def init():
    """(Re)initialise the terminfo capability cache.

    Detects a Jupyter ZMQ kernel (which has no usable tty) and forces
    PWNLIB_NOTERM so the curses setup is skipped; otherwise calls
    curses.setupterm(), warning on failure instead of raising.  Finally
    resets the cache, pre-seeding the 'reset' escape sequence, which
    cannot be obtained via tigetstr.
    """
    global cache

    # Detect running under Jupyter: its ZMQ shell has no real terminal.
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        shell_name = None
    if shell_name == "ZMQInteractiveShell":
        os.environ["PWNLIB_NOTERM"] = "1"
        os.environ["JUPYTER_DETECTED"] = "yes"

    if "PWNLIB_NOTERM" not in os.environ:
        # Fix for BPython
        try:
            curses.setupterm()
        except curses.error as e:
            import traceback
            detail = "".join(traceback.format_exception_only(e.__class__, e))
            print("Warning:", detail, file=sys.stderr)

    # 'reset' is not discoverable through tigetstr, so seed it by hand.
    cache = {"reset": "\x1b[m"}
|
def init():
    """(Re)initialise the terminfo capability cache.

    Calls curses.setupterm() unless PWNLIB_NOTERM is set, warning on
    failure instead of raising.  Finally resets the cache, pre-seeding
    the 'reset' escape sequence, which cannot be obtained via tigetstr.
    """
    global cache

    if "PWNLIB_NOTERM" not in os.environ:
        # Fix for BPython
        try:
            curses.setupterm()
        except curses.error as e:
            import traceback
            detail = "".join(traceback.format_exception_only(e.__class__, e))
            print("Warning:", detail, file=sys.stderr)

    # 'reset' is not discoverable through tigetstr, so seed it by hand.
    cache = {"reset": "\x1b[m"}
|
https://github.com/Gallopsled/pwntools/issues/826
|
In 1: from pwn import *
Out 1:
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-1-d810d1e70089> in <module>()
----> 1 from pwn import *
/usr/local/lib/python2.7/dist-packages/pwn/__init__.py in <module>()
1 # Promote useful stuff to toplevel
----> 2 from .toplevel import *
3
4 pwnlib.args.initialize()
5 pwnlib.log.install_default_handler()
/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py in <module>()
18 from pprint import pprint
19
---> 20 import pwnlib
21 from pwnlib import *
22 from pwnlib.asm import *
/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py in <module>()
38
39 for module in __all__:
---> 40 importlib.import_module('.%s' % module, 'pwnlib')
/usr/lib/python2.7/importlib/__init__.pyc in import_module(name, package)
35 level += 1
36 name = _resolve_name(name[level:], package, level)
---> 37 __import__(name)
38 return sys.modules[name]
/usr/local/lib/python2.7/dist-packages/pwnlib/args.py in <module>()
46 import sys
47
---> 48 from . import term
49 from .context import context
50
/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py in <module>()
1 import sys
2
----> 3 from . import completer
4 from . import key
5 from . import keymap
/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py in <module>()
2 import re
3
----> 4 from . import readline
5
6
/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py in <module>()
2 from . import keymap as km
3 from . import term
----> 4 from . import text
5
6 cursor = text.reverse
/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.pyc in <module>()
113
114 tether = sys.modules[__name__]
--> 115 sys.modules[__name__] = Module()
/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.pyc in __init__(self)
24 self.__file__ = __file__
25 self.__name__ = __name__
---> 26 self.num_colors = termcap.get('colors', default = 8)
27 self.has_bright = self.num_colors >= 16
28 self.has_gray = self.has_bright
/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.pyc in get(cap, *args, **kwargs)
23 s = cache.get(cap)
24 if not s:
---> 25 s = curses.tigetstr(cap)
26 if s == None:
27 s = curses.tigetnum(cap)
error: must call (at least) setupterm() first
In 2: import curses
In 3: curses.setupterm()
Out 2:
---------------------------------------------------------------------------
UnsupportedOperation Traceback (most recent call last)
<ipython-input-5-82805162dd9f> in <module>()
----> 1 curses.setupterm()
/usr/local/lib/python2.7/dist-packages/ipykernel/iostream.pyc in fileno(self)
304
305 def fileno(self):
--> 306 raise UnsupportedOperation("IOStream has no fileno.")
307
308 def write(self, string):
UnsupportedOperation: IOStream has no fileno.
print (curses.initscr(), curses.endwin())
(<_curses.curses window object at 0x7fef5c891a98>, None)
In 4: from pwnlib import *
Out 3:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-2-5eb1d9485f4c> in <module>()
----> 1 from pwnlib import *
/usr/local/lib/python2.7/dist-packages/pwnlib/__init__.py in <module>()
38
39 for module in __all__:
---> 40 importlib.import_module('.%s' % module, 'pwnlib')
/usr/lib/python2.7/importlib/__init__.pyc in import_module(name, package)
35 level += 1
36 name = _resolve_name(name[level:], package, level)
---> 37 __import__(name)
38 return sys.modules[name]
/usr/local/lib/python2.7/dist-packages/pwnlib/args.py in <module>()
46 import sys
47
---> 48 from . import term
49 from .context import context
50
/usr/local/lib/python2.7/dist-packages/pwnlib/term/__init__.py in <module>()
1 import sys
2
----> 3 from . import completer
4 from . import key
5 from . import keymap
/usr/local/lib/python2.7/dist-packages/pwnlib/term/completer.py in <module>()
2 import re
3
----> 4 from . import readline
5
6
/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py in <module>()
----> 1 from . import keyconsts as kc
2 from . import keymap as km
3 from . import term
4 from . import text
5
ImportError: cannot import name keyconsts
```
|
ImportError
|
def __init__(
    self, parent, process=None, tty=False, wd=None, env=None, raw=True, *args, **kwargs
):
    """Open a session channel on the parent ssh connection and run *process*.

    Arguments:
        parent: owning ssh connection (provides ``transport`` and ``host``).
        process: command to execute; str, bytes, or a list/tuple of
            arguments.  If falsy, an interactive shell is requested.
        tty(bool): request a pty ("xterm") sized to the local terminal.
        wd: remote working directory to ``cd`` into before running.
        env(dict): environment variables exported before running; keys
            must be valid shell identifiers.
        raw(bool): with a tty, also put the tty into raw mode via stty.
    """
    super(ssh_channel, self).__init__(*args, **kwargs)

    # keep the parent from being garbage collected in some cases
    self.parent = parent

    self.returncode = None
    self.host = parent.host
    self.tty = tty
    self.env = env
    self.process = process
    # cwd is recorded *before* wd is encoded below, so it stays text
    # and is safe to join with str path components later.
    self.cwd = wd or "."
    if isinstance(wd, six.text_type):
        wd = wd.encode("utf-8")

    env = env or {}
    msg = "Opening new channel: %r" % (process or "shell")

    # An argv list/tuple is shell-quoted and joined into one command line.
    if isinstance(process, (list, tuple)):
        process = b" ".join(
            (lambda x: x.encode("utf-8") if isinstance(x, six.text_type) else x)(
                sh_string(s)
            )
            for s in process
        )

    if isinstance(process, six.text_type):
        process = process.encode("utf-8")

    # Prefix the command line with cd / export / stty as requested.
    if process and wd:
        process = b"cd " + sh_string(wd) + b" >/dev/null 2>&1;" + process

    if process and env:
        for name, value in env.items():
            if not re.match("^[a-zA-Z_][a-zA-Z0-9_]*$", name):
                self.error("run(): Invalid environment key %r" % name)
            export = "export %s=%s;" % (name, sh_string(value))
            if isinstance(export, six.text_type):
                export = export.encode("utf-8")
            process = export + process

    if process and tty:
        if raw:
            process = b"stty raw -ctlecho -echo; " + process
        else:
            process = b"stty -ctlecho -echo; " + process

    # If this object is enabled for DEBUG-level logging, don't hide
    # anything about the command that's actually executed.
    if process and self.isEnabledFor(logging.DEBUG):
        msg = "Opening new channel: %r" % ((process,) or "shell")

    with self.waitfor(msg) as h:
        import paramiko
        try:
            self.sock = parent.transport.open_session()
        except paramiko.ChannelException as e:
            if e.args == (1, "Administratively prohibited"):
                self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
            raise e

        if self.tty:
            self.sock.get_pty("xterm", term.width, term.height)

            # Propagate local terminal resizes to the remote pty.
            def resizer():
                if self.sock:
                    try:
                        self.sock.resize_pty(term.width, term.height)
                    except paramiko.ssh_exception.SSHException:
                        pass

            self.resizer = resizer
            term.term.on_winch.append(self.resizer)
        else:
            self.resizer = None

        # Put stderr on stdout. This might not always be desirable,
        # but our API does not support multiple streams
        self.sock.set_combine_stderr(True)

        self.settimeout(self.timeout)

        if process:
            self.sock.exec_command(process)
        else:
            self.sock.invoke_shell()

        h.success()
|
def __init__(
    self, parent, process=None, tty=False, wd=None, env=None, raw=True, *args, **kwargs
):
    """Open a session channel on the parent ssh connection and run *process*.

    Arguments:
        parent: owning ssh connection (provides ``transport`` and ``host``).
        process: command to execute; str, bytes, or a list/tuple of
            arguments.  If falsy, an interactive shell is requested.
        tty(bool): request a pty ("xterm") sized to the local terminal.
        wd: remote working directory to ``cd`` into before running.
        env(dict): environment variables exported before running; keys
            must be valid shell identifiers.
        raw(bool): with a tty, also put the tty into raw mode via stty.
    """
    super(ssh_channel, self).__init__(*args, **kwargs)

    # keep the parent from being garbage collected in some cases
    self.parent = parent

    self.returncode = None
    self.host = parent.host
    self.tty = tty
    self.env = env
    self.process = process
    if isinstance(wd, six.text_type):
        wd = wd.encode("utf-8")
    # NOTE(review): wd is encoded above, so self.cwd can end up as bytes;
    # mixing it with str components in os.path.join downstream raises
    # TypeError (see linked issue traceback) -- consider keeping cwd text.
    self.cwd = wd or b"."

    env = env or {}
    msg = "Opening new channel: %r" % (process or "shell")

    # An argv list/tuple is shell-quoted and joined into one command line.
    if isinstance(process, (list, tuple)):
        process = b" ".join(
            (lambda x: x.encode("utf-8") if isinstance(x, six.text_type) else x)(
                sh_string(s)
            )
            for s in process
        )

    if isinstance(process, six.text_type):
        process = process.encode("utf-8")

    # Prefix the command line with cd / export / stty as requested.
    if process and wd:
        process = b"cd " + sh_string(wd) + b" >/dev/null 2>&1;" + process

    if process and env:
        for name, value in env.items():
            if not re.match("^[a-zA-Z_][a-zA-Z0-9_]*$", name):
                self.error("run(): Invalid environment key %r" % name)
            export = "export %s=%s;" % (name, sh_string(value))
            if isinstance(export, six.text_type):
                export = export.encode("utf-8")
            process = export + process

    if process and tty:
        if raw:
            process = b"stty raw -ctlecho -echo; " + process
        else:
            process = b"stty -ctlecho -echo; " + process

    # If this object is enabled for DEBUG-level logging, don't hide
    # anything about the command that's actually executed.
    if process and self.isEnabledFor(logging.DEBUG):
        msg = "Opening new channel: %r" % ((process,) or "shell")

    with self.waitfor(msg) as h:
        import paramiko
        try:
            self.sock = parent.transport.open_session()
        except paramiko.ChannelException as e:
            if e.args == (1, "Administratively prohibited"):
                self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
            raise e

        if self.tty:
            self.sock.get_pty("xterm", term.width, term.height)

            # Propagate local terminal resizes to the remote pty.
            def resizer():
                if self.sock:
                    try:
                        self.sock.resize_pty(term.width, term.height)
                    except paramiko.ssh_exception.SSHException:
                        pass

            self.resizer = resizer
            term.term.on_winch.append(self.resizer)
        else:
            self.resizer = None

        # Put stderr on stdout. This might not always be desirable,
        # but our API does not support multiple streams
        self.sock.set_combine_stderr(True)

        self.settimeout(self.timeout)

        if process:
            self.sock.exec_command(process)
        else:
            self.sock.invoke_shell()

        h.success()
|
https://github.com/Gallopsled/pwntools/issues/1468
|
[+] Downloading '/proc/sys/kernel/core_pattern': Found '/proc/sys/kernel/core_pattern' in ssh cache
[+] Downloading '/proc/sys/kernel/core_uses_pid': Found '/proc/sys/kernel/core_uses_pid' in ssh cache
[DEBUG] core_pattern: b'|/usr/share/apport/apport %p %s %c %d %P %E'
[DEBUG] core_uses_pid: False
[DEBUG] Not a process
[DEBUG] interpreter: ''
[DEBUG] Looking for QEMU corefile
Traceback (most recent call last):
File "win.py", line 114, in <module>
io.corefile
File "/Users/zachriggle/github.com/pwntools/pwnlib/tubes/ssh.py", line 369, in corefile
finder = pwnlib.elf.corefile.CorefileFinder(self)
File "/Users/zachriggle/github.com/pwntools/pwnlib/elf/corefile.py", line 1187, in __init__
self.core_path = self.qemu_corefile()
File "/Users/zachriggle/github.com/pwntools/pwnlib/elf/corefile.py", line 1476, in qemu_corefile
corefile_path = os.path.join(self.cwd, corefile_name)
File "/Users/zachriggle/.pyenv/versions/3.8.2/lib/python3.8/posixpath.py", line 90, in join
genericpath._check_arg_types('join', a, *p)
File "/Users/zachriggle/.pyenv/versions/3.8.2/lib/python3.8/genericpath.py", line 155, in _check_arg_types
raise TypeError("Can't mix strings and bytes in path components") from None
TypeError: Can't mix strings and bytes in path components
|
TypeError
|
def process(
self,
argv=None,
executable=None,
tty=True,
cwd=None,
env=None,
timeout=Timeout.default,
run=True,
stdin=0,
stdout=1,
stderr=2,
preexec_fn=None,
preexec_args=(),
raw=True,
aslr=None,
setuid=None,
shell=False,
):
r"""
Executes a process on the remote server, in the same fashion
as pwnlib.tubes.process.process.
To achieve this, a Python script is created to call ``os.execve``
with the appropriate arguments.
As an added bonus, the ``ssh_channel`` object returned has a
``pid`` property for the process pid.
Arguments:
argv(list):
List of arguments to pass into the process
executable(str):
Path to the executable to run.
If :const:`None`, ``argv[0]`` is used.
tty(bool):
Request a `tty` from the server. This usually fixes buffering problems
by causing `libc` to write data immediately rather than buffering it.
However, this disables interpretation of control codes (e.g. Ctrl+C)
and breaks `.shutdown`.
cwd(str):
Working directory. If :const:`None`, uses the working directory specified
on :attr:`cwd` or set via :meth:`set_working_directory`.
env(dict):
Environment variables to set in the child. If :const:`None`, inherits the
default environment.
timeout(int):
Timeout to set on the `tube` created to interact with the process.
run(bool):
Set to :const:`True` to run the program (default).
If :const:`False`, returns the path to an executable Python script on the
remote server which, when executed, will do it.
stdin(int, str):
If an integer, replace stdin with the numbered file descriptor.
If a string, a open a file with the specified path and replace
stdin with its file descriptor. May also be one of ``sys.stdin``,
``sys.stdout``, ``sys.stderr``. If :const:`None`, the file descriptor is closed.
stdout(int, str):
See ``stdin``.
stderr(int, str):
See ``stdin``.
preexec_fn(callable):
Function which is executed on the remote side before execve().
This **MUST** be a self-contained function -- it must perform
all of its own imports, and cannot refer to variables outside
its scope.
preexec_args(object):
Argument passed to ``preexec_fn``.
This **MUST** only consist of native Python objects.
raw(bool):
If :const:`True`, disable TTY control code interpretation.
aslr(bool):
See :class:`pwnlib.tubes.process.process` for more information.
setuid(bool):
See :class:`pwnlib.tubes.process.process` for more information.
shell(bool):
Pass the command-line arguments to the shell.
Returns:
A new SSH channel, or a path to a script if ``run=False``.
Notes:
Requires Python on the remote server.
Examples:
>>> s = ssh(host='example.pwnme',
... user='runner',
... password='demopass')
>>> sh = s.process('/bin/sh', env={'PS1':''})
>>> sh.sendline(b'echo Hello; exit')
>>> sh.recvall()
b'Hello\n'
>>> s.process(['/bin/echo', b'\xff']).recvall()
b'\xff\n'
>>> s.process(['readlink', '/proc/self/exe']).recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
b'LOLOLOL\x00/proc/self/cmdline\x00'
>>> sh = s.process(executable='/bin/sh')
>>> str(sh.pid).encode() in s.pidof('sh') # doctest: +SKIP
True
>>> s.process(['pwd'], cwd='/tmp').recvall()
b'/tmp\n'
>>> p = s.process(['python','-c','import os; os.write(1, os.read(2, 1024))'], stderr=0)
>>> p.send(b'hello')
>>> p.recv()
b'hello'
>>> s.process(['/bin/echo', 'hello']).recvall()
b'hello\n'
>>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
b''
>>> s.process(['/usr/bin/env'], env={}).recvall()
b''
>>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
b'A=B\n'
>>> s.process('false', preexec_fn=1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn must be a function
>>> s.process('false', preexec_fn=lambda: 1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn cannot be a lambda
>>> def uses_globals():
... foo = bar
>>> print(s.process('false', preexec_fn=uses_globals).recvall().strip().decode()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: name 'bar' is not defined
>>> s.process('echo hello', shell=True).recvall()
b'hello\n'
"""
if not argv and not executable:
self.error("Must specify argv or executable")
argv = argv or []
aslr = aslr if aslr is not None else context.aslr
if isinstance(argv, (six.text_type, six.binary_type)):
argv = [argv]
if not isinstance(argv, (list, tuple)):
self.error("argv must be a list or tuple")
if not all(isinstance(arg, (six.text_type, six.binary_type)) for arg in argv):
self.error("argv must be strings or bytes: %r" % argv)
if shell:
if len(argv) != 1:
self.error("Cannot provide more than 1 argument if shell=True")
argv = ["/bin/sh", "-c"] + argv
# Create a duplicate so we can modify it
argv = list(argv or [])
# Python doesn't like when an arg in argv contains '\x00'
# -> execve() arg 2 must contain only strings
for i, oarg in enumerate(argv):
if isinstance(oarg, six.text_type):
arg = oarg.encode("utf-8")
else:
arg = oarg
if b"\x00" in arg[:-1]:
self.error("Inappropriate nulls in argv[%i]: %r" % (i, oarg))
argv[i] = bytearray(arg.rstrip(b"\x00"))
if env is not None and not isinstance(env, dict) and env != os.environ:
self.error("env must be a dict: %r" % env)
# Converts the environment variables to a list of tuples to retain order.
env2 = []
# Python also doesn't like when envp contains '\x00'
if env and hasattr(env, "items"):
for k, v in env.items():
if isinstance(k, six.text_type):
k = k.encode("utf-8")
if isinstance(v, six.text_type):
v = v.encode("utf-8")
if b"\x00" in k[:-1]:
self.error("Inappropriate nulls in environment key %r" % k)
if b"\x00" in v[:-1]:
self.error("Inappropriate nulls in environment value %r=%r" % (k, v))
env2.append((bytearray(k.rstrip(b"\x00")), bytearray(v.rstrip(b"\x00"))))
env = env2 or env
executable = executable or argv[0]
cwd = cwd or self.cwd
# Validate, since failures on the remote side will suck.
if not isinstance(executable, (six.text_type, six.binary_type, bytearray)):
self.error("executable / argv[0] must be a string: %r" % executable)
executable = context._decode(executable)
# Allow passing in sys.stdin/stdout/stderr objects
handles = {sys.stdin: 0, sys.stdout: 1, sys.stderr: 2}
stdin = handles.get(stdin, stdin)
stdout = handles.get(stdout, stdout)
stderr = handles.get(stderr, stderr)
# Allow the user to provide a self-contained function to run
def func():
pass
func = preexec_fn or func
func_args = preexec_args
if not isinstance(func, types.FunctionType):
self.error("preexec_fn must be a function")
func_name = func.__name__
if func_name == (lambda: 0).__name__:
self.error("preexec_fn cannot be a lambda")
func_src = inspect.getsource(func).strip()
setuid = True if setuid is None else bool(setuid)
script = r"""
#!/usr/bin/env python
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
exe = %(executable)r
argv = [bytes(a) for a in %(argv)r]
env = %(env)r
os.chdir(%(cwd)r)
if env is not None:
env = OrderedDict((bytes(k), bytes(v)) for k,v in env)
os.environ.clear()
getattr(os, 'environb', os.environ).update(env)
else:
env = os.environ
def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = os.environ.get('PATH','').split(os.pathsep)
if os.path.sep not in exe and not is_exe(exe):
for path in PATH:
test_path = os.path.join(path, exe)
if is_exe(test_path):
exe = test_path
break
if not is_exe(exe):
sys.stderr.write('3\n')
sys.stderr.write("{} is not executable or does not exist in $PATH: {}".format(exe,PATH))
sys.exit(-1)
if not %(setuid)r:
PR_SET_NO_NEW_PRIVS = 38
result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
if result != 0:
sys.stdout.write('3\n')
sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
sys.exit(-1)
try:
PR_SET_PTRACER = 0x59616d61
PR_SET_PTRACER_ANY = -1
ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
if (st.st_mode & stat.S_ISUID):
suid = st.st_uid
if (st.st_mode & stat.S_ISGID):
sgid = st.st_gid
if sys.argv[-1] == 'check':
sys.stdout.write("1\n")
sys.stdout.write(str(os.getpid()) + "\n")
sys.stdout.write(str(os.getuid()) + "\n")
sys.stdout.write(str(os.getgid()) + "\n")
sys.stdout.write(str(suid) + "\n")
sys.stdout.write(str(sgid) + "\n")
sys.stdout.write(os.path.realpath(exe) + '\x00')
sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
if newfd is None:
os.close(fd)
elif isinstance(newfd, (str, bytes)):
newfd = os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
os.dup2(newfd, fd)
os.close(newfd)
elif isinstance(newfd, int) and newfd != fd:
os.dup2(fd, newfd)
if not %(aslr)r:
if platform.system().lower() == 'linux' and %(setuid)r is not True:
ADDR_NO_RANDOMIZE = 0x0040000
ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
with open('/proc/self/coredump_filter', 'w') as core_filter:
core_filter.write('0x3f\n')
except Exception:
pass
# Assume that the user would prefer to have core dumps.
try:
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
pass
%(func_src)s
%(func_name)s(*%(func_args)r)
os.execve(exe, argv, env)
""" % locals()
script = script.strip()
self.debug("Created execve script:\n" + script)
if not run:
with context.local(log_level="error"):
tmpfile = self.mktemp("-t", "pwnlib-execve-XXXXXXXXXX")
self.chmod("+x", tmpfile)
self.info("Uploading execve script to %r" % tmpfile)
self.upload_data(script, tmpfile)
return tmpfile
if self.isEnabledFor(logging.DEBUG):
execve_repr = "execve(%r, %s, %s)" % (
executable,
argv,
"os.environ" if (env in (None, os.environ)) else env,
)
# Avoid spamming the screen
if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
execve_repr = execve_repr[:512] + "..."
else:
execve_repr = repr(executable)
msg = "Starting remote process %s on %s" % (execve_repr, self.host)
with self.progress(msg) as h:
script = (
'for py in python3 python; do test -x "$(which $py 2>&1)" && exec $py -c %s check; done; echo 2'
% sh_string(script)
)
with context.local(log_level="error"):
python = ssh_process(
self, script, tty=True, raw=True, level=self.level, timeout=self.timeout
)
try:
result = safeeval.const(python.recvline())
except (EOFError, ValueError):
h.failure("Process creation failed")
self.warn_once(
"Could not find a Python interpreter on %s\n" % self.host
+ "Use ssh.run() instead of ssh.process()"
)
return None
# If an error occurred, try to grab as much output
# as we can.
if result != 1:
error_message = python.recvrepeat(timeout=1)
if result == 0:
self.error("%r does not exist or is not executable" % executable)
elif result == 3:
self.error(error_message)
elif result == 2:
self.error("python is not installed on the remote system %r" % self.host)
elif result != 1:
h.failure("something bad happened:\n%s" % error_message)
python.pid = safeeval.const(python.recvline())
python.uid = safeeval.const(python.recvline())
python.gid = safeeval.const(python.recvline())
python.suid = safeeval.const(python.recvline())
python.sgid = safeeval.const(python.recvline())
python.argv = argv
python.executable = context._decode(python.recvuntil(b"\x00")[:-1])
h.success("pid %i" % python.pid)
if (
aslr == False
and setuid
and (python.uid != python.suid or python.gid != python.sgid)
):
effect = "partial" if self.aslr_ulimit else "no"
message = "Specfied aslr=False on setuid binary %s\n" % python.executable
message += (
"This will have %s effect. Add setuid=False to disable ASLR for debugging.\n"
% effect
)
if self.aslr_ulimit:
message += "Unlimited stack size should de-randomize shared libraries."
self.warn_once(message)
elif not aslr:
self.warn_once("ASLR is disabled for %r!" % python.executable)
return python
|
def process(
    self,
    argv=None,
    executable=None,
    tty=True,
    cwd=None,
    env=None,
    timeout=Timeout.default,
    run=True,
    stdin=0,
    stdout=1,
    stderr=2,
    preexec_fn=None,
    preexec_args=(),
    raw=True,
    aslr=None,
    setuid=None,
    shell=False,
):
    r"""
    Executes a process on the remote server, in the same fashion
    as pwnlib.tubes.process.process.
    To achieve this, a Python script is created to call ``os.execve``
    with the appropriate arguments.
    As an added bonus, the ``ssh_channel`` object returned has a
    ``pid`` property for the process pid.
    Arguments:
        argv(list):
            List of arguments to pass into the process
        executable(str):
            Path to the executable to run.
            If :const:`None`, ``argv[0]`` is used.
        tty(bool):
            Request a `tty` from the server. This usually fixes buffering problems
            by causing `libc` to write data immediately rather than buffering it.
            However, this disables interpretation of control codes (e.g. Ctrl+C)
            and breaks `.shutdown`.
        cwd(str):
            Working directory. If :const:`None`, uses the working directory specified
            on :attr:`cwd` or set via :meth:`set_working_directory`.
        env(dict):
            Environment variables to set in the child. If :const:`None`, inherits the
            default environment.
        timeout(int):
            Timeout to set on the `tube` created to interact with the process.
        run(bool):
            Set to :const:`True` to run the program (default).
            If :const:`False`, returns the path to an executable Python script on the
            remote server which, when executed, will do it.
        stdin(int, str):
            If an integer, replace stdin with the numbered file descriptor.
            If a string, a open a file with the specified path and replace
            stdin with its file descriptor. May also be one of ``sys.stdin``,
            ``sys.stdout``, ``sys.stderr``. If :const:`None`, the file descriptor is closed.
        stdout(int, str):
            See ``stdin``.
        stderr(int, str):
            See ``stdin``.
        preexec_fn(callable):
            Function which is executed on the remote side before execve().
            This **MUST** be a self-contained function -- it must perform
            all of its own imports, and cannot refer to variables outside
            its scope.
        preexec_args(object):
            Argument passed to ``preexec_fn``.
            This **MUST** only consist of native Python objects.
        raw(bool):
            If :const:`True`, disable TTY control code interpretation.
        aslr(bool):
            See :class:`pwnlib.tubes.process.process` for more information.
        setuid(bool):
            See :class:`pwnlib.tubes.process.process` for more information.
        shell(bool):
            Pass the command-line arguments to the shell.
    Returns:
        A new SSH channel, or a path to a script if ``run=False``.
    Notes:
        Requires Python on the remote server.
    Examples:
        >>> s = ssh(host='example.pwnme',
        ...         user='runner',
        ...         password='demopass')
        >>> sh = s.process('/bin/sh', env={'PS1':''})
        >>> sh.sendline(b'echo Hello; exit')
        >>> sh.recvall()
        b'Hello\n'
        >>> s.process(['/bin/echo', b'\xff']).recvall()
        b'\xff\n'
        >>> s.process(['readlink', '/proc/self/exe']).recvall()
        b'/bin/readlink\n'
        >>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
        b'/bin/readlink\n'
        >>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
        b'LOLOLOL\x00/proc/self/cmdline\x00'
        >>> sh = s.process(executable='/bin/sh')
        >>> str(sh.pid).encode() in s.pidof('sh') # doctest: +SKIP
        True
        >>> s.process(['pwd'], cwd='/tmp').recvall()
        b'/tmp\n'
        >>> p = s.process(['python','-c','import os; os.write(1, os.read(2, 1024))'], stderr=0)
        >>> p.send(b'hello')
        >>> p.recv()
        b'hello'
        >>> s.process(['/bin/echo', 'hello']).recvall()
        b'hello\n'
        >>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
        b''
        >>> s.process(['/usr/bin/env'], env={}).recvall()
        b''
        >>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
        b'A=B\n'
        >>> s.process('false', preexec_fn=1234)
        Traceback (most recent call last):
        ...
        PwnlibException: preexec_fn must be a function
        >>> s.process('false', preexec_fn=lambda: 1234)
        Traceback (most recent call last):
        ...
        PwnlibException: preexec_fn cannot be a lambda
        >>> def uses_globals():
        ...     foo = bar
        >>> print(s.process('false', preexec_fn=uses_globals).recvall().strip().decode()) # doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        NameError: name 'bar' is not defined
        >>> s.process('echo hello', shell=True).recvall()
        b'hello\n'
    """
    if not argv and not executable:
        self.error("Must specify argv or executable")
    argv = argv or []
    aslr = aslr if aslr is not None else context.aslr
    if isinstance(argv, (six.text_type, six.binary_type)):
        argv = [argv]
    if not isinstance(argv, (list, tuple)):
        self.error("argv must be a list or tuple")
    if not all(isinstance(arg, (six.text_type, six.binary_type)) for arg in argv):
        self.error("argv must be strings or bytes: %r" % argv)
    if shell:
        if len(argv) != 1:
            self.error("Cannot provide more than 1 argument if shell=True")
        argv = ["/bin/sh", "-c"] + argv
    # Create a duplicate so we can modify it
    argv = list(argv or [])
    # Python doesn't like when an arg in argv contains '\x00'
    # -> execve() arg 2 must contain only strings
    for i, oarg in enumerate(argv):
        if isinstance(oarg, six.text_type):
            arg = oarg.encode("utf-8")
        else:
            arg = oarg
        if b"\x00" in arg[:-1]:
            self.error("Inappropriate nulls in argv[%i]: %r" % (i, oarg))
        argv[i] = bytearray(arg.rstrip(b"\x00"))
    if env is not None and not isinstance(env, dict) and env != os.environ:
        self.error("env must be a dict: %r" % env)
    # Converts the environment variables to a list of tuples to retain order.
    env2 = []
    # Python also doesn't like when envp contains '\x00'
    if env and hasattr(env, "items"):
        for k, v in env.items():
            if isinstance(k, six.text_type):
                k = k.encode("utf-8")
            if isinstance(v, six.text_type):
                v = v.encode("utf-8")
            if b"\x00" in k[:-1]:
                self.error("Inappropriate nulls in environment key %r" % k)
            if b"\x00" in v[:-1]:
                self.error("Inappropriate nulls in environment value %r=%r" % (k, v))
            env2.append((bytearray(k.rstrip(b"\x00")), bytearray(v.rstrip(b"\x00"))))
    env = env2 or env
    executable = executable or argv[0]
    cwd = cwd or self.cwd
    # Validate, since failures on the remote side will suck.
    if not isinstance(executable, (six.text_type, six.binary_type, bytearray)):
        self.error("executable / argv[0] must be a string: %r" % executable)
    executable = context._decode(executable)
    # Allow passing in sys.stdin/stdout/stderr objects
    handles = {sys.stdin: 0, sys.stdout: 1, sys.stderr: 2}
    stdin = handles.get(stdin, stdin)
    stdout = handles.get(stdout, stdout)
    stderr = handles.get(stderr, stderr)

    # Allow the user to provide a self-contained function to run
    def func():
        pass

    func = preexec_fn or func
    func_args = preexec_args
    if not isinstance(func, types.FunctionType):
        self.error("preexec_fn must be a function")
    func_name = func.__name__
    if func_name == (lambda: 0).__name__:
        self.error("preexec_fn cannot be a lambda")
    func_src = inspect.getsource(func).strip()
    setuid = True if setuid is None else bool(setuid)
    # Remote helper script, formatted with the locals computed above.
    # It prints a status code, then pid/uid/gid/suid/sgid, then the
    # NUL-terminated realpath of the executable, before execve()ing.
    script = r"""
#!/usr/bin/env python
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
exe = %(executable)r
argv = [bytes(a) for a in %(argv)r]
env = %(env)r
os.chdir(%(cwd)r)
if env is not None:
    env = OrderedDict((bytes(k), bytes(v)) for k,v in env)
    os.environ.clear()
    getattr(os, 'environb', os.environ).update(env)
else:
    env = os.environ
def is_exe(path):
    return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = os.environ.get('PATH','').split(os.pathsep)
if os.path.sep not in exe and not is_exe(exe):
    for path in PATH:
        test_path = os.path.join(path, exe)
        if is_exe(test_path):
            exe = test_path
            break
if not is_exe(exe):
    sys.stderr.write('3\n')
    sys.stderr.write("{} is not executable or does not exist in $PATH: {}".format(exe,PATH))
    sys.exit(-1)
if not %(setuid)r:
    PR_SET_NO_NEW_PRIVS = 38
    result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
    if result != 0:
        sys.stdout.write('3\n')
        sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
        sys.exit(-1)
try:
    PR_SET_PTRACER = 0x59616d61
    PR_SET_PTRACER_ANY = -1
    ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
    pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
    if (st.st_mode & stat.S_ISUID):
        suid = st.st_uid
    if (st.st_mode & stat.S_ISGID):
        sgid = st.st_gid
if sys.argv[-1] == 'check':
    sys.stdout.write("1\n")
    sys.stdout.write(str(os.getpid()) + "\n")
    sys.stdout.write(str(os.getuid()) + "\n")
    sys.stdout.write(str(os.getgid()) + "\n")
    sys.stdout.write(str(suid) + "\n")
    sys.stdout.write(str(sgid) + "\n")
    sys.stdout.write(os.path.realpath(exe) + '\x00')
    sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
    if newfd is None:
        os.close(fd)
    elif isinstance(newfd, (str, bytes)):
        newfd = os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
        os.dup2(newfd, fd)
        os.close(newfd)
    elif isinstance(newfd, int) and newfd != fd:
        os.dup2(fd, newfd)
if not %(aslr)r:
    if platform.system().lower() == 'linux' and %(setuid)r is not True:
        ADDR_NO_RANDOMIZE = 0x0040000
        ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
    resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
    with open('/proc/self/coredump_filter', 'w') as core_filter:
        core_filter.write('0x3f\n')
except Exception:
    pass
# Assume that the user would prefer to have core dumps.
try:
    resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
    pass
%(func_src)s
%(func_name)s(*%(func_args)r)
os.execve(exe, argv, env)
""" % locals()
    script = script.strip()
    self.debug("Created execve script:\n" + script)

    if not run:
        with context.local(log_level="error"):
            tmpfile = self.mktemp("-t", "pwnlib-execve-XXXXXXXXXX")
            self.chmod("+x", tmpfile)
        self.info("Uploading execve script to %r" % tmpfile)
        self.upload_data(script, tmpfile)
        return tmpfile

    if self.isEnabledFor(logging.DEBUG):
        execve_repr = "execve(%r, %s, %s)" % (
            executable,
            argv,
            "os.environ" if (env in (None, os.environ)) else env,
        )
        # Avoid spamming the screen
        if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
            execve_repr = execve_repr[:512] + "..."
    else:
        execve_repr = repr(executable)

    msg = "Starting remote process %s on %s" % (execve_repr, self.host)
    with self.progress(msg) as h:
        script = (
            'for py in python3 python; do test -x "$(which $py 2>&1)" && exec $py -c %s check; done; echo 2'
            % sh_string(script)
        )
        with context.local(log_level="error"):
            python = ssh_process(
                self, script, tty=True, raw=True, level=self.level, timeout=self.timeout
            )
        try:
            result = safeeval.const(python.recvline())
        except (EOFError, ValueError):
            h.failure("Process creation failed")
            self.warn_once(
                "Could not find a Python interpreter on %s\n" % self.host
                + "Use ssh.run() instead of ssh.process()"
            )
            return None
        # If an error occurred, try to grab as much output
        # as we can.
        if result != 1:
            error_message = python.recvrepeat(timeout=1)
            if result == 0:
                self.error("%r does not exist or is not executable" % executable)
            elif result == 3:
                self.error(error_message)
            elif result == 2:
                self.error("python is not installed on the remote system %r" % self.host)
            elif result != 1:
                h.failure("something bad happened:\n%s" % error_message)
        python.pid = safeeval.const(python.recvline())
        python.uid = safeeval.const(python.recvline())
        python.gid = safeeval.const(python.recvline())
        python.suid = safeeval.const(python.recvline())
        python.sgid = safeeval.const(python.recvline())
        python.argv = argv
        # BUG FIX: the remote helper sends the realpath as raw bytes.
        # Decode it so that ``.executable`` is text -- leaving it as bytes
        # made later os.path.join(<str cwd>, executable) raise
        # "TypeError: Can't mix strings and bytes in path components".
        python.executable = context._decode(python.recvuntil(b"\x00")[:-1])
        h.success("pid %i" % python.pid)

    if (
        aslr == False
        and setuid
        and (python.uid != python.suid or python.gid != python.sgid)
    ):
        effect = "partial" if self.aslr_ulimit else "no"
        # (typo fix: "Specfied" -> "Specified")
        message = "Specified aslr=False on setuid binary %s\n" % python.executable
        message += (
            "This will have %s effect. Add setuid=False to disable ASLR for debugging.\n"
            % effect
        )
        if self.aslr_ulimit:
            message += "Unlimited stack size should de-randomize shared libraries."
        self.warn_once(message)
    elif not aslr:
        self.warn_once("ASLR is disabled for %r!" % python.executable)
    return python
|
https://github.com/Gallopsled/pwntools/issues/1468
|
[+] Downloading '/proc/sys/kernel/core_pattern': Found '/proc/sys/kernel/core_pattern' in ssh cache
[+] Downloading '/proc/sys/kernel/core_uses_pid': Found '/proc/sys/kernel/core_uses_pid' in ssh cache
[DEBUG] core_pattern: b'|/usr/share/apport/apport %p %s %c %d %P %E'
[DEBUG] core_uses_pid: False
[DEBUG] Not a process
[DEBUG] interpreter: ''
[DEBUG] Looking for QEMU corefile
Traceback (most recent call last):
File "win.py", line 114, in <module>
io.corefile
File "/Users/zachriggle/github.com/pwntools/pwnlib/tubes/ssh.py", line 369, in corefile
finder = pwnlib.elf.corefile.CorefileFinder(self)
File "/Users/zachriggle/github.com/pwntools/pwnlib/elf/corefile.py", line 1187, in __init__
self.core_path = self.qemu_corefile()
File "/Users/zachriggle/github.com/pwntools/pwnlib/elf/corefile.py", line 1476, in qemu_corefile
corefile_path = os.path.join(self.cwd, corefile_name)
File "/Users/zachriggle/.pyenv/versions/3.8.2/lib/python3.8/posixpath.py", line 90, in join
genericpath._check_arg_types('join', a, *p)
File "/Users/zachriggle/.pyenv/versions/3.8.2/lib/python3.8/genericpath.py", line 155, in _check_arg_types
raise TypeError("Can't mix strings and bytes in path components") from None
TypeError: Can't mix strings and bytes in path components
|
TypeError
|
def _peekkey_ss3(offset):
    """Decode an SS3 (``ESC O <final>``) sequence whose final byte sits at
    *offset* in the global input buffer.

    Returns Alt+'O' when the buffer ends before the final byte, ``None``
    when the byte is outside the SS3 final-byte range, and otherwise the
    mapped :class:`Key` (consuming the prefix from ``_cbuf``).
    """
    global _cbuf
    # Too few bytes buffered: interpret the lone 'O' as Alt+O instead.
    if len(_cbuf) <= offset:
        return Key(kc.TYPE_UNICODE, "O", kc.MOD_ALT)
    code = _cbuf[offset]
    # SS3 final bytes live in 0x40..0x7f; anything else is not for us.
    if not (0x40 <= code < 0x80):
        return
    _cbuf = _cbuf[offset:]
    ch = chr(code)
    if ch in _csi_ss3s:
        return Key(*_csi_ss3s[ch])
    if ch in _csi_ss3kp:
        t, c, a = _csi_ss3kp[ch]
        # Keypad key: optionally translate to its plain-character form.
        if FLAG_CONVERTKP and a:
            return Key(kc.TYPE_UNICODE, a)
        return Key(t, c)
|
def _peekkey_ss3(offset):
    """Decode an SS3 (``ESC O <final>``) sequence whose final byte sits at
    *offset* in the global input buffer ``_cbuf``.

    Returns Alt+'O' when the buffer ends before the final byte, ``None``
    when the byte is outside the SS3 final-byte range, and otherwise the
    mapped :class:`Key` (consuming the prefix from ``_cbuf``).
    """
    global _cbuf
    if len(_cbuf) <= offset:
        return Key(kc.TYPE_UNICODE, "O", kc.MOD_ALT)
    cmd = _cbuf[offset]
    if cmd < 0x40 or cmd >= 0x80:
        return
    # BUG FIX: was ``_cbuf[numb:]`` -- ``numb`` is undefined and raised
    # "NameError: global name 'numb' is not defined"; the intended slice
    # index is the function's ``offset`` argument.
    _cbuf = _cbuf[offset:]
    if chr(cmd) in _csi_ss3s:
        return Key(*_csi_ss3s[chr(cmd)])
    if chr(cmd) in _csi_ss3kp:
        t, c, a = _csi_ss3kp[chr(cmd)]
        # BUG FIX: was the undefined name ``CONVERTKP``; the module flag
        # is named ``FLAG_CONVERTKP``.
        if FLAG_CONVERTKP and a:
            return Key(kc.TYPE_UNICODE, a)
        else:
            return Key(t, c)
|
https://github.com/Gallopsled/pwntools/issues/974
|
Traceback (most recent call last):
File "foo.py", line 907, in <module>
inp = sys.stdin.readline().strip().decode('utf-8')
File "/lib/python2.7/site-packages/pwnlib/term/readline.py", line 412, in readline
return readline(size)
File "/lib/python2.7/site-packages/pwnlib/term/readline.py", line 376, in readline
keymap.handle_input()
File "/lib/python2.7/site-packages/pwnlib/term/keymap.py", line 20, in handle_input
self.send(key.get())
File "/lib/python2.7/site-packages/pwnlib/term/key.py", line 164, in get
k = _peek()
File "/lib/python2.7/site-packages/pwnlib/term/key.py", line 159, in _peek
return _peek_ti() or _peek_csi() or _peek_simple()
File "/lib/python2.7/site-packages/pwnlib/term/key.py", line 393, in _peek_csi
return _peekkey_ss3(2)
File "/lib/python2.7/site-packages/pwnlib/term/key.py", line 372, in _peekkey_ss3
_cbuf = _cbuf[numb:] # XXX: numb is not defined
NameError: global name 'numb' is not defined
|
NameError
|
def __getattr__(self, desc):
    """Resolve names like ``bold_red_on_bright_green`` into decorators.

    The name is split on underscores into leading attribute words
    (looked up in ``self._attributes``), an optional foreground color,
    and an optional ``on`` + background color.  Any name that does not
    parse raises :class:`AttributeError`, preserving the normal
    attribute-lookup protocol.
    """
    try:
        parts = desc.replace("gray", "bright_black").split("_")
        init = ""
        # Consume leading attribute words (bold, underline, ...).
        while parts and parts[0] in self._attributes:
            init += self._attributes[parts.pop(0)]

        def next_color():
            # Pop one color spec, honoring an optional 'bright' prefix.
            extra = 0
            name = parts.pop(0)
            if name == "bright":
                name = parts.pop(0)
                if self.has_bright:
                    extra = 8
            return self._colors[name] + extra

        if parts:
            if parts[0] == "on":
                parts.pop(0)
                init += self._bg_color(next_color())
            else:
                init += self._fg_color(next_color())
                if parts:
                    assert parts.pop(0) == "on"
                    init += self._bg_color(next_color())
        return self._decorator(desc, init)
    except (IndexError, KeyError):
        raise AttributeError("'module' object has no attribute %r" % desc)
|
def __getattr__(self, desc):
    """Resolve names like ``bold_red_on_bright_green`` into decorators.

    Splits ``desc`` on underscores into leading attribute words, an
    optional foreground color, and an optional ``on`` + background color.

    Raises:
        AttributeError: for names that do not parse as a valid
            attribute/color spec.  Previously a bare ``KeyError`` /
            ``IndexError`` leaked out (e.g. ``KeyError: ''`` when probing
            dunder names), which breaks the attribute-lookup protocol
            that ``hasattr()``/``getattr()`` rely on.
    """
    try:
        ds = desc.replace("gray", "bright_black").split("_")
        init = ""
        while ds:
            d = ds[0]
            try:
                init += self._attributes[d]
                ds.pop(0)
            except KeyError:
                break

        def c():
            # Pop one color spec, honoring an optional 'bright' prefix.
            bright = 0
            c = ds.pop(0)
            if c == "bright":
                c = ds.pop(0)
                if self.has_bright:
                    bright = 8
            return self._colors[c] + bright

        if ds:
            if ds[0] == "on":
                ds.pop(0)
                init += self._bg_color(c())
            else:
                init += self._fg_color(c())
                if len(ds):
                    assert ds.pop(0) == "on"
                    init += self._bg_color(c())
        return self._decorator(desc, init)
    except (IndexError, KeyError):
        # __getattr__ must signal unknown names with AttributeError only.
        raise AttributeError("'module' object has no attribute %r" % desc)
|
https://github.com/Gallopsled/pwntools/issues/342
|
import pwnlib
pwnlib.term.text.__lol__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "pwnlib/term/text.py", line 99, in __getattr__
init += self._fg_color(c())
File "pwnlib/term/text.py", line 93, in c
return self._colors[c] + bright
KeyError: ''
|
KeyError
|
def __init__(
    self,
    transforms,
    bbox_params=None,
    keymap=None,
    update_pad_shape=False,
    skip_img_without_anno=False,
):
    """Wrap an albumentations pipeline behind the dataset-pipeline API.

    Args:
        transforms: albumentations transform configs (dicts).
        bbox_params: optional bbox-handling config for ``Compose``.
        keymap: optional mapping from pipeline keys to albumentations keys.
        update_pad_shape: whether to refresh ``pad_shape`` after augmenting.
        skip_img_without_anno: whether to drop images left without boxes.

    Raises:
        RuntimeError: if albumentations is not importable.
    """
    if Compose is None:
        raise RuntimeError("albumentations is not installed")

    # Work on private copies -- the configs get mutated below.
    transforms = copy.deepcopy(transforms)
    bbox_params = copy.deepcopy(bbox_params) if bbox_params is not None else None
    keymap = copy.deepcopy(keymap) if keymap is not None else None

    self.transforms = transforms
    self.filter_lost_elements = False
    self.update_pad_shape = update_pad_shape
    self.skip_img_without_anno = skip_img_without_anno

    # Workaround: remap label fields so masks without boxes can be dropped.
    if (
        isinstance(bbox_params, dict)
        and "label_fields" in bbox_params
        and "filter_lost_elements" in bbox_params
    ):
        self.filter_lost_elements = True
        self.origin_label_fields = bbox_params["label_fields"]
        bbox_params["label_fields"] = ["idx_mapper"]
        del bbox_params["filter_lost_elements"]

    self.bbox_params = self.albu_builder(bbox_params) if bbox_params else None
    self.aug = Compose(
        [self.albu_builder(t) for t in self.transforms],
        bbox_params=self.bbox_params,
    )

    if keymap:
        self.keymap_to_albu = keymap
    else:
        self.keymap_to_albu = {
            "img": "image",
            "gt_masks": "masks",
            "gt_bboxes": "bboxes",
        }
    self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
|
def __init__(
    self,
    transforms,
    bbox_params=None,
    keymap=None,
    update_pad_shape=False,
    skip_img_without_anno=False,
):
    """Wrap an albumentations pipeline behind the dataset-pipeline API.

    Args:
        transforms: albumentations transform configs (dicts).
        bbox_params: optional bbox-handling config for ``Compose``.
        keymap: optional mapping from pipeline keys to albumentations keys.
        update_pad_shape: whether to refresh ``pad_shape`` after augmenting.
        skip_img_without_anno: whether to drop images left without boxes.

    Raises:
        RuntimeError: if albumentations is not importable.
    """
    import copy  # local import keeps this fix self-contained

    if Compose is None:
        raise RuntimeError("albumentations is not installed")
    # BUG FIX: the configs are modified in place below (e.g.
    # ``del bbox_params["filter_lost_elements"]``), which mutated the
    # caller's shared config dict and broke subsequent constructions
    # ("Your 'label_fields' are not valid ...").  Deep-copy first.
    transforms = copy.deepcopy(transforms)
    if bbox_params is not None:
        bbox_params = copy.deepcopy(bbox_params)
    if keymap is not None:
        keymap = copy.deepcopy(keymap)
    self.transforms = transforms
    self.filter_lost_elements = False
    self.update_pad_shape = update_pad_shape
    self.skip_img_without_anno = skip_img_without_anno
    # A simple workaround to remove masks without boxes
    if (
        isinstance(bbox_params, dict)
        and "label_fields" in bbox_params
        and "filter_lost_elements" in bbox_params
    ):
        self.filter_lost_elements = True
        self.origin_label_fields = bbox_params["label_fields"]
        bbox_params["label_fields"] = ["idx_mapper"]
        del bbox_params["filter_lost_elements"]
    self.bbox_params = self.albu_builder(bbox_params) if bbox_params else None
    self.aug = Compose(
        [self.albu_builder(t) for t in self.transforms], bbox_params=self.bbox_params
    )
    if not keymap:
        self.keymap_to_albu = {
            "img": "image",
            "gt_masks": "masks",
            "gt_bboxes": "bboxes",
        }
    else:
        self.keymap_to_albu = keymap
    self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
|
https://github.com/open-mmlab/mmdetection/issues/4599
|
Original Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/worker.py", line 178, in _worker_loop
data = fetcher.fetch(index)
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/usr/local/lib/python3.6/dist-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/content/mmdetection/mmdet/datasets/custom.py", line 193, in __getitem__
data = self.prepare_train_img(idx)
File "/content/mmdetection/mmdet/datasets/custom.py", line 216, in prepare_train_img
return self.pipeline(results)
File "/content/mmdetection/mmdet/datasets/pipelines/compose.py", line 40, in __call__
data = t(data)
File "/content/mmdetection/mmdet/datasets/pipelines/transforms.py", line 1344, in __call__
results = self.aug(**results)
File "/usr/local/lib/python3.6/dist-packages/albumentations/core/composition.py", line 170, in __call__
p.ensure_data_valid(data)
File "/usr/local/lib/python3.6/dist-packages/albumentations/augmentations/bbox_utils.py", line 37, in ensure_data_valid
raise ValueError("Your 'label_fields' are not valid - them must have same names as params in dict")
ValueError: Your 'label_fields' are not valid - them must have same names as params in dict
|
ValueError
|
def show_result(
    self,
    img,
    result,
    score_thr=0.3,
    bbox_color="green",
    text_color="green",
    thickness=1,
    font_scale=0.5,
    win_name="",
    show=False,
    wait_time=0,
    out_file=None,
):
    """Draw `result` over `img`.
    Args:
        img (str or Tensor): The image to be displayed.
        result (Tensor or tuple): The results to draw over `img`
            bbox_result or (bbox_result, segm_result).
        score_thr (float, optional): Minimum score of bboxes to be shown.
            Default: 0.3.
        bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
        text_color (str or tuple or :obj:`Color`): Color of texts.
        thickness (int): Thickness of lines.
        font_scale (float): Font scales of texts.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
            Default: 0.
        show (bool): Whether to show the image.
            Default: False.
        out_file (str or None): The filename to write the image.
            Default: None.
    Returns:
        img (Tensor): Only if not `show` or `out_file`
    """
    img = mmcv.imread(img).copy()

    # Unpack (bbox, segm) tuples; ms rcnn nests the segm result once more.
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]  # ms rcnn
    else:
        bbox_result, segm_result = result, None

    bboxes = np.vstack(bbox_result)
    labels = np.concatenate(
        [
            np.full(bbox.shape[0], cls_idx, dtype=np.int32)
            for cls_idx, bbox in enumerate(bbox_result)
        ]
    )

    # Overlay segmentation masks for detections above the score threshold.
    if segm_result is not None and len(labels) > 0:  # non empty
        segms = mmcv.concat_list(segm_result)
        np.random.seed(42)  # deterministic per-class colors
        palette = [
            np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            for _ in range(max(labels) + 1)
        ]
        for det_idx in np.where(bboxes[:, -1] > score_thr)[0]:
            det_idx = int(det_idx)
            mask = segms[det_idx]
            img[mask] = img[mask] * 0.5 + palette[labels[det_idx]] * 0.5

    # if out_file specified, do not show image in window
    if out_file is not None:
        show = False

    # draw bounding boxes
    mmcv.imshow_det_bboxes(
        img,
        bboxes,
        labels,
        class_names=self.CLASSES,
        score_thr=score_thr,
        bbox_color=bbox_color,
        text_color=text_color,
        thickness=thickness,
        font_scale=font_scale,
        win_name=win_name,
        show=show,
        wait_time=wait_time,
        out_file=out_file,
    )
    if not (show or out_file):
        warnings.warn(
            "show==False and out_file is not specified, only "
            "result image will be returned"
        )
    return img
|
def show_result(
    self,
    img,
    result,
    score_thr=0.3,
    bbox_color="green",
    text_color="green",
    thickness=1,
    font_scale=0.5,
    win_name="",
    show=False,
    wait_time=0,
    out_file=None,
):
    """Draw `result` over `img`.
    Args:
        img (str or Tensor): The image to be displayed.
        result (Tensor or tuple): The results to draw over `img`
            bbox_result or (bbox_result, segm_result).
        score_thr (float, optional): Minimum score of bboxes to be shown.
            Default: 0.3.
        bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
        text_color (str or tuple or :obj:`Color`): Color of texts.
        thickness (int): Thickness of lines.
        font_scale (float): Font scales of texts.
        win_name (str): The window name.
        wait_time (int): Value of waitKey param.
            Default: 0.
        show (bool): Whether to show the image.
            Default: False.
        out_file (str or None): The filename to write the image.
            Default: None.
    Returns:
        img (Tensor): Only if not `show` or `out_file`
    """
    img = mmcv.imread(img)
    img = img.copy()
    if isinstance(result, tuple):
        bbox_result, segm_result = result
        if isinstance(segm_result, tuple):
            segm_result = segm_result[0]  # ms rcnn
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)
    ]
    labels = np.concatenate(labels)
    # draw segmentation masks
    if segm_result is not None and len(labels) > 0:  # non empty
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        np.random.seed(42)  # deterministic per-class colors
        color_masks = [
            np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            for _ in range(max(labels) + 1)
        ]
        for i in inds:
            i = int(i)
            color_mask = color_masks[labels[i]]
            # BUG FIX: entries of ``segms`` are already boolean ndarray
            # masks here; passing one to ``maskUtils.decode`` (which
            # expects RLE dicts) raised IndexError inside pycocotools.
            # Use the mask directly.
            mask = segms[i]
            img[mask] = img[mask] * 0.5 + color_mask * 0.5
    # if out_file specified, do not show image in window
    if out_file is not None:
        show = False
    # draw bounding boxes
    mmcv.imshow_det_bboxes(
        img,
        bboxes,
        labels,
        class_names=self.CLASSES,
        score_thr=score_thr,
        bbox_color=bbox_color,
        text_color=text_color,
        thickness=thickness,
        font_scale=font_scale,
        win_name=win_name,
        show=show,
        wait_time=wait_time,
        out_file=out_file,
    )
    if not (show or out_file):
        warnings.warn(
            "show==False and out_file is not specified, only "
            "result image will be returned"
        )
    return img
|
https://github.com/open-mmlab/mmdetection/issues/2734
|
Traceback (most recent call last):
File "demo/image_demo.py", line 26, in <module>
main()
File "demo/image_demo.py", line 22, in main
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
File "/root/mmdetection-master/mmdet/apis/inference.py", line 146, in show_result_pyplot
img = model.show_result(img, result, score_thr=score_thr, show=False)
File "/root/mmdetection-master/mmdet/models/detectors/base.py", line 211, in show_result
mask = maskUtils.decode(segms[i]).astype(np.bool)
File "/root/anaconda3/lib/python3.7/site-packages/pycocotools-2.0-py3.7-linux-x86_64.egg/pycocotools/mask.py", line 91, in decode
return _mask.decode([rleObjs])[:,:,0]
File "pycocotools/_mask.pyx", line 146, in pycocotools._mask.decode
File "pycocotools/_mask.pyx", line 128, in pycocotools._mask._frString
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
|
IndexError
|
def show_result(self, data, result, **kwargs):
    """Unwrap the 'ensemble' entry from multi-scale results, then delegate
    drawing to the base detector's ``show_result`` and return its value.
    """
    if self.with_mask:
        bbox_res, segm_res = result
        if isinstance(bbox_res, dict):
            result = (bbox_res["ensemble"], segm_res["ensemble"])
    elif isinstance(result, dict):
        result = result["ensemble"]
    return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
def show_result(self, data, result, **kwargs):
    """Unwrap the 'ensemble' entry from multi-scale results, then delegate
    drawing to the base detector's ``show_result``.

    Returns:
        Whatever the base ``show_result`` returns (the rendered image when
        neither ``show`` nor ``out_file`` is set).
    """
    if self.with_mask:
        ms_bbox_result, ms_segm_result = result
        if isinstance(ms_bbox_result, dict):
            result = (ms_bbox_result["ensemble"], ms_segm_result["ensemble"])
    else:
        if isinstance(result, dict):
            result = result["ensemble"]
    # BUG FIX: the base-class result was discarded, so callers (e.g.
    # show_result_pyplot) always received None instead of the image.
    return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
https://github.com/open-mmlab/mmdetection/issues/2734
|
Traceback (most recent call last):
File "demo/image_demo.py", line 26, in <module>
main()
File "demo/image_demo.py", line 22, in main
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
File "/root/mmdetection-master/mmdet/apis/inference.py", line 146, in show_result_pyplot
img = model.show_result(img, result, score_thr=score_thr, show=False)
File "/root/mmdetection-master/mmdet/models/detectors/base.py", line 211, in show_result
mask = maskUtils.decode(segms[i]).astype(np.bool)
File "/root/anaconda3/lib/python3.7/site-packages/pycocotools-2.0-py3.7-linux-x86_64.egg/pycocotools/mask.py", line 91, in decode
return _mask.decode([rleObjs])[:,:,0]
File "pycocotools/_mask.pyx", line 146, in pycocotools._mask.decode
File "pycocotools/_mask.pyx", line 128, in pycocotools._mask._frString
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
|
IndexError
|
def printf(self, format, *args):
    """Emit a call to the C ``printf`` function.

    Inspired from numba/cgutils.py.
    ``format`` is a Python string; values to print follow in ``args``.
    Note: no checking is done that the number or types of ``args`` match
    the conversion specifiers declared in the format string.
    """
    assert isinstance(format, str)
    mod = self.mod
    i8_ptr = llvm_ir.IntType(8).as_pointer()
    # Store the format string as a NUL-terminated global constant, using
    # the first "printf_format_<n>" name not already taken in the module.
    fmt_bytes = self.make_bytearray((format + "\00").encode("ascii"))
    idx = 0
    while "%s_%d" % ("printf_format", idx) in self.mod.globals:
        idx += 1
    global_fmt = self.global_constant("%s_%d" % ("printf_format", idx), fmt_bytes)
    # Declare printf once per module: int printf(char *, ...)
    fnty = llvm_ir.FunctionType(llvm_ir.IntType(32), [i8_ptr], var_arg=True)
    fn = mod.globals.get("printf", None)
    if fn is None:
        fn = llvm_ir.Function(mod, fnty, name="printf")
    # Cast the [N x i8] global to i8* and emit the call.
    ptr_fmt = self.builder.bitcast(global_fmt, i8_ptr)
    return self.builder.call(fn, [ptr_fmt] + list(args))
|
def printf(self, format, *args):
    """
    Inspired from numba/cgutils.py
    Calls printf().
    Argument `format` is expected to be a Python string.
    Values to be printed are listed in `args`.
    Note: There is no checking to ensure there is correct number of values
    in `args` and there type matches the declaration in the format string.
    """
    assert isinstance(format, str)
    mod = self.mod
    # Make global constant for format string
    cstring = llvm_ir.IntType(8).as_pointer()
    fmt_bytes = self.make_bytearray((format + "\00").encode("ascii"))
    base_name = "printf_format"
    count = 0
    # BUGFIX: llvmlite's Module.get_global() raises KeyError for a missing
    # name, so probing for a free name must use membership on the globals
    # dict instead.
    while "%s_%d" % (base_name, count) in self.mod.globals:
        count += 1
    global_fmt = self.global_constant("%s_%d" % (base_name, count), fmt_bytes)
    fnty = llvm_ir.FunctionType(llvm_ir.IntType(32), [cstring], var_arg=True)
    # Insert printf() declaration once per module; .get() returns None
    # (instead of raising) when it has not been declared yet.
    fn = mod.globals.get("printf", None)
    if fn is None:
        fn = llvm_ir.Function(mod, fnty, name="printf")
    # Call
    ptr_fmt = self.builder.bitcast(global_fmt, cstring)
    return self.builder.call(fn, [ptr_fmt] + list(args))
|
https://github.com/cea-sec/miasm/issues/630
|
TEST/ANALYSIS dse.py llvm
ERROR dse.py llvm
Traceback (most recent call last):
File "dse.py", line 157, in <module>
test(jit_engine)()
File "dse.py", line 61, in __call__
self.run()
File "dse.py", line 67, in run
self.myjit.continue_run()
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/jitload.py", line 369, in continue_run
return self.run_iterator.next()
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/jitload.py", line 340, in runiter_once
self.pc = self.runbloc(self.pc)
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/jitload.py", line 297, in runbloc
return self.jit.runbloc(self.cpu, pc, self.breakpoints_handler.callbacks)
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/jitcore.py", line 180, in runbloc
self.disbloc(lbl, cpu.vmmngr)
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/jitcore.py", line 164, in disbloc
self.add_bloc(cur_block)
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/jitcore_llvm.py", line 92, in add_bloc
func.from_asmblock(block)
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/llvmconvert.py", line 1344, in from_asmblock
self.gen_pre_code(instr_attrib)
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/llvmconvert.py", line 1026, in gen_pre_code
instr_attrib.instr))
File "/usr/local/lib/python2.7/dist-packages/miasm2/jitter/llvmconvert.py", line 574, in printf
while self.mod.get_global("%s_%d" % (base_name, count)):
File "/usr/local/lib/python2.7/dist-packages/llvmlite/ir/module.py", line 138, in get_global
return self.globals[name]
KeyError: 'printf_format_0'
|
KeyError
|
def parse_reports(self):
    """Find Picard OxoGMetrics reports and parse their data.

    Scans log files for CollectOxoGMetrics / ConvertSequencingArtifactToOxoG
    sections, extracts per-context metrics into
    ``self.picard_OxoGMetrics_data`` (sample -> context -> metric -> value),
    writes the parsed table to file and pushes the CCG oxidation error rate
    into the general stats table.

    :return: number of samples with parsed data
    """
    # Set up vars
    self.picard_OxoGMetrics_data = dict()

    # Go through logs and find Metrics
    for f in self.find_log_files("picard/oxogmetrics", filehandles=True):
        # We use lists to make sure that we don't overwrite when no data will be parsed
        # (one entry per command-line header seen in this file)
        parsed_data = list()
        sample_names = list()
        s_files = list()
        s_name = None
        keys = None
        for l in f["f"]:
            # New log starting
            if (
                "CollectOxoGMetrics" in l or "ConvertSequencingArtifactToOxoG" in l
            ) and "INPUT" in l:
                s_name = None
                keys = None
                context_col = None
                # Pull sample name from input; INPUT= (CollectOxoGMetrics)
                # or INPUT_BASE= (ConvertSequencingArtifactToOxoG), value
                # optionally wrapped in square brackets
                fn_search = re.search(
                    r"INPUT(?:_BASE)?(?:=|\s+)(\[?[^\s]+\]?)", l, flags=re.IGNORECASE
                )
                if fn_search:
                    s_name = os.path.basename(fn_search.group(1).strip("[]"))
                    s_name = self.clean_s_name(s_name, f["root"])
                    parsed_data.append(dict())
                    sample_names.append(s_name)
                    s_files.append(f)

            if s_name is not None:
                if "CollectOxoGMetrics$CpcgMetrics" in l and "## METRICS CLASS" in l:
                    # Header row follows the METRICS CLASS marker; read it
                    # directly off the filehandle to get the column names
                    keys = f["f"].readline().strip("\n").split("\t")
                    context_col = keys.index("CONTEXT")
                elif keys:
                    vals = l.strip("\n").split("\t")
                    if len(vals) == len(keys) and context_col is not None:
                        context = vals[context_col]
                        parsed_data[-1][context] = dict()
                        for i, k in enumerate(keys):
                            k = k.strip()
                            # Store numeric values as floats, else as strings
                            try:
                                parsed_data[-1][context][k] = float(vals[i])
                            except ValueError:
                                vals[i] = vals[i].strip()
                                parsed_data[-1][context][k] = vals[i]
                    else:
                        # Row doesn't match the header - metrics table ended
                        s_name = None
                        keys = None

        # Remove empty dictionaries
        for idx, s_name in enumerate(sample_names):
            if len(parsed_data[idx]) > 0:
                if s_name in self.picard_OxoGMetrics_data:
                    log.debug(
                        "Duplicate sample name found in {}! Overwriting: {}".format(
                            s_files[idx], s_name
                        )
                    )
                self.add_data_source(s_files[idx], s_name, section="OxoGMetrics")
                self.picard_OxoGMetrics_data[s_name] = parsed_data[idx]

    # Filter to strip out ignored sample names
    self.picard_OxoGMetrics_data = self.ignore_samples(self.picard_OxoGMetrics_data)

    if len(self.picard_OxoGMetrics_data) > 0:
        # Write parsed data to a file
        # Collapse into 2D structure with sample_context keys
        print_data = {
            "{}_{}".format(s, c): v
            for s in self.picard_OxoGMetrics_data.keys()
            for c, v in self.picard_OxoGMetrics_data[s].items()
        }
        self.write_data_file(print_data, "multiqc_picard_OxoGMetrics")

        # Add to general stats table
        data = dict()
        for s_name in self.picard_OxoGMetrics_data:
            data[s_name] = dict()
            try:
                data[s_name]["CCG_OXIDATION_ERROR_RATE"] = self.picard_OxoGMetrics_data[
                    s_name
                ]["CCG"]["OXIDATION_ERROR_RATE"]
            except KeyError:
                log.warning(
                    "Couldn't find picard CCG oxidation error rate for {}".format(
                        s_name
                    )
                )
        self.general_stats_headers["CCG_OXIDATION_ERROR_RATE"] = {
            "title": "CCG Oxidation",
            "description": "CCG-CAG Oxidation Error Rate",
            "max": 1,
            "min": 0,
            "suffix": "%",
            "format": "{:,.0f}",
            "scale": "RdYlGn-rev",
            "modify": lambda x: self.multiply_hundred(x),
        }
        for s_name in data:
            if s_name not in self.general_stats_data:
                self.general_stats_data[s_name] = dict()
            self.general_stats_data[s_name].update(data[s_name])

    # Return the number of detected samples to the parent module
    return len(self.picard_OxoGMetrics_data)
|
def parse_reports(self):
    """Find Picard OxoGMetrics reports and parse their data.

    Scans log files for CollectOxoGMetrics / ConvertSequencingArtifactToOxoG
    sections, extracts per-context metrics into
    ``self.picard_OxoGMetrics_data``, writes the parsed table to file and
    pushes the CCG oxidation error rate into the general stats table.

    BUGFIX: the sample-name regex only matched ``INPUT=...``, so logs
    written by ConvertSequencingArtifactToOxoG (which uses ``INPUT_BASE``)
    never registered a sample.  Accept an optional ``_BASE`` suffix.

    :return: number of samples with parsed data
    """
    # Set up vars
    self.picard_OxoGMetrics_data = dict()

    # Go through logs and find Metrics
    for f in self.find_log_files("picard/oxogmetrics", filehandles=True):
        # We use lists to make sure that we don't overwrite when no data will be parsed
        parsed_data = list()
        sample_names = list()
        s_files = list()
        s_name = None
        keys = None
        for l in f["f"]:
            # New log starting
            if (
                "CollectOxoGMetrics" in l or "ConvertSequencingArtifactToOxoG" in l
            ) and "INPUT" in l:
                s_name = None
                keys = None
                context_col = None
                # Pull sample name from INPUT= or INPUT_BASE=
                fn_search = re.search(
                    r"INPUT(?:_BASE)?(?:=|\s+)(\[?[^\s]+\]?)", l, flags=re.IGNORECASE
                )
                if fn_search:
                    s_name = os.path.basename(fn_search.group(1).strip("[]"))
                    s_name = self.clean_s_name(s_name, f["root"])
                    parsed_data.append(dict())
                    sample_names.append(s_name)
                    s_files.append(f)

            if s_name is not None:
                if "CollectOxoGMetrics$CpcgMetrics" in l and "## METRICS CLASS" in l:
                    # Header row follows the METRICS CLASS marker
                    keys = f["f"].readline().strip("\n").split("\t")
                    context_col = keys.index("CONTEXT")
                elif keys:
                    vals = l.strip("\n").split("\t")
                    if len(vals) == len(keys) and context_col is not None:
                        context = vals[context_col]
                        parsed_data[-1][context] = dict()
                        for i, k in enumerate(keys):
                            k = k.strip()
                            try:
                                parsed_data[-1][context][k] = float(vals[i])
                            except ValueError:
                                vals[i] = vals[i].strip()
                                parsed_data[-1][context][k] = vals[i]
                    else:
                        # Row doesn't match the header - metrics table ended
                        s_name = None
                        keys = None

        # Remove empty dictionaries
        for idx, s_name in enumerate(sample_names):
            if len(parsed_data[idx]) > 0:
                if s_name in self.picard_OxoGMetrics_data:
                    log.debug(
                        "Duplicate sample name found in {}! Overwriting: {}".format(
                            s_files[idx], s_name
                        )
                    )
                self.add_data_source(s_files[idx], s_name, section="OxoGMetrics")
                self.picard_OxoGMetrics_data[s_name] = parsed_data[idx]

    # Filter to strip out ignored sample names
    self.picard_OxoGMetrics_data = self.ignore_samples(self.picard_OxoGMetrics_data)

    if len(self.picard_OxoGMetrics_data) > 0:
        # Write parsed data to a file
        # Collapse into 2D structure with sample_context keys
        print_data = {
            "{}_{}".format(s, c): v
            for s in self.picard_OxoGMetrics_data.keys()
            for c, v in self.picard_OxoGMetrics_data[s].items()
        }
        self.write_data_file(print_data, "multiqc_picard_OxoGMetrics")

        # Add to general stats table
        data = dict()
        for s_name in self.picard_OxoGMetrics_data:
            data[s_name] = dict()
            try:
                data[s_name]["CCG_OXIDATION_ERROR_RATE"] = self.picard_OxoGMetrics_data[
                    s_name
                ]["CCG"]["OXIDATION_ERROR_RATE"]
            except KeyError:
                log.warning(
                    "Couldn't find picard CCG oxidation error rate for {}".format(
                        s_name
                    )
                )
        self.general_stats_headers["CCG_OXIDATION_ERROR_RATE"] = {
            "title": "CCG Oxidation",
            "description": "CCG-CAG Oxidation Error Rate",
            "max": 1,
            "min": 0,
            "suffix": "%",
            "format": "{:,.0f}",
            "scale": "RdYlGn-rev",
            "modify": lambda x: self.multiply_hundred(x),
        }
        for s_name in data:
            if s_name not in self.general_stats_data:
                self.general_stats_data[s_name] = dict()
            self.general_stats_data[s_name].update(data[s_name])

    # Return the number of detected samples to the parent module
    return len(self.picard_OxoGMetrics_data)
|
https://github.com/ewels/MultiQC/issues/1265
|
$ multiqc -f abcd_CL1005827_L01_R1-indexcov.ped -o OUT
[INFO ] multiqc : This is MultiQC v1.9
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : work/qc/abcd_CL1005827_L01_R1/coverage/indexcov/abcd_CL1005827_L01_R1-indexcov.ped
[INFO ] bcbio : Found 0 reports
[INFO ] goleft_indexcov : Found 1 samples
[ERROR ] multiqc : Oops! The 'goleft_indexcov' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
work/qc/abcd_CL1005827_L01_R1/coverage/indexcov/abcd_CL1005827_L01_R1-indexcov.ped
============================================================
Module goleft_indexcov raised an exception: Traceback (most recent call last):
File "anaconda/lib/python3.6/site-packages/multiqc/plots/linegraph.py", line 173, in plot
return get_template_mod().linegraph(plotdata, pconfig)
AttributeError: module 'multiqc.templates.default' has no attribute 'linegraph'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "anaconda/lib/python3.6/site-packages/multiqc/multiqc.py", line 569, in run
output = mod()
File "anaconda/lib/python3.6/site-packages/multiqc/modules/goleft_indexcov/goleft_indexcov.py", line 47, in __init__
self.roc_plot()
File "anaconda/lib/python3.6/site-packages/multiqc/modules/goleft_indexcov/goleft_indexcov.py", line 117, in roc_plot
plot = linegraph.plot([self.roc_plot_data[c] for c in chroms], pconfig)
File "anaconda/lib/python3.6/site-packages/multiqc/plots/linegraph.py", line 175, in plot
if config.plots_force_flat or (not config.plots_force_interactive and len(plotdata[0]) > config.plots_flat_numseries):
IndexError: list index out of range
============================================================
[INFO ] multiqc : Compressing plot data
[WARNING] multiqc : Deleting : OUT/multiqc_report.html (-f was specified)
[WARNING] multiqc : Deleting : OUT/multiqc_data (-f was specified)
[INFO ] multiqc : Report : OUT/multiqc_report.html
[INFO ] multiqc : Data : OUT/multiqc_data
[INFO ] multiqc : MultiQC complete
|
AttributeError
|
def plot(data, pconfig=None):
    """Plot a line graph with X,Y data.

    :param data: 2D dict, first keys as sample names, then x:y data pairs.
        A list of such dicts may be given for multi-dataset plots (paired
        with ``pconfig["data_labels"]``).
    :param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
    :return: HTML and JS, ready to be inserted into the page
    """
    # Don't just use {} as the default argument as it's mutable. See:
    # http://python-guide-pt-br.readthedocs.io/en/latest/writing/gotchas/
    if pconfig is None:
        pconfig = {}

    # Allow user to overwrite any given config for this plot
    if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
        for k, v in config.custom_plot_config[pconfig["id"]].items():
            pconfig[k] = v

    # Given one dataset - turn it into a list
    if type(data) is not list:
        data = [data]

    # Validate config if linting
    if config.lint:
        # Get module name from the call stack for lint messages
        modname = ""
        callstack = inspect.stack()
        for n in callstack:
            if "multiqc/modules/" in n[1] and "base_module.py" not in n[1]:
                callpath = n[1].split("multiqc/modules/", 1)[-1]
                modname = ">{}< ".format(callpath)
                break
        # Look for essential missing pconfig keys
        for k in ["id", "title", "ylab"]:
            if k not in pconfig:
                errmsg = "LINT: {}Linegraph pconfig was missing key '{}'".format(
                    modname, k
                )
                logger.error(errmsg)
                report.lint_errors.append(errmsg)
        # Check plot title format
        if not re.match(r"^[^:]*\S: \S[^:]*$", pconfig.get("title", "")):
            errmsg = "LINT: {} Linegraph title did not match format 'Module: Plot Name' (found '{}')".format(
                modname, pconfig.get("title", "")
            )
            logger.error(errmsg)
            report.lint_errors.append(errmsg)

    # Smooth dataset if requested in config
    if pconfig.get("smooth_points", None) is not None:
        sumcounts = pconfig.get("smooth_points_sumcounts", True)
        for i, d in enumerate(data):
            # sumcounts may be one flag per dataset, or a single global flag
            if type(sumcounts) is list:
                sumc = sumcounts[i]
            else:
                sumc = sumcounts
            data[i] = smooth_line_data(d, pconfig["smooth_points"], sumc)

    # Add sane plotting config defaults
    for idx, yp in enumerate(pconfig.get("yPlotLines", [])):
        pconfig["yPlotLines"][idx]["width"] = pconfig["yPlotLines"][idx].get("width", 2)

    # Add initial axis labels if defined in `data_labels` but not main config
    if pconfig.get("ylab") is None:
        try:
            pconfig["ylab"] = pconfig["data_labels"][0]["ylab"]
        except (KeyError, IndexError):
            pass
    if pconfig.get("xlab") is None:
        try:
            pconfig["xlab"] = pconfig["data_labels"][0]["xlab"]
        except (KeyError, IndexError):
            pass

    # Generate the data dict structure expected by HighCharts series
    plotdata = list()
    for data_index, d in enumerate(data):
        thisplotdata = list()
        for s in sorted(d.keys()):
            # Ensure any overwritting conditionals from data_labels (e.g. ymax) are taken in consideration
            series_config = pconfig.copy()
            if (
                "data_labels" in pconfig
                and type(pconfig["data_labels"][data_index]) is dict
            ):  # if not a dict: only dataset name is provided
                series_config.update(pconfig["data_labels"][data_index])

            pairs = list()
            maxval = 0
            if "categories" in series_config:
                # Categorical x-axis: keep insertion order, record category names
                pconfig["categories"] = list()
                for k in d[s].keys():
                    pconfig["categories"].append(k)
                    pairs.append(d[s][k])
                    maxval = max(maxval, d[s][k])
            else:
                # Numeric x-axis: filter points outside x/y min/max bounds
                for k in sorted(d[s].keys()):
                    if k is not None:
                        if "xmax" in series_config and float(k) > float(
                            series_config["xmax"]
                        ):
                            continue
                        if "xmin" in series_config and float(k) < float(
                            series_config["xmin"]
                        ):
                            continue
                    if d[s][k] is not None:
                        if "ymax" in series_config and float(d[s][k]) > float(
                            series_config["ymax"]
                        ):
                            continue
                        if "ymin" in series_config and float(d[s][k]) < float(
                            series_config["ymin"]
                        ):
                            continue
                    pairs.append([k, d[s][k]])
                    try:
                        maxval = max(maxval, d[s][k])
                    except TypeError:
                        pass
            if maxval > 0 or series_config.get("hide_empty") is not True:
                this_series = {"name": s, "data": pairs}
                try:
                    this_series["color"] = series_config["colors"][s]
                except:
                    pass
                thisplotdata.append(this_series)
        plotdata.append(thisplotdata)

    # Add on annotation data series
    try:
        if pconfig.get("extra_series"):
            extra_series = pconfig["extra_series"]
            # Normalise to a list of per-dataset lists of series dicts
            if type(pconfig["extra_series"]) == dict:
                extra_series = [[pconfig["extra_series"]]]
            elif (
                type(pconfig["extra_series"]) == list
                and type(pconfig["extra_series"][0]) == dict
            ):
                extra_series = [pconfig["extra_series"]]
            for i, es in enumerate(extra_series):
                for s in es:
                    plotdata[i].append(s)
    except (KeyError, IndexError):
        pass

    # Make a plot - template custom, or interactive or flat
    # Templates without a linegraph() raise AttributeError and fall through
    # to the built-in backends below.
    try:
        return get_template_mod().linegraph(plotdata, pconfig)
    except (AttributeError, TypeError):
        # The `and plotdata` guard avoids an IndexError on len(plotdata[0])
        # when no series were produced at all.
        if config.plots_force_flat or (
            not config.plots_force_interactive
            and plotdata
            and len(plotdata[0]) > config.plots_flat_numseries
        ):
            try:
                return matplotlib_linegraph(plotdata, pconfig)
            except Exception as e:
                logger.error(
                    "############### Error making MatPlotLib figure! Falling back to HighCharts."
                )
                logger.debug(e, exc_info=True)
                return highcharts_linegraph(plotdata, pconfig)
        else:
            # Use MatPlotLib to generate static plots if requested
            if config.export_plots:
                matplotlib_linegraph(plotdata, pconfig)
            # Return HTML for HighCharts dynamic plot
            return highcharts_linegraph(plotdata, pconfig)
|
def plot(data, pconfig=None):
    """Plot a line graph with X,Y data.

    :param data: 2D dict, first keys as sample names, then x:y data pairs.
        A list of such dicts may be given for multi-dataset plots (paired
        with ``pconfig["data_labels"]``).
    :param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
    :return: HTML and JS, ready to be inserted into the page

    BUGFIX: when no series are produced at all (``plotdata == []``),
    ``len(plotdata[0])`` raised IndexError; guard with ``and plotdata``.
    """
    # Don't just use {} as the default argument as it's mutable. See:
    # http://python-guide-pt-br.readthedocs.io/en/latest/writing/gotchas/
    if pconfig is None:
        pconfig = {}

    # Allow user to overwrite any given config for this plot
    if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
        for k, v in config.custom_plot_config[pconfig["id"]].items():
            pconfig[k] = v

    # Given one dataset - turn it into a list
    if type(data) is not list:
        data = [data]

    # Validate config if linting
    if config.lint:
        # Get module name from the call stack for lint messages
        modname = ""
        callstack = inspect.stack()
        for n in callstack:
            if "multiqc/modules/" in n[1] and "base_module.py" not in n[1]:
                callpath = n[1].split("multiqc/modules/", 1)[-1]
                modname = ">{}< ".format(callpath)
                break
        # Look for essential missing pconfig keys
        for k in ["id", "title", "ylab"]:
            if k not in pconfig:
                errmsg = "LINT: {}Linegraph pconfig was missing key '{}'".format(
                    modname, k
                )
                logger.error(errmsg)
                report.lint_errors.append(errmsg)
        # Check plot title format
        if not re.match(r"^[^:]*\S: \S[^:]*$", pconfig.get("title", "")):
            errmsg = "LINT: {} Linegraph title did not match format 'Module: Plot Name' (found '{}')".format(
                modname, pconfig.get("title", "")
            )
            logger.error(errmsg)
            report.lint_errors.append(errmsg)

    # Smooth dataset if requested in config
    if pconfig.get("smooth_points", None) is not None:
        sumcounts = pconfig.get("smooth_points_sumcounts", True)
        for i, d in enumerate(data):
            # sumcounts may be one flag per dataset, or a single global flag
            if type(sumcounts) is list:
                sumc = sumcounts[i]
            else:
                sumc = sumcounts
            data[i] = smooth_line_data(d, pconfig["smooth_points"], sumc)

    # Add sane plotting config defaults
    for idx, yp in enumerate(pconfig.get("yPlotLines", [])):
        pconfig["yPlotLines"][idx]["width"] = pconfig["yPlotLines"][idx].get("width", 2)

    # Add initial axis labels if defined in `data_labels` but not main config
    if pconfig.get("ylab") is None:
        try:
            pconfig["ylab"] = pconfig["data_labels"][0]["ylab"]
        except (KeyError, IndexError):
            pass
    if pconfig.get("xlab") is None:
        try:
            pconfig["xlab"] = pconfig["data_labels"][0]["xlab"]
        except (KeyError, IndexError):
            pass

    # Generate the data dict structure expected by HighCharts series
    plotdata = list()
    for data_index, d in enumerate(data):
        thisplotdata = list()
        for s in sorted(d.keys()):
            # Ensure any overwritting conditionals from data_labels (e.g. ymax) are taken in consideration
            series_config = pconfig.copy()
            if (
                "data_labels" in pconfig
                and type(pconfig["data_labels"][data_index]) is dict
            ):  # if not a dict: only dataset name is provided
                series_config.update(pconfig["data_labels"][data_index])

            pairs = list()
            maxval = 0
            if "categories" in series_config:
                # Categorical x-axis: keep insertion order, record category names
                pconfig["categories"] = list()
                for k in d[s].keys():
                    pconfig["categories"].append(k)
                    pairs.append(d[s][k])
                    maxval = max(maxval, d[s][k])
            else:
                # Numeric x-axis: filter points outside x/y min/max bounds
                for k in sorted(d[s].keys()):
                    if k is not None:
                        if "xmax" in series_config and float(k) > float(
                            series_config["xmax"]
                        ):
                            continue
                        if "xmin" in series_config and float(k) < float(
                            series_config["xmin"]
                        ):
                            continue
                    if d[s][k] is not None:
                        if "ymax" in series_config and float(d[s][k]) > float(
                            series_config["ymax"]
                        ):
                            continue
                        if "ymin" in series_config and float(d[s][k]) < float(
                            series_config["ymin"]
                        ):
                            continue
                    pairs.append([k, d[s][k]])
                    try:
                        maxval = max(maxval, d[s][k])
                    except TypeError:
                        pass
            if maxval > 0 or series_config.get("hide_empty") is not True:
                this_series = {"name": s, "data": pairs}
                try:
                    this_series["color"] = series_config["colors"][s]
                except:
                    pass
                thisplotdata.append(this_series)
        plotdata.append(thisplotdata)

    # Add on annotation data series
    try:
        if pconfig.get("extra_series"):
            extra_series = pconfig["extra_series"]
            # Normalise to a list of per-dataset lists of series dicts
            if type(pconfig["extra_series"]) == dict:
                extra_series = [[pconfig["extra_series"]]]
            elif (
                type(pconfig["extra_series"]) == list
                and type(pconfig["extra_series"][0]) == dict
            ):
                extra_series = [pconfig["extra_series"]]
            for i, es in enumerate(extra_series):
                for s in es:
                    plotdata[i].append(s)
    except (KeyError, IndexError):
        pass

    # Make a plot - template custom, or interactive or flat
    try:
        return get_template_mod().linegraph(plotdata, pconfig)
    except (AttributeError, TypeError):
        # BUGFIX: check plotdata is non-empty before indexing plotdata[0]
        if config.plots_force_flat or (
            not config.plots_force_interactive
            and plotdata
            and len(plotdata[0]) > config.plots_flat_numseries
        ):
            try:
                return matplotlib_linegraph(plotdata, pconfig)
            except Exception as e:
                logger.error(
                    "############### Error making MatPlotLib figure! Falling back to HighCharts."
                )
                logger.debug(e, exc_info=True)
                return highcharts_linegraph(plotdata, pconfig)
        else:
            # Use MatPlotLib to generate static plots if requested
            if config.export_plots:
                matplotlib_linegraph(plotdata, pconfig)
            # Return HTML for HighCharts dynamic plot
            return highcharts_linegraph(plotdata, pconfig)
|
https://github.com/ewels/MultiQC/issues/1265
|
$ multiqc -f abcd_CL1005827_L01_R1-indexcov.ped -o OUT
[INFO ] multiqc : This is MultiQC v1.9
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : work/qc/abcd_CL1005827_L01_R1/coverage/indexcov/abcd_CL1005827_L01_R1-indexcov.ped
[INFO ] bcbio : Found 0 reports
[INFO ] goleft_indexcov : Found 1 samples
[ERROR ] multiqc : Oops! The 'goleft_indexcov' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
work/qc/abcd_CL1005827_L01_R1/coverage/indexcov/abcd_CL1005827_L01_R1-indexcov.ped
============================================================
Module goleft_indexcov raised an exception: Traceback (most recent call last):
File "anaconda/lib/python3.6/site-packages/multiqc/plots/linegraph.py", line 173, in plot
return get_template_mod().linegraph(plotdata, pconfig)
AttributeError: module 'multiqc.templates.default' has no attribute 'linegraph'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "anaconda/lib/python3.6/site-packages/multiqc/multiqc.py", line 569, in run
output = mod()
File "anaconda/lib/python3.6/site-packages/multiqc/modules/goleft_indexcov/goleft_indexcov.py", line 47, in __init__
self.roc_plot()
File "anaconda/lib/python3.6/site-packages/multiqc/modules/goleft_indexcov/goleft_indexcov.py", line 117, in roc_plot
plot = linegraph.plot([self.roc_plot_data[c] for c in chroms], pconfig)
File "anaconda/lib/python3.6/site-packages/multiqc/plots/linegraph.py", line 175, in plot
if config.plots_force_flat or (not config.plots_force_interactive and len(plotdata[0]) > config.plots_flat_numseries):
IndexError: list index out of range
============================================================
[INFO ] multiqc : Compressing plot data
[WARNING] multiqc : Deleting : OUT/multiqc_report.html (-f was specified)
[WARNING] multiqc : Deleting : OUT/multiqc_data (-f was specified)
[INFO ] multiqc : Report : OUT/multiqc_report.html
[INFO ] multiqc : Data : OUT/multiqc_data
[INFO ] multiqc : MultiQC complete
|
AttributeError
|
def plot(data, pconfig=None):
    """Plot a line graph with X,Y data.

    :param data: 2D dict, first keys as sample names, then x:y data pairs.
        A list of such dicts may be given for multi-dataset plots (paired
        with ``pconfig["data_labels"]``).
    :param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
    :return: HTML and JS, ready to be inserted into the page
    """
    # Don't just use {} as the default argument as it's mutable. See:
    # http://python-guide-pt-br.readthedocs.io/en/latest/writing/gotchas/
    if pconfig is None:
        pconfig = {}

    # Allow user to overwrite any given config for this plot
    if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
        for k, v in config.custom_plot_config[pconfig["id"]].items():
            pconfig[k] = v

    # Given one dataset - turn it into a list
    if type(data) is not list:
        data = [data]

    # Validate config if linting
    if config.lint:
        # Get module name from the call stack for lint messages
        modname = ""
        callstack = inspect.stack()
        for n in callstack:
            if "multiqc/modules/" in n[1] and "base_module.py" not in n[1]:
                callpath = n[1].split("multiqc/modules/", 1)[-1]
                modname = ">{}< ".format(callpath)
                break
        # Look for essential missing pconfig keys
        for k in ["id", "title", "ylab"]:
            if k not in pconfig:
                errmsg = "LINT: {}Linegraph pconfig was missing key '{}'".format(
                    modname, k
                )
                logger.error(errmsg)
                report.lint_errors.append(errmsg)
        # Check plot title format
        if not re.match(r"^[^:]*\S: \S[^:]*$", pconfig.get("title", "")):
            errmsg = "LINT: {} Linegraph title did not match format 'Module: Plot Name' (found '{}')".format(
                modname, pconfig.get("title", "")
            )
            logger.error(errmsg)
            report.lint_errors.append(errmsg)

    # Smooth dataset if requested in config
    if pconfig.get("smooth_points", None) is not None:
        sumcounts = pconfig.get("smooth_points_sumcounts", True)
        for i, d in enumerate(data):
            # sumcounts may be one flag per dataset, or a single global flag
            if type(sumcounts) is list:
                sumc = sumcounts[i]
            else:
                sumc = sumcounts
            data[i] = smooth_line_data(d, pconfig["smooth_points"], sumc)

    # Add sane plotting config defaults
    for idx, yp in enumerate(pconfig.get("yPlotLines", [])):
        pconfig["yPlotLines"][idx]["width"] = pconfig["yPlotLines"][idx].get("width", 2)

    # Add initial axis labels if defined in `data_labels` but not main config
    if pconfig.get("ylab") is None:
        try:
            pconfig["ylab"] = pconfig["data_labels"][0]["ylab"]
        except (KeyError, IndexError):
            pass
    if pconfig.get("xlab") is None:
        try:
            pconfig["xlab"] = pconfig["data_labels"][0]["xlab"]
        except (KeyError, IndexError):
            pass

    # Generate the data dict structure expected by HighCharts series
    plotdata = list()
    for data_index, d in enumerate(data):
        thisplotdata = list()
        for s in sorted(d.keys()):
            # Ensure any overwritting conditionals from data_labels (e.g. ymax) are taken in consideration
            series_config = pconfig.copy()
            if (
                "data_labels" in pconfig
                and type(pconfig["data_labels"][data_index]) is dict
            ):  # if not a dict: only dataset name is provided
                series_config.update(pconfig["data_labels"][data_index])

            pairs = list()
            maxval = 0
            if "categories" in series_config:
                # Categorical x-axis: keep insertion order, record category names
                pconfig["categories"] = list()
                for k in d[s].keys():
                    pconfig["categories"].append(k)
                    pairs.append(d[s][k])
                    maxval = max(maxval, d[s][k])
            else:
                # Numeric x-axis: filter points outside x/y min/max bounds
                for k in sorted(d[s].keys()):
                    if k is not None:
                        if "xmax" in series_config and float(k) > float(
                            series_config["xmax"]
                        ):
                            continue
                        if "xmin" in series_config and float(k) < float(
                            series_config["xmin"]
                        ):
                            continue
                    if d[s][k] is not None:
                        if "ymax" in series_config and float(d[s][k]) > float(
                            series_config["ymax"]
                        ):
                            continue
                        if "ymin" in series_config and float(d[s][k]) < float(
                            series_config["ymin"]
                        ):
                            continue
                    pairs.append([k, d[s][k]])
                    try:
                        maxval = max(maxval, d[s][k])
                    except TypeError:
                        pass
            if maxval > 0 or series_config.get("hide_empty") is not True:
                this_series = {"name": s, "data": pairs}
                try:
                    this_series["color"] = series_config["colors"][s]
                except:
                    pass
                thisplotdata.append(this_series)
        plotdata.append(thisplotdata)

    # Add on annotation data series
    try:
        if pconfig.get("extra_series"):
            extra_series = pconfig["extra_series"]
            # Normalise to a list of per-dataset lists of series dicts
            if type(pconfig["extra_series"]) == dict:
                extra_series = [[pconfig["extra_series"]]]
            elif (
                type(pconfig["extra_series"]) == list
                and type(pconfig["extra_series"][0]) == dict
            ):
                extra_series = [pconfig["extra_series"]]
            for i, es in enumerate(extra_series):
                for s in es:
                    plotdata[i].append(s)
    except (KeyError, IndexError):
        pass

    # Make a plot - template custom, or interactive or flat
    # Templates without a linegraph() raise AttributeError and fall through
    # to the built-in backends below.
    try:
        return get_template_mod().linegraph(plotdata, pconfig)
    except (AttributeError, TypeError):
        # The `and plotdata` guard avoids an IndexError on len(plotdata[0])
        # when no series were produced at all.
        if config.plots_force_flat or (
            not config.plots_force_interactive
            and plotdata
            and len(plotdata[0]) > config.plots_flat_numseries
        ):
            try:
                return matplotlib_linegraph(plotdata, pconfig)
            except Exception as e:
                logger.error(
                    "############### Error making MatPlotLib figure! Falling back to HighCharts."
                )
                logger.debug(e, exc_info=True)
                return highcharts_linegraph(plotdata, pconfig)
        else:
            # Use MatPlotLib to generate static plots if requested
            if config.export_plots:
                matplotlib_linegraph(plotdata, pconfig)
            # Return HTML for HighCharts dynamic plot
            return highcharts_linegraph(plotdata, pconfig)
|
def plot(data, pconfig=None):
    """Plot a line graph with X,Y data.

    :param data: 2D dict, first keys as sample names, then x:y data pairs.
        May also be a list of such dicts (one per dataset / data_labels tab).
    :param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
    :return: HTML and JS, ready to be inserted into the page
    """
    # Don't just use {} as the default argument as it's mutable. See:
    # http://python-guide-pt-br.readthedocs.io/en/latest/writing/gotchas/
    if pconfig is None:
        pconfig = {}

    # Allow user to overwrite any given config for this plot
    if "id" in pconfig and pconfig["id"] and pconfig["id"] in config.custom_plot_config:
        for k, v in config.custom_plot_config[pconfig["id"]].items():
            pconfig[k] = v

    # Given one dataset - turn it into a list
    if type(data) is not list:
        data = [data]

    # Validate config if linting
    if config.lint:
        # Get module name from the call stack for a useful lint message
        modname = ""
        callstack = inspect.stack()
        for n in callstack:
            if "multiqc/modules/" in n[1] and "base_module.py" not in n[1]:
                callpath = n[1].split("multiqc/modules/", 1)[-1]
                modname = ">{}< ".format(callpath)
                break
        # Look for essential missing pconfig keys
        for k in ["id", "title", "ylab"]:
            if k not in pconfig:
                errmsg = "LINT: {}Linegraph pconfig was missing key '{}'".format(modname, k)
                logger.error(errmsg)
                report.lint_errors.append(errmsg)
        # Check plot title format
        if not re.match(r"^[^:]*\S: \S[^:]*$", pconfig.get("title", "")):
            errmsg = "LINT: {} Linegraph title did not match format 'Module: Plot Name' (found '{}')".format(
                modname, pconfig.get("title", "")
            )
            logger.error(errmsg)
            report.lint_errors.append(errmsg)

    # Smooth dataset if requested in config
    if pconfig.get("smooth_points", None) is not None:
        sumcounts = pconfig.get("smooth_points_sumcounts", True)
        for i, d in enumerate(data):
            # sumcounts may be a per-dataset list or a single bool for all datasets
            if type(sumcounts) is list:
                sumc = sumcounts[i]
            else:
                sumc = sumcounts
            data[i] = smooth_line_data(d, pconfig["smooth_points"], sumc)

    # Add sane plotting config defaults
    for idx, yp in enumerate(pconfig.get("yPlotLines", [])):
        pconfig["yPlotLines"][idx]["width"] = pconfig["yPlotLines"][idx].get("width", 2)

    # Add initial axis labels if defined in `data_labels` but not main config
    if pconfig.get("ylab") is None:
        try:
            pconfig["ylab"] = pconfig["data_labels"][0]["ylab"]
        except (KeyError, IndexError):
            pass
    if pconfig.get("xlab") is None:
        try:
            pconfig["xlab"] = pconfig["data_labels"][0]["xlab"]
        except (KeyError, IndexError):
            pass

    # Generate the data dict structure expected by HighCharts series
    plotdata = list()
    for data_index, d in enumerate(data):
        thisplotdata = list()
        for s in sorted(d.keys()):
            # Ensure any overwritting conditionals from data_labels (e.g. ymax) are taken in consideration
            series_config = pconfig.copy()
            if "data_labels" in pconfig and type(pconfig["data_labels"][data_index]) is dict:
                # if not a dict: only dataset name is provided
                series_config.update(pconfig["data_labels"][data_index])

            pairs = list()
            maxval = 0
            if "categories" in series_config:
                # Categorical x-axis: values only, category names collected separately
                pconfig["categories"] = list()
                for k in d[s].keys():
                    pconfig["categories"].append(k)
                    pairs.append(d[s][k])
                    maxval = max(maxval, d[s][k])
            else:
                for k in sorted(d[s].keys()):
                    # Discard points outside of the configured x/y limits
                    if k is not None:
                        if "xmax" in series_config and float(k) > float(series_config["xmax"]):
                            continue
                        if "xmin" in series_config and float(k) < float(series_config["xmin"]):
                            continue
                    if d[s][k] is not None:
                        if "ymax" in series_config and float(d[s][k]) > float(series_config["ymax"]):
                            continue
                        if "ymin" in series_config and float(d[s][k]) < float(series_config["ymin"]):
                            continue
                    pairs.append([k, d[s][k]])
                    try:
                        maxval = max(maxval, d[s][k])
                    except TypeError:
                        # y value was None - can't compare
                        pass
            if maxval > 0 or series_config.get("hide_empty") is not True:
                this_series = {"name": s, "data": pairs}
                try:
                    this_series["color"] = series_config["colors"][s]
                except:
                    pass
                thisplotdata.append(this_series)
        plotdata.append(thisplotdata)

    # Add on annotation data series
    try:
        if pconfig.get("extra_series"):
            extra_series = pconfig["extra_series"]
            if type(pconfig["extra_series"]) == dict:
                extra_series = [[pconfig["extra_series"]]]
            elif type(pconfig["extra_series"]) == list and type(pconfig["extra_series"][0]) == dict:
                extra_series = [pconfig["extra_series"]]
            for i, es in enumerate(extra_series):
                for s in es:
                    plotdata[i].append(s)
    except (KeyError, IndexError):
        pass

    # Make a plot - template custom, or interactive or flat
    try:
        return get_template_mod().linegraph(plotdata, pconfig)
    except (AttributeError, TypeError):
        # BUGFIX: plotdata can be empty when no parseable sample data was found.
        # Guard before indexing plotdata[0], otherwise this raises IndexError
        # (see GitHub issue #1265: goleft_indexcov with a single sample).
        if config.plots_force_flat or (
            not config.plots_force_interactive
            and plotdata
            and len(plotdata[0]) > config.plots_flat_numseries
        ):
            try:
                return matplotlib_linegraph(plotdata, pconfig)
            except Exception as e:
                logger.error("############### Error making MatPlotLib figure! Falling back to HighCharts.")
                logger.debug(e, exc_info=True)
                return highcharts_linegraph(plotdata, pconfig)
        else:
            # Use MatPlotLib to generate static plots if requested
            if config.export_plots:
                matplotlib_linegraph(plotdata, pconfig)
            # Return HTML for HighCharts dynamic plot
            return highcharts_linegraph(plotdata, pconfig)
|
https://github.com/ewels/MultiQC/issues/1265
|
$ multiqc -f abcd_CL1005827_L01_R1-indexcov.ped -o OUT
[INFO ] multiqc : This is MultiQC v1.9
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : work/qc/abcd_CL1005827_L01_R1/coverage/indexcov/abcd_CL1005827_L01_R1-indexcov.ped
[INFO ] bcbio : Found 0 reports
[INFO ] goleft_indexcov : Found 1 samples
[ERROR ] multiqc : Oops! The 'goleft_indexcov' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
work/qc/abcd_CL1005827_L01_R1/coverage/indexcov/abcd_CL1005827_L01_R1-indexcov.ped
============================================================
Module goleft_indexcov raised an exception: Traceback (most recent call last):
File "anaconda/lib/python3.6/site-packages/multiqc/plots/linegraph.py", line 173, in plot
return get_template_mod().linegraph(plotdata, pconfig)
AttributeError: module 'multiqc.templates.default' has no attribute 'linegraph'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "anaconda/lib/python3.6/site-packages/multiqc/multiqc.py", line 569, in run
output = mod()
File "anaconda/lib/python3.6/site-packages/multiqc/modules/goleft_indexcov/goleft_indexcov.py", line 47, in __init__
self.roc_plot()
File "anaconda/lib/python3.6/site-packages/multiqc/modules/goleft_indexcov/goleft_indexcov.py", line 117, in roc_plot
plot = linegraph.plot([self.roc_plot_data[c] for c in chroms], pconfig)
File "anaconda/lib/python3.6/site-packages/multiqc/plots/linegraph.py", line 175, in plot
if config.plots_force_flat or (not config.plots_force_interactive and len(plotdata[0]) > config.plots_flat_numseries):
IndexError: list index out of range
============================================================
[INFO ] multiqc : Compressing plot data
[WARNING] multiqc : Deleting : OUT/multiqc_report.html (-f was specified)
[WARNING] multiqc : Deleting : OUT/multiqc_data (-f was specified)
[INFO ] multiqc : Report : OUT/multiqc_report.html
[INFO ] multiqc : Data : OUT/multiqc_data
[INFO ] multiqc : MultiQC complete
|
AttributeError
|
def __init__(self):
    """Locate, parse and report on Kraken taxonomic classification logs."""
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="Kraken",
        anchor="kraken",
        href="https://ccb.jhu.edu/software/kraken/",
        info="is a taxonomic classification tool that uses exact k-mer matches to find the lowest common ancestor (LCA) of a given sequence.",
    )

    # Taxonomy rank codes mapped to human-readable names, most specific first
    self.t_ranks = OrderedDict(
        [
            ("S", "Species"),
            ("G", "Genus"),
            ("F", "Family"),
            ("O", "Order"),
            ("C", "Class"),
            ("P", "Phylum"),
            ("K", "Kingdom"),
            ("D", "Domain"),
            ("R", "Root"),
        ]
    )
    # self.t_ranks['U'] = 'Unclassified'

    # Find and load any kraken reports
    self.kraken_raw_data = dict()
    for found_file in self.find_log_files("kraken", filehandles=True):
        self.parse_logs(found_file)

    # Filter to strip out ignored sample names
    self.kraken_raw_data = self.ignore_samples(self.kraken_raw_data)
    if not self.kraken_raw_data:
        raise UserWarning
    log.info("Found {} reports".format(len(self.kraken_raw_data)))

    # Sum counts across all samples, so that we can pick top 5
    self.kraken_total_pct = dict()
    self.kraken_total_counts = dict()
    self.kraken_sample_total_readcounts = dict()
    self.sample_total_readcounts()
    self.sum_sample_counts()

    # Build the report sections
    self.general_stats_cols()
    self.top_five_barplot()
|
def __init__(self):
    """Locate, parse and report on Kraken taxonomic classification logs."""
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="Kraken",
        anchor="kraken",
        href="https://ccb.jhu.edu/software/kraken/",
        info="is a taxonomic classification tool that uses exact k-mer matches to find the lowest common ancestor (LCA) of a given sequence.",
    )

    # Taxonomy rank codes mapped to human-readable names, most specific first
    self.t_ranks = OrderedDict(
        [
            ("S", "Species"),
            ("G", "Genus"),
            ("F", "Family"),
            ("O", "Order"),
            ("C", "Class"),
            ("P", "Phylum"),
            ("K", "Kingdom"),
            ("D", "Domain"),
            ("R", "Root"),
        ]
    )
    # self.t_ranks['U'] = 'Unclassified'

    # Find and load any kraken reports
    self.kraken_raw_data = dict()
    for found_file in self.find_log_files("kraken", filehandles=True):
        self.parse_logs(found_file)

    # Filter to strip out ignored sample names
    self.kraken_raw_data = self.ignore_samples(self.kraken_raw_data)
    if not self.kraken_raw_data:
        raise UserWarning
    log.info("Found {} reports".format(len(self.kraken_raw_data)))

    # Sum counts across all samples, so that we can pick top 5
    self.kraken_total_pct = dict()
    self.kraken_total_counts = dict()
    self.kraken_sample_total_readcounts = dict()
    self.sum_sample_counts()

    # Build the report sections
    self.general_stats_cols()
    self.top_five_barplot()
|
https://github.com/ewels/MultiQC/issues/1276
|
Module kraken raised an exception: Traceback (most recent call last):
File "/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/multiqc.py", line 569, in run
output = mod()
File "//multiqc/1.9/lib/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/modules/kraken/kraken.py", line 60, in __init__
self.sum_sample_counts()
File "//multiqc/1.9/lib/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/modules/kraken/kraken.py", line 134, in sum_sample_counts
self.kraken_sample_total_readcounts[s_name] = round(float(row['counts_rooted']) / (row['percent'] / 100.0))
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def sum_sample_counts(self):
    """Sum counts across all samples for kraken data.

    Fractions of each sample's total read count are accumulated (rather than
    raw counts) so that deeply-sequenced samples are not unfairly
    over-represented when the top-5 taxa per rank are picked later.
    """
    for s_name, rows in self.kraken_raw_data.items():
        for row in rows:
            # Convenience vars that are easier to read
            rank_code = row["rank_code"]
            classif = row["classif"]

            # Only keep rows that sit exactly on a taxonomy rank level
            if rank_code == "-" or any(ch.isdigit() for ch in rank_code):
                continue

            pct_for_rank = self.kraken_total_pct.setdefault(rank_code, dict())
            counts_for_rank = self.kraken_total_counts.setdefault(rank_code, dict())

            # Accumulate the fraction of this sample's reads, and the raw count
            fraction = row["counts_rooted"] / self.kraken_sample_total_readcounts[s_name]
            pct_for_rank[classif] = pct_for_rank.get(classif, 0) + fraction
            counts_for_rank[classif] = counts_for_rank.get(classif, 0) + row["counts_rooted"]
|
def sum_sample_counts(self):
    """Sum counts across all samples for kraken data.

    Also estimates each sample's total read count (stored in
    ``self.kraken_sample_total_readcounts``) from the unclassified row or the
    largest domain row, since percentages alone don't give absolute totals.
    """
    # Sum the percentages for each taxa across all samples
    # Allows us to pick top-5 for each rank
    # Use percentages instead of counts so that deeply-sequences samples
    # are not unfairly over-represented
    for s_name, data in self.kraken_raw_data.items():
        total_guess_count = None
        for row in data:
            # Convenience vars that are easier to read
            rank_code = row["rank_code"]
            classif = row["classif"]

            # Skip anything that doesn't exactly fit a tax rank level
            if rank_code == "-" or any(c.isdigit() for c in rank_code):
                continue

            # Calculate the total read count using percentages.
            # We use either unclassified or the first domain encountered, to try to
            # use the largest proportion of reads = most accurate guess.
            # BUGFIX: skip rows reporting 0% (division by zero, GitHub issue #1276)
            # and guard the comparison while total_guess_count is still None
            # (int > None raises TypeError on Python 3).
            if row["percent"] > 0 and (
                rank_code == "U"
                or (
                    rank_code == "D"
                    and (total_guess_count is None or row["counts_rooted"] > total_guess_count)
                )
            ):
                self.kraken_sample_total_readcounts[s_name] = round(
                    float(row["counts_rooted"]) / (row["percent"] / 100.0)
                )
                total_guess_count = row["counts_rooted"]

            if rank_code not in self.kraken_total_pct:
                self.kraken_total_pct[rank_code] = dict()
                self.kraken_total_counts[rank_code] = dict()
            if classif not in self.kraken_total_pct[rank_code]:
                self.kraken_total_pct[rank_code][classif] = 0
                self.kraken_total_counts[rank_code][classif] = 0
            self.kraken_total_pct[rank_code][classif] += row["percent"]
            self.kraken_total_counts[rank_code][classif] += row["counts_rooted"]
|
https://github.com/ewels/MultiQC/issues/1276
|
Module kraken raised an exception: Traceback (most recent call last):
File "/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/multiqc.py", line 569, in run
output = mod()
File "//multiqc/1.9/lib/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/modules/kraken/kraken.py", line 60, in __init__
self.sum_sample_counts()
File "//multiqc/1.9/lib/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/modules/kraken/kraken.py", line 134, in sum_sample_counts
self.kraken_sample_total_readcounts[s_name] = round(float(row['counts_rooted']) / (row['percent'] / 100.0))
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def general_stats_cols(self):
    """Add a couple of columns to the General Statistics table"""
    # Work out the most specific taxonomy rank with data, and take the
    # five most abundant classifications at that rank across all samples
    top_five = []
    top_rank_code = None
    top_rank_name = None
    for rank_code, rank_name in self.t_ranks.items():
        try:
            ranked = sorted(
                self.kraken_total_pct[rank_code].items(),
                key=lambda entry: entry[1],
                reverse=True,
            )
        except KeyError:
            # No species-level data found etc
            continue
        top_five = [classif for classif, _pct_sum in ranked[:5]]
        top_rank_code = rank_code
        top_rank_name = rank_name
        break

    top_one_hkey = "% {}".format(top_five[0])

    # Column headers
    headers = OrderedDict()
    headers[top_one_hkey] = {
        "title": top_one_hkey,
        "description": "Percentage of reads that were the top {} over all samples - {}".format(
            top_rank_name, top_five[0]
        ),
        "suffix": "%",
        "max": 100,
        "scale": "PuBuGn",
    }
    headers["% Top 5"] = {
        "title": "% Top 5 {}".format(top_rank_name),
        "description": "Percentage of reads that were classified by one of the top 5 {} ({})".format(
            top_rank_name, ", ".join(top_five)
        ),
        "suffix": "%",
        "max": 100,
        "scale": "PuBu",
    }
    headers["% Unclassified"] = {
        "title": "% Unclassified",
        "description": "Percentage of reads that were unclassified",
        "suffix": "%",
        "max": 100,
        "scale": "OrRd",
    }

    # Get table data - percentages recomputed from counts and sample totals
    tdata = {}
    for s_name, rows in self.kraken_raw_data.items():
        sample_stats = {}
        for row in rows:
            percent = (row["counts_rooted"] / self.kraken_sample_total_readcounts[s_name]) * 100.0
            if row["rank_code"] == "U":
                sample_stats["% Unclassified"] = percent
            if row["rank_code"] == top_rank_code:
                if row["classif"] in top_five:
                    sample_stats["% Top 5"] = percent + sample_stats.get("% Top 5", 0)
                if row["classif"] == top_five[0]:
                    sample_stats[top_one_hkey] = percent
        # Make sure the top-one column always has a value
        sample_stats.setdefault(top_one_hkey, 0)
        tdata[s_name] = sample_stats

    self.general_stats_addcols(tdata, headers)
|
def general_stats_cols(self):
    """Add a couple of columns to the General Statistics table"""
    # Work out the most specific taxonomy rank with data, and take the
    # five most abundant classifications at that rank across all samples
    top_five = []
    top_rank_code = None
    top_rank_name = None
    for rank_code, rank_name in self.t_ranks.items():
        try:
            ranked = sorted(
                self.kraken_total_pct[rank_code].items(),
                key=lambda entry: entry[1],
                reverse=True,
            )
        except KeyError:
            # No species-level data found etc
            continue
        top_five = [classif for classif, _pct_sum in ranked[:5]]
        top_rank_code = rank_code
        top_rank_name = rank_name
        break

    top_one_hkey = "% {}".format(top_five[0])

    # Column headers
    headers = OrderedDict()
    headers[top_one_hkey] = {
        "title": top_one_hkey,
        "description": "Percentage of reads that were the top {} over all samples - {}".format(
            top_rank_name, top_five[0]
        ),
        "suffix": "%",
        "max": 100,
        "scale": "PuBuGn",
    }
    headers["% Top 5"] = {
        "title": "% Top 5 {}".format(top_rank_name),
        "description": "Percentage of reads that were classified by one of the top 5 {} ({})".format(
            top_rank_name, ", ".join(top_five)
        ),
        "suffix": "%",
        "max": 100,
        "scale": "PuBu",
    }
    headers["% Unclassified"] = {
        "title": "% Unclassified",
        "description": "Percentage of reads that were unclassified",
        "suffix": "%",
        "max": 100,
        "scale": "OrRd",
    }

    # Get table data - percentages are taken directly from the report rows
    tdata = {}
    for s_name, rows in self.kraken_raw_data.items():
        sample_stats = {}
        for row in rows:
            if row["rank_code"] == "U":
                sample_stats["% Unclassified"] = row["percent"]
            if row["rank_code"] == top_rank_code:
                if row["classif"] in top_five:
                    sample_stats["% Top 5"] = row["percent"] + sample_stats.get("% Top 5", 0)
                if row["classif"] == top_five[0]:
                    sample_stats[top_one_hkey] = row["percent"]
        # Make sure the top-one column always has a value
        sample_stats.setdefault(top_one_hkey, 0)
        tdata[s_name] = sample_stats

    self.general_stats_addcols(tdata, headers)
|
https://github.com/ewels/MultiQC/issues/1276
|
Module kraken raised an exception: Traceback (most recent call last):
File "/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/multiqc.py", line 569, in run
output = mod()
File "//multiqc/1.9/lib/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/modules/kraken/kraken.py", line 60, in __init__
self.sum_sample_counts()
File "//multiqc/1.9/lib/python3.7/site-packages/multiqc-1.9-py3.7.egg/multiqc/modules/kraken/kraken.py", line 134, in sum_sample_counts
self.kraken_sample_total_readcounts[s_name] = round(float(row['counts_rooted']) / (row['percent'] / 100.0))
ZeroDivisionError: float division by zero
|
ZeroDivisionError
|
def custom_module_classes():
    """
    MultiQC Custom Content class. This module does a lot of different
    things depending on the input and is as flexible as possible.
    NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES

    Returns a list of MultiqcModule instances, one per custom-content
    section (sub-sections are grouped under their parent_id).
    Raises UserWarning when no custom content data is found, or when the
    only output is General Statistics columns (the standard MultiQC signal
    for "module has nothing to show").
    """
    # Dict to hold parsed data. Each key should contain a custom data type
    # eg. output from a particular script. Note that this script may pick
    # up many different types of data from many different sources.
    # Second level keys should be 'config' and 'data'. Data key should then
    # contain sample names, and finally data.
    cust_mods = defaultdict(lambda: defaultdict(lambda: OrderedDict()))

    # Dictionary to hold search patterns - start with those defined in the config
    search_patterns = ["custom_content"]

    # First - find files using patterns described in the config
    config_data = getattr(config, "custom_data", {})
    mod_cust_config = {}
    for k, f in config_data.items():
        # Check that we have a dictionary
        if type(f) != dict:
            log.debug("config.custom_data row was not a dictionary: {}".format(k))
            continue
        c_id = f.get("id", k)

        # Data supplied in with config (eg. from a multiqc_config.yaml file in working directory)
        if "data" in f:
            try:
                cust_mods[c_id]["data"].update(f["data"])
            except ValueError:
                # HTML plot type doesn't have a data sample-id key, so just take the whole chunk of data
                cust_mods[c_id]["data"] = f["data"]
            cust_mods[c_id]["config"].update({k: v for k, v in f.items() if k != "data"})
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            continue

        # Custom Content ID has search patterns in the config
        if c_id in report.files:
            cust_mods[c_id]["config"] = f
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            search_patterns.append(c_id)
            continue

        # Must just be configuration for a separate custom-content class
        mod_cust_config[c_id] = f

    # Now go through each of the file search patterns
    bm = BaseMultiqcModule()
    for k in search_patterns:
        num_sp_found_files = 0
        for f in bm.find_log_files(k):
            num_sp_found_files += 1

            # Handle any exception without messing up for remaining custom content files
            try:
                f_extension = os.path.splitext(f["fn"])[1]

                # YAML and JSON files are the easiest
                parsed_data = None
                if f_extension == ".yaml" or f_extension == ".yml":
                    try:
                        parsed_data = yaml_ordered_load(f["f"])
                    except Exception as e:
                        log.warning("Error parsing YAML file '{}' (probably invalid YAML)".format(f["fn"]))
                        log.debug("YAML error: {}".format(e), exc_info=True)
                        break
                elif f_extension == ".json":
                    try:
                        # Use OrderedDict for objects so that column order is honoured
                        parsed_data = json.loads(f["f"], object_pairs_hook=OrderedDict)
                    except Exception as e:
                        log.warning("Error parsing JSON file '{}' (probably invalid JSON)".format(f["fn"]))
                        log.warning("JSON error: {}".format(e))
                        break
                elif f_extension == ".png" or f_extension == ".jpeg" or f_extension == ".jpg":
                    # Images are embedded into the report as base64 data URIs
                    image_string = base64.b64encode(f["f"].read()).decode("utf-8")
                    image_format = "png" if f_extension == ".png" else "jpg"
                    img_html = '<div class="mqc-custom-content-image"><img src="data:image/{};base64,{}" /></div>'.format(
                        image_format, image_string
                    )
                    parsed_data = {
                        "id": f["s_name"],
                        "plot_type": "image",
                        "section_name": f["s_name"].replace("_", " ").replace("-", " ").replace(".", " "),
                        "data": img_html,
                    }
                elif f_extension == ".html":
                    parsed_data = {
                        "id": f["s_name"],
                        "plot_type": "html",
                        "data": f["f"],
                    }
                    parsed_data.update(_find_html_file_header(f))

                if parsed_data is not None:
                    c_id = parsed_data.get("id", k)
                    if len(parsed_data.get("data", {})) > 0:
                        # HTML content is stored as a raw string, not per-sample
                        if type(parsed_data["data"]) == str:
                            cust_mods[c_id]["data"] = parsed_data["data"]
                        else:
                            cust_mods[c_id]["data"].update(parsed_data["data"])
                        cust_mods[c_id]["config"].update({j: k for j, k in parsed_data.items() if j != "data"})
                    else:
                        log.warning("No data found in {}".format(f["fn"]))

                # txt, csv, tsv etc
                else:
                    # Look for configuration details in the header
                    m_config = _find_file_header(f)
                    s_name = None
                    if m_config is not None:
                        c_id = m_config.get("id", k)
                        # Update the base config with anything parsed from the file
                        b_config = cust_mods.get(c_id, {}).get("config", {})
                        b_config.update(m_config)
                        # Now set the module config to the merged dict
                        m_config = dict(b_config)
                        s_name = m_config.get("sample_name")
                    else:
                        c_id = k
                        m_config = cust_mods.get(c_id, {}).get("config", {})

                    # Guess sample name if not given
                    if s_name is None:
                        s_name = bm.clean_s_name(f["s_name"], f["root"])

                    # Guess c_id if no information known
                    if k == "custom_content":
                        c_id = s_name

                    # Merge with config from a MultiQC config file if we have it
                    m_config.update(mod_cust_config.get(c_id, {}))

                    # Add information about the file to the config dict
                    if "files" not in m_config:
                        m_config["files"] = dict()
                    m_config["files"].update({s_name: {"fn": f["fn"], "root": f["root"]}})

                    # Guess file format if not given
                    if m_config.get("file_format") is None:
                        m_config["file_format"] = _guess_file_format(f)

                    # Parse data
                    try:
                        parsed_data, conf = _parse_txt(f, m_config)
                        if parsed_data is None or len(parsed_data) == 0:
                            log.warning("Not able to parse custom data in {}".format(f["fn"]))
                        else:
                            # Did we get a new section id from the file?
                            if conf.get("id") is not None:
                                c_id = conf.get("id")
                            # heatmap - special data type
                            if type(parsed_data) == list:
                                cust_mods[c_id]["data"] = parsed_data
                            elif conf.get("plot_type") == "html":
                                cust_mods[c_id]["data"] = parsed_data
                            else:
                                cust_mods[c_id]["data"].update(parsed_data)
                            cust_mods[c_id]["config"].update(conf)
                    except (IndexError, AttributeError, TypeError):
                        log.error("Unexpected parsing error for {}".format(f["fn"]), exc_info=True)
                        raise  # testing

            except Exception as e:
                log.error("Uncaught exception raised for file '{}'".format(f["fn"]))
                log.exception(e)

        # Give log message if no files found for search pattern
        if num_sp_found_files == 0 and k != "custom_content":
            log.debug("No samples found: custom content ({})".format(k))

    # Filter to strip out ignored sample names
    for k in cust_mods:
        cust_mods[k]["data"] = bm.ignore_samples(cust_mods[k]["data"])

    # Remove any configs that have no data
    remove_cids = [k for k in cust_mods if len(cust_mods[k]["data"]) == 0]
    for k in remove_cids:
        del cust_mods[k]

    if len(cust_mods) == 0:
        raise UserWarning

    # Go through each data type
    parsed_modules = OrderedDict()
    for c_id, mod in cust_mods.items():
        # General Stats
        if mod["config"].get("plot_type") == "generalstats":
            gsheaders = mod["config"].get("pconfig")
            if gsheaders is None:
                # No pconfig given: derive a sorted, de-duplicated header list from the data
                headers = set()
                for d in mod["data"].values():
                    headers.update(d.keys())
                headers = list(headers)
                headers.sort()
                gsheaders = OrderedDict()
                for h in headers:
                    gsheaders[h] = dict()

            # Headers is a list of dicts
            if type(gsheaders) == list:
                gsheaders_dict = OrderedDict()
                for gsheader in gsheaders:
                    for col_id, col_data in gsheader.items():
                        gsheaders_dict[col_id] = col_data
                gsheaders = gsheaders_dict

            # Add namespace and description if not specified
            for m_id in gsheaders:
                if "namespace" not in gsheaders[m_id]:
                    gsheaders[m_id]["namespace"] = mod["config"].get("namespace", c_id)

            bm.general_stats_addcols(mod["data"], gsheaders)

        # Initialise this new module class and append to list
        else:
            # Is this file asking to be a sub-section under a parent section?
            mod_id = mod["config"].get("parent_id", c_id)
            # If we have any custom configuration from a MultiQC config file, update here
            # This is done earlier for tsv files too, but we do it here so that it overwrites what was in the file
            if mod_id in mod_cust_config:
                mod["config"].update(mod_cust_config[mod_id])
            # We've not seen this module section before (normal for most custom content)
            if mod_id not in parsed_modules:
                parsed_modules[mod_id] = MultiqcModule(mod_id, mod)
            else:
                # New sub-section
                parsed_modules[mod_id].update_init(c_id, mod)
            parsed_modules[mod_id].add_cc_section(c_id, mod)
            if mod["config"].get("plot_type") == "html":
                log.info("{}: Found 1 sample (html)".format(c_id))
            elif mod["config"].get("plot_type") == "image":
                log.info("{}: Found 1 sample (image)".format(c_id))
            else:
                log.info("{}: Found {} samples ({})".format(c_id, len(mod["data"]), mod["config"].get("plot_type")))

    # Sort sections if we have a config option for order
    mod_order = getattr(config, "custom_content", {}).get("order", [])
    sorted_modules = [parsed_mod for parsed_mod in parsed_modules.values() if parsed_mod.anchor not in mod_order]
    sorted_modules.extend(
        [
            parsed_mod
            for mod_id in mod_order
            for parsed_mod in parsed_modules.values()
            if parsed_mod.anchor == mod_id
        ]
    )

    # If we only have General Stats columns then there are no module outputs
    if len(sorted_modules) == 0:
        raise UserWarning

    return sorted_modules
|
def custom_module_classes():
    """
    MultiQC Custom Content class. This module does a lot of different
    things depending on the input and is as flexible as possible.
    NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES

    Returns a list of MultiqcModule instances, one per custom-content
    section (sub-sections are grouped under their parent_id).
    Raises UserWarning when no custom content data is found, or when the
    only output is General Statistics columns (the standard MultiQC signal
    for "module has nothing to show").
    """
    # Dict to hold parsed data. Each key should contain a custom data type
    # eg. output from a particular script. Note that this script may pick
    # up many different types of data from many different sources.
    # Second level keys should be 'config' and 'data'. Data key should then
    # contain sample names, and finally data.
    cust_mods = defaultdict(lambda: defaultdict(lambda: OrderedDict()))

    # Dictionary to hold search patterns - start with those defined in the config
    search_patterns = ["custom_content"]

    # First - find files using patterns described in the config
    config_data = getattr(config, "custom_data", {})
    mod_cust_config = {}
    for k, f in config_data.items():
        # Check that we have a dictionary
        if type(f) != dict:
            log.debug("config.custom_data row was not a dictionary: {}".format(k))
            continue
        c_id = f.get("id", k)

        # Data supplied in with config (eg. from a multiqc_config.yaml file in working directory)
        if "data" in f:
            try:
                cust_mods[c_id]["data"].update(f["data"])
            except ValueError:
                # HTML plot type doesn't have a data sample-id key, so just take the whole chunk of data
                cust_mods[c_id]["data"] = f["data"]
            # BUGFIX: must compare by equality, not identity. `k is not "data"`
            # relies on string interning and is not guaranteed to be correct
            # (and raises SyntaxWarning on modern Python).
            cust_mods[c_id]["config"].update({k: v for k, v in f.items() if k != "data"})
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            continue

        # Custom Content ID has search patterns in the config
        if c_id in report.files:
            cust_mods[c_id]["config"] = f
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            search_patterns.append(c_id)
            continue

        # Must just be configuration for a separate custom-content class
        mod_cust_config[c_id] = f

    # Now go through each of the file search patterns
    bm = BaseMultiqcModule()
    for k in search_patterns:
        num_sp_found_files = 0
        for f in bm.find_log_files(k):
            num_sp_found_files += 1

            # Handle any exception without messing up for remaining custom content files
            try:
                f_extension = os.path.splitext(f["fn"])[1]

                # YAML and JSON files are the easiest
                parsed_data = None
                if f_extension == ".yaml" or f_extension == ".yml":
                    try:
                        parsed_data = yaml_ordered_load(f["f"])
                    except Exception as e:
                        log.warning("Error parsing YAML file '{}' (probably invalid YAML)".format(f["fn"]))
                        log.debug("YAML error: {}".format(e), exc_info=True)
                        break
                elif f_extension == ".json":
                    try:
                        # Use OrderedDict for objects so that column order is honoured
                        parsed_data = json.loads(f["f"], object_pairs_hook=OrderedDict)
                    except Exception as e:
                        log.warning("Error parsing JSON file '{}' (probably invalid JSON)".format(f["fn"]))
                        log.warning("JSON error: {}".format(e))
                        break
                elif f_extension == ".png" or f_extension == ".jpeg" or f_extension == ".jpg":
                    # Images are embedded into the report as base64 data URIs
                    image_string = base64.b64encode(f["f"].read()).decode("utf-8")
                    image_format = "png" if f_extension == ".png" else "jpg"
                    img_html = '<div class="mqc-custom-content-image"><img src="data:image/{};base64,{}" /></div>'.format(
                        image_format, image_string
                    )
                    parsed_data = {
                        "id": f["s_name"],
                        "plot_type": "image",
                        "section_name": f["s_name"].replace("_", " ").replace("-", " ").replace(".", " "),
                        "data": img_html,
                    }
                elif f_extension == ".html":
                    parsed_data = {
                        "id": f["s_name"],
                        "plot_type": "html",
                        "data": f["f"],
                    }
                    parsed_data.update(_find_html_file_header(f))

                if parsed_data is not None:
                    c_id = parsed_data.get("id", k)
                    if len(parsed_data.get("data", {})) > 0:
                        # HTML content is stored as a raw string, not per-sample
                        if type(parsed_data["data"]) == str:
                            cust_mods[c_id]["data"] = parsed_data["data"]
                        else:
                            cust_mods[c_id]["data"].update(parsed_data["data"])
                        cust_mods[c_id]["config"].update({j: v for j, v in parsed_data.items() if j != "data"})
                    else:
                        log.warning("No data found in {}".format(f["fn"]))

                # txt, csv, tsv etc
                else:
                    # Look for configuration details in the header
                    m_config = _find_file_header(f)
                    s_name = None
                    if m_config is not None:
                        c_id = m_config.get("id", k)
                        # Update the base config with anything parsed from the file
                        b_config = cust_mods.get(c_id, {}).get("config", {})
                        b_config.update(m_config)
                        # Now set the module config to the merged dict
                        m_config = dict(b_config)
                        s_name = m_config.get("sample_name")
                    else:
                        c_id = k
                        m_config = cust_mods.get(c_id, {}).get("config", {})

                    # Guess sample name if not given
                    if s_name is None:
                        s_name = bm.clean_s_name(f["s_name"], f["root"])

                    # Guess c_id if no information known
                    if k == "custom_content":
                        c_id = s_name

                    # Merge with config from a MultiQC config file if we have it
                    m_config.update(mod_cust_config.get(c_id, {}))

                    # Add information about the file to the config dict
                    if "files" not in m_config:
                        m_config["files"] = dict()
                    m_config["files"].update({s_name: {"fn": f["fn"], "root": f["root"]}})

                    # Guess file format if not given
                    if m_config.get("file_format") is None:
                        m_config["file_format"] = _guess_file_format(f)

                    # Parse data
                    try:
                        parsed_data, conf = _parse_txt(f, m_config)
                        if parsed_data is None or len(parsed_data) == 0:
                            log.warning("Not able to parse custom data in {}".format(f["fn"]))
                        else:
                            # Did we get a new section id from the file?
                            if conf.get("id") is not None:
                                c_id = conf.get("id")
                            # heatmap - special data type
                            if type(parsed_data) == list:
                                cust_mods[c_id]["data"] = parsed_data
                            elif conf.get("plot_type") == "html":
                                cust_mods[c_id]["data"] = parsed_data
                            else:
                                cust_mods[c_id]["data"].update(parsed_data)
                            cust_mods[c_id]["config"].update(conf)
                    except (IndexError, AttributeError, TypeError):
                        log.error("Unexpected parsing error for {}".format(f["fn"]), exc_info=True)
                        raise  # testing

            except Exception as e:
                log.error("Uncaught exception raised for file '{}'".format(f["fn"]))
                log.exception(e)

        # Give log message if no files found for search pattern
        if num_sp_found_files == 0 and k != "custom_content":
            log.debug("No samples found: custom content ({})".format(k))

    # Filter to strip out ignored sample names
    for k in cust_mods:
        cust_mods[k]["data"] = bm.ignore_samples(cust_mods[k]["data"])

    # Remove any configs that have no data
    remove_cids = [k for k in cust_mods if len(cust_mods[k]["data"]) == 0]
    for k in remove_cids:
        del cust_mods[k]

    if len(cust_mods) == 0:
        raise UserWarning

    # Go through each data type
    parsed_modules = OrderedDict()
    for c_id, mod in cust_mods.items():
        # General Stats
        if mod["config"].get("plot_type") == "generalstats":
            gsheaders = mod["config"].get("pconfig")
            if gsheaders is None:
                # No pconfig given: derive a sorted, de-duplicated header list from the data
                headers = set()
                for d in mod["data"].values():
                    headers.update(d.keys())
                headers = list(headers)
                headers.sort()
                gsheaders = OrderedDict()
                for h in headers:
                    gsheaders[h] = dict()

            # Headers is a list of dicts
            if type(gsheaders) == list:
                gsheaders_dict = OrderedDict()
                for gsheader in gsheaders:
                    for col_id, col_data in gsheader.items():
                        gsheaders_dict[col_id] = col_data
                gsheaders = gsheaders_dict

            # Add namespace and description if not specified
            for m_id in gsheaders:
                if "namespace" not in gsheaders[m_id]:
                    gsheaders[m_id]["namespace"] = mod["config"].get("namespace", c_id)

            bm.general_stats_addcols(mod["data"], gsheaders)

        # Initialise this new module class and append to list
        else:
            # Is this file asking to be a sub-section under a parent section?
            mod_id = mod["config"].get("parent_id", c_id)
            # If we have any custom configuration from a MultiQC config file, update here
            # This is done earlier for tsv files too, but we do it here so that it overwrites what was in the file
            if mod_id in mod_cust_config:
                mod["config"].update(mod_cust_config[mod_id])
            # We've not seen this module section before (normal for most custom content)
            if mod_id not in parsed_modules:
                parsed_modules[mod_id] = MultiqcModule(mod_id, mod)
            else:
                # New sub-section
                parsed_modules[mod_id].update_init(c_id, mod)
            parsed_modules[mod_id].add_cc_section(c_id, mod)
            if mod["config"].get("plot_type") == "html":
                log.info("{}: Found 1 sample (html)".format(c_id))
            elif mod["config"].get("plot_type") == "image":
                log.info("{}: Found 1 sample (image)".format(c_id))
            else:
                log.info("{}: Found {} samples ({})".format(c_id, len(mod["data"]), mod["config"].get("plot_type")))

    # Sort sections if we have a config option for order
    mod_order = getattr(config, "custom_content", {}).get("order", [])
    sorted_modules = [parsed_mod for parsed_mod in parsed_modules.values() if parsed_mod.anchor not in mod_order]
    sorted_modules.extend(
        [
            parsed_mod
            for mod_id in mod_order
            for parsed_mod in parsed_modules.values()
            if parsed_mod.anchor == mod_id
        ]
    )

    # If we only have General Stats columns then there are no module outputs
    if len(sorted_modules) == 0:
        raise UserWarning

    return sorted_modules
|
https://github.com/ewels/MultiQC/issues/1221
|
Traceback (most recent call last):
File "/home/noronhaa/.local/lib/python3.8/site-packages/numpy/core/__init__.py", line 24, in <module>
from . import multiarray
File "/home/noronhaa/.local/lib/python3.8/site-packages/numpy/core/multiarray.py", line 14, in <module>
from . import overrides
File "/home/noronhaa/.local/lib/python3.8/site-packages/numpy/core/overrides.py", line 7, in <module>
from numpy.core._multiarray_umath import (
ImportError: Error loading shared library ld-linux-x86-64.so.2: No such file or directory (needed by /home/noronhaa/.local/lib/python3.8/site-packages/numpy/core/_multiarray_umath.cpython-38-x86_64-linux-gnu.so)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/multiqc", line 11, in <module>
load_entry_point('multiqc==1.9', 'console_scripts', 'multiqc')()
File "/usr/lib/python3.8/site-packages/pkg_resources/__init__.py", line 489, in load_entry_point
return get_distribution(dist).load_entry_point(group, name)
File "/usr/lib/python3.8/site-packages/pkg_resources/__init__.py", line 2852, in load_entry_point
return ep.load()
File "/usr/lib/python3.8/site-packages/pkg_resources/__init__.py", line 2443, in load
return self.resolve()
File "/usr/lib/python3.8/site-packages/pkg_resources/__init__.py", line 2449, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
File "/usr/lib/python3.8/site-packages/multiqc/__init__.py", line 16, in <module>
from .multiqc import run
File "/usr/lib/python3.8/site-packages/multiqc/multiqc.py", line 38, in <module>
from .plots import table
File "/usr/lib/python3.8/site-packages/multiqc/plots/table.py", line 9, in <module>
from multiqc.utils import config, report, util_functions, mqc_colour
File "/usr/lib/python3.8/site-packages/multiqc/utils/mqc_colour.py", line 7, in <module>
import spectra
File "/home/noronhaa/.local/lib/python3.8/site-packages/spectra/__init__.py", line 1, in <module>
from .core import COLOR_SPACES, Color, Scale
File "/home/noronhaa/.local/lib/python3.8/site-packages/spectra/core.py", line 1, in <module>
from colormath import color_objects, color_conversions
File "/home/noronhaa/.local/lib/python3.8/site-packages/colormath/color_objects.py", line 8, in <module>
import numpy
File "/home/noronhaa/.local/lib/python3.8/site-packages/numpy/__init__.py", line 142, in <module>
from . import core
File "/home/noronhaa/.local/lib/python3.8/site-packages/numpy/core/__init__.py", line 50, in <module>
raise ImportError(msg)
ImportError:
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
* The Python version is: Python3.8 from "/usr/local/bin/python"
* The NumPy version is: "1.18.4"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: Error loading shared library ld-linux-x86-64.so.2: No such file or directory (needed by /home/noronhaa/.local/lib/python3.8/site-packages/numpy/core/_multiarray_umath.cpython-38-x86_64-linux-gnu.so)
|
ImportError
|
def kaiju_stats_table(self):
    """Take the parsed stats from the Kaiju reports and add them to the
    basic stats table at the top of the report"""
    # Only one taxonomic rank is shown in the general stats table:
    # prefer phylum when it was parsed, otherwise fall back to the
    # alphabetically first available rank.
    if "phylum" in self.kaiju_data:
        rank_label = "Phylum"
        rank_data = self.kaiju_data["phylum"]
    else:
        first_rank = sorted(self.kaiju_data)[0]
        rank_label = first_rank.capitalize()
        rank_data = self.kaiju_data[first_rank]
    headers = {
        "percentage_assigned": {
            "title": "% Reads assigned {}".format(rank_label),
            "description": "Percentage of reads assigned at {} rank".format(rank_label),
            "min": 0,
            "max": 100,
            "suffix": "%",
            "scale": "RdYlGn",
        },
        "assigned": {
            "title": "{} Reads assigned {} ".format(config.read_count_prefix, rank_label),
            "description": "Number of reads assigned ({}) at {} rank".format(
                config.read_count_desc, rank_label
            ),
            "modify": lambda x: x * config.read_count_multiplier,
            "scale": "Blues",
        },
    }
    self.general_stats_addcols(rank_data, headers)
|
def kaiju_stats_table(self):
    """Take the parsed stats from the Kaiju reports and add them to the
    basic stats table at the top of the report.

    Shows the phylum rank when available, otherwise the alphabetically
    first parsed rank.
    """
    headers = {}
    general_data = {}
    taxo_ranks = self.kaiju_data.keys()
    # print only phylum rank in general table.
    if len(taxo_ranks) >= 1 and "phylum" in taxo_ranks:
        general_data = self.kaiju_data["phylum"]
        general_taxo_rank = "Phylum"
    else:
        # Bug fix: `dict_keys` has no `.sorted()` method, which raised
        # AttributeError here. Use the sorted() builtin on the dict,
        # which yields its keys in sorted order.
        general_data = self.kaiju_data[sorted(self.kaiju_data)[0]]
        general_taxo_rank = sorted(self.kaiju_data)[0].capitalize()
    headers["percentage_assigned"] = {
        "title": "% Reads assigned {}".format(general_taxo_rank),
        "description": "Percentage of reads assigned at {} rank".format(
            general_taxo_rank
        ),
        "min": 0,
        "max": 100,
        "suffix": "%",
        "scale": "RdYlGn",
    }
    headers["assigned"] = {
        "title": "{} Reads assigned {} ".format(
            config.read_count_prefix, general_taxo_rank
        ),
        "description": "Number of reads assigned ({}) at {} rank".format(
            config.read_count_desc, general_taxo_rank
        ),
        "modify": lambda x: x * config.read_count_multiplier,
        "scale": "Blues",
    }
    self.general_stats_addcols(general_data, headers)
|
https://github.com/ewels/MultiQC/issues/1217
|
[INFO ] multiqc : This is MultiQC v1.9
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : /mnt/home/juriski/projects/scratch/MiSeq_run_200529/results/kaiju1/taxa/genus
[INFO ] kaiju : Found 1 reports
[ERROR ] multiqc : Oops! The 'kaiju' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
genus/Li-2020-14538-3.kaiju.genus.tsv
============================================================
Module kaiju raised an exception: Traceback (most recent call last):
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/multiqc.py", line 569, in run
output = mod()
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/modules/kaiju/kaiju.py", line 64, in __init__
self.kaiju_stats_table()
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/modules/kaiju/kaiju.py", line 105, in kaiju_stats_table
general_data = self.kaiju_data[self.kaiju_data.keys().sorted()[0]]
AttributeError: 'dict_keys' object has no attribute 'sorted'
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
AttributeError
|
def kaiju_stats_table(self):
    """Take the parsed stats from the Kaiju reports and add them to the
    basic stats table at the top of the report"""
    # Pick the single rank shown in the general table: phylum if it was
    # parsed, otherwise the alphabetically first rank we have.
    if "phylum" in self.kaiju_data:
        rank_key = "phylum"
        rank_name = "Phylum"
    else:
        rank_key = sorted(self.kaiju_data.keys())[0]
        rank_name = rank_key.capitalize()
    general_data = self.kaiju_data[rank_key]
    headers = {}
    headers["percentage_assigned"] = {
        "title": f"% Reads assigned {rank_name}",
        "description": f"Percentage of reads assigned at {rank_name} rank",
        "min": 0,
        "max": 100,
        "suffix": "%",
        "scale": "RdYlGn",
    }
    headers["assigned"] = {
        "title": f"{config.read_count_prefix} Reads assigned {rank_name} ",
        "description": f"Number of reads assigned ({config.read_count_desc}) at {rank_name} rank",
        "modify": lambda x: x * config.read_count_multiplier,
        "scale": "Blues",
    }
    self.general_stats_addcols(general_data, headers)
|
def kaiju_stats_table(self):
    """Take the parsed stats from the Kaiju reports and add them to the
    basic stats table at the top of the report.

    Shows the phylum rank when available, otherwise the alphabetically
    first parsed rank.
    """
    headers = {}
    general_data = {}
    taxo_ranks = self.kaiju_data.keys()
    # print only phylum rank in general table.
    if len(taxo_ranks) >= 1 and "phylum" in taxo_ranks:
        general_data = self.kaiju_data["phylum"]
        general_taxo_rank = "Phylum"
    else:
        # Bug fix: `dict.keys()[0]` raises TypeError on Python 3
        # ('dict_keys' is not subscriptable) and was order-dependent
        # anyway. Sort the keys and take the first deterministically.
        general_data = self.kaiju_data[sorted(self.kaiju_data.keys())[0]]
        general_taxo_rank = sorted(self.kaiju_data.keys())[0].capitalize()
    headers["percentage_assigned"] = {
        "title": "% Reads assigned {}".format(general_taxo_rank),
        "description": "Percentage of reads assigned at {} rank".format(
            general_taxo_rank
        ),
        "min": 0,
        "max": 100,
        "suffix": "%",
        "scale": "RdYlGn",
    }
    headers["assigned"] = {
        "title": "{} Reads assigned {} ".format(
            config.read_count_prefix, general_taxo_rank
        ),
        "description": "Number of reads assigned ({}) at {} rank".format(
            config.read_count_desc, general_taxo_rank
        ),
        "modify": lambda x: x * config.read_count_multiplier,
        "scale": "Blues",
    }
    self.general_stats_addcols(general_data, headers)
|
https://github.com/ewels/MultiQC/issues/1217
|
[INFO ] multiqc : This is MultiQC v1.9
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : /mnt/home/juriski/projects/scratch/MiSeq_run_200529/results/kaiju1/taxa/genus
[INFO ] kaiju : Found 1 reports
[ERROR ] multiqc : Oops! The 'kaiju' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
genus/Li-2020-14538-3.kaiju.genus.tsv
============================================================
Module kaiju raised an exception: Traceback (most recent call last):
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/multiqc.py", line 569, in run
output = mod()
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/modules/kaiju/kaiju.py", line 64, in __init__
self.kaiju_stats_table()
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/modules/kaiju/kaiju.py", line 105, in kaiju_stats_table
general_data = self.kaiju_data[self.kaiju_data.keys().sorted()[0]]
AttributeError: 'dict_keys' object has no attribute 'sorted'
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
AttributeError
|
def kaiju_stats_table(self):
    """Take the parsed stats from the Kaiju reports and add them to the
    basic stats table at the top of the report"""
    # The general table shows stats for exactly one taxonomic rank.
    # Use phylum when present; otherwise the first rank alphabetically.
    if "phylum" in self.kaiju_data:
        chosen_rank = "phylum"
        chosen_label = "Phylum"
    else:
        chosen_rank = sorted(self.kaiju_data)[0]
        chosen_label = chosen_rank.capitalize()
    chosen_data = self.kaiju_data[chosen_rank]
    percentage_col = {
        "title": "% Reads assigned {}".format(chosen_label),
        "description": "Percentage of reads assigned at {} rank".format(chosen_label),
        "min": 0,
        "max": 100,
        "suffix": "%",
        "scale": "RdYlGn",
    }
    assigned_col = {
        "title": "{} Reads assigned {} ".format(config.read_count_prefix, chosen_label),
        "description": "Number of reads assigned ({}) at {} rank".format(
            config.read_count_desc, chosen_label
        ),
        "modify": lambda x: x * config.read_count_multiplier,
        "scale": "Blues",
    }
    headers = {"percentage_assigned": percentage_col, "assigned": assigned_col}
    self.general_stats_addcols(chosen_data, headers)
|
def kaiju_stats_table(self):
    """Take the parsed stats from the Kaiju reports and add them to the
    basic stats table at the top of the report"""
    # Select the rank to display in the general stats table. Phylum is
    # preferred; failing that, take the alphabetically first rank found.
    ranks_sorted = sorted(self.kaiju_data.keys())
    use_phylum = "phylum" in self.kaiju_data
    rank_key = "phylum" if use_phylum else ranks_sorted[0]
    rank_title = "Phylum" if use_phylum else rank_key.capitalize()
    table_data = self.kaiju_data[rank_key]
    headers = {}
    headers["percentage_assigned"] = {
        "title": "% Reads assigned {}".format(rank_title),
        "description": "Percentage of reads assigned at {} rank".format(rank_title),
        "min": 0,
        "max": 100,
        "suffix": "%",
        "scale": "RdYlGn",
    }
    headers["assigned"] = {
        "title": "{} Reads assigned {} ".format(config.read_count_prefix, rank_title),
        "description": "Number of reads assigned ({}) at {} rank".format(
            config.read_count_desc, rank_title
        ),
        "modify": lambda x: x * config.read_count_multiplier,
        "scale": "Blues",
    }
    self.general_stats_addcols(table_data, headers)
|
https://github.com/ewels/MultiQC/issues/1217
|
[INFO ] multiqc : This is MultiQC v1.9
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : /mnt/home/juriski/projects/scratch/MiSeq_run_200529/results/kaiju1/taxa/genus
[INFO ] kaiju : Found 1 reports
[ERROR ] multiqc : Oops! The 'kaiju' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
genus/Li-2020-14538-3.kaiju.genus.tsv
============================================================
Module kaiju raised an exception: Traceback (most recent call last):
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/multiqc.py", line 569, in run
output = mod()
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/modules/kaiju/kaiju.py", line 64, in __init__
self.kaiju_stats_table()
File "/home/juriski/.conda/envs/multiqc/lib/python3.6/site-packages/multiqc/modules/kaiju/kaiju.py", line 105, in kaiju_stats_table
general_data = self.kaiju_data[self.kaiju_data.keys().sorted()[0]]
AttributeError: 'dict_keys' object has no attribute 'sorted'
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
AttributeError
|
def parse_qorts(self, f):
    """Parse a QoRTs summary file and store its metrics per sample.

    The first row of the file is a header with one column per sample;
    every following row holds one metric name plus its values.
    """
    sample_names = None
    for line in f["f"]:
        fields = line.split("\t")
        if sample_names is None:
            # Header row: column names after the first become sample names.
            sample_names = [self.clean_s_name(n, f["root"]) for n in fields[1:]]
            if len(sample_names) <= 2 and sample_names[0].endswith("COUNT"):
                # Single-sample files label their column "COUNT" - use the
                # directory name (or the pre-computed sample name) instead.
                if f["fn"] == "QC.summary.txt":
                    dirname = os.path.basename(os.path.normpath(f["root"]))
                    sample_names = [self.clean_s_name(dirname, f["root"])]
                else:
                    sample_names = [f["s_name"]]
            for name in sample_names:
                if name in self.qorts_data:
                    log.debug(
                        "Duplicate sample name found! Overwriting: {}".format(name)
                    )
                self.qorts_data[name] = dict()
        else:
            for idx, name in enumerate(sample_names):
                value = fields[idx + 1]
                # Hack to get around Java localisation with commas for decimal places
                if "," in value and "." not in value:
                    value = value.replace(",", ".")
                self.qorts_data[name][fields[0]] = float(value)
    # Add some extra fields
    for name in sample_names:
        metrics = self.qorts_data[name]
        if "Genes_Total" in metrics and "Genes_WithNonzeroCounts" in metrics:
            metrics["Genes_PercentWithNonzeroCounts"] = (
                metrics["Genes_WithNonzeroCounts"] / metrics["Genes_Total"]
            ) * 100.0
|
def parse_qorts(self, f):
    """Parse a QoRTs summary file and store its metrics per sample.

    The first row is a header with one column per sample; every
    following row holds one metric name plus its values.
    """
    s_names = None
    for l in f["f"]:
        s = l.split("\t")
        if s_names is None:
            # Header row: column names after the first become sample names.
            s_names = [self.clean_s_name(s_name, f["root"]) for s_name in s[1:]]
            if len(s_names) <= 2 and s_names[0].endswith("COUNT"):
                # Single-sample files label their column "COUNT" - use the
                # directory name (or the pre-computed sample name) instead.
                if f["fn"] == "QC.summary.txt":
                    s_names = [
                        self.clean_s_name(
                            os.path.basename(os.path.normpath(f["root"])), f["root"]
                        )
                    ]
                else:
                    s_names = [f["s_name"]]
            for s_name in s_names:
                if s_name in self.qorts_data:
                    log.debug(
                        "Duplicate sample name found! Overwriting: {}".format(s_name)
                    )
                self.qorts_data[s_name] = dict()
        else:
            for i, s_name in enumerate(s_names):
                # Bug fix: QoRTs run under a European Java locale writes
                # decimals with commas (e.g. '1,22'), which crashed float().
                # Normalise to a dot before converting.
                if "," in s[i + 1] and "." not in s[i + 1]:
                    s[i + 1] = s[i + 1].replace(",", ".")
                self.qorts_data[s_name][s[0]] = float(s[i + 1])
    # Add some extra fields
    for i, s_name in enumerate(s_names):
        if (
            "Genes_Total" in self.qorts_data[s_name]
            and "Genes_WithNonzeroCounts" in self.qorts_data[s_name]
        ):
            self.qorts_data[s_name]["Genes_PercentWithNonzeroCounts"] = (
                self.qorts_data[s_name]["Genes_WithNonzeroCounts"]
                / self.qorts_data[s_name]["Genes_Total"]
            ) * 100.0
|
https://github.com/ewels/MultiQC/issues/1153
|
Traceback (most recent call last):
File "/Users/Ernesto/opt/miniconda3/lib/python3.7/site-packages/multiqc/multiqc.py", line 546, in run
output = mod()
File "/Users/Ernesto/opt/miniconda3/lib/python3.7/site-packages/multiqc/modules/qorts/qorts.py", line 29, in __init__
self.parse_qorts(f)
File "/Users/Ernesto/opt/miniconda3/lib/python3.7/site-packages/multiqc/modules/qorts/qorts.py", line 67, in parse_qorts
self.qorts_data[s_name][s[0]] = float(s[i+1])
ValueError: could not convert string to float: '1,22'
|
ValueError
|
def fastqc_general_stats(self):
    """Add some single-number stats to the basic statistics
    table at the top of the report.

    Tolerates FastQC reports from zero-read samples, whose Basic
    Statistics section lacks keys such as 'avg_sequence_length'.
    """
    # Prep the data
    data = dict()
    for s_name in self.fastqc_data:
        bs = self.fastqc_data[s_name]["basic_statistics"]
        try:
            # FastQC reports with 0 reads will trigger a KeyError here
            data[s_name] = {
                "percent_gc": bs["%GC"],
                "avg_sequence_length": bs["avg_sequence_length"],
                "total_sequences": bs["Total Sequences"],
            }
        except KeyError:
            # Zero-read sample: substitute zeros so the table row still renders.
            log.warning("Sample had zero reads: '{}'".format(s_name))
            data[s_name] = {
                "percent_gc": 0,
                "avg_sequence_length": 0,
                "total_sequences": 0,
            }
        try:
            # Older versions of FastQC don't have this
            data[s_name]["percent_duplicates"] = (
                100 - bs["total_deduplicated_percentage"]
            )
        except KeyError:
            pass
        # Add count of fail statuses
        num_statuses = 0
        num_fails = 0
        for s in self.fastqc_data[s_name]["statuses"].values():
            num_statuses += 1
            if s == "fail":
                num_fails += 1
        try:
            data[s_name]["percent_fails"] = (
                float(num_fails) / float(num_statuses)
            ) * 100.0
        except KeyError:
            # If we had no reads then we have no sample in data
            # NOTE(review): the division above would raise ZeroDivisionError
            # (not KeyError) when num_statuses == 0 - confirm this except
            # clause catches the intended failure mode.
            pass
    # Are sequence lengths interesting?
    # Hide the length column when all samples are within 10 bp of each other.
    seq_lengths = [x["avg_sequence_length"] for x in data.values()]
    try:
        hide_seq_length = False if max(seq_lengths) - min(seq_lengths) > 10 else True
    except ValueError:
        # Zero reads
        hide_seq_length = True
    headers = OrderedDict()
    headers["percent_duplicates"] = {
        "title": "% Dups",
        "description": "% Duplicate Reads",
        "max": 100,
        "min": 0,
        "suffix": "%",
        "scale": "RdYlGn-rev",
    }
    headers["percent_gc"] = {
        "title": "% GC",
        "description": "Average % GC Content",
        "max": 100,
        "min": 0,
        "suffix": "%",
        "scale": "Set1",
        "format": "{:,.0f}",
    }
    headers["avg_sequence_length"] = {
        "title": "Length",
        "description": "Average Sequence Length (bp)",
        "min": 0,
        "suffix": " bp",
        "scale": "RdYlGn",
        "format": "{:,.0f}",
        "hidden": hide_seq_length,
    }
    headers["percent_fails"] = {
        "title": "% Failed",
        "description": "Percentage of modules failed in FastQC report (includes those not plotted here)",
        "max": 100,
        "min": 0,
        "suffix": "%",
        "scale": "Reds",
        "format": "{:,.0f}",
        "hidden": True,
    }
    headers["total_sequences"] = {
        "title": "{} Seqs".format(config.read_count_prefix),
        "description": "Total Sequences ({})".format(config.read_count_desc),
        "min": 0,
        "scale": "Blues",
        "modify": lambda x: x * config.read_count_multiplier,
        "shared_key": "read_count",
    }
    self.general_stats_addcols(data, headers)
|
def fastqc_general_stats(self):
    """Add some single-number stats to the basic statistics
    table at the top of the report.

    Handles FastQC reports from zero-read samples, whose Basic
    Statistics section lacks keys such as 'avg_sequence_length'
    (previously this raised KeyError and broke the whole module).
    """
    # Prep the data
    data = dict()
    for s_name in self.fastqc_data:
        bs = self.fastqc_data[s_name]["basic_statistics"]
        try:
            # FastQC reports with 0 reads will trigger a KeyError here
            data[s_name] = {
                "percent_gc": bs["%GC"],
                "avg_sequence_length": bs["avg_sequence_length"],
                "total_sequences": bs["Total Sequences"],
            }
        except KeyError:
            # Zero-read sample: substitute zeros so the row still renders.
            log.warning("Sample had zero reads: '{}'".format(s_name))
            data[s_name] = {
                "percent_gc": 0,
                "avg_sequence_length": 0,
                "total_sequences": 0,
            }
        try:
            # Older versions of FastQC don't have this
            data[s_name]["percent_duplicates"] = (
                100 - bs["total_deduplicated_percentage"]
            )
        except KeyError:
            pass
        # Add count of fail statuses
        num_statuses = 0
        num_fails = 0
        for s in self.fastqc_data[s_name]["statuses"].values():
            num_statuses += 1
            if s == "fail":
                num_fails += 1
        try:
            data[s_name]["percent_fails"] = (
                float(num_fails) / float(num_statuses)
            ) * 100.0
        except ZeroDivisionError:
            # Report had no module statuses at all - leave the field unset
            pass
    # Are sequence lengths interesting?
    # Hide the length column when all samples are within 10 bp of each other.
    seq_lengths = [x["avg_sequence_length"] for x in data.values()]
    try:
        hide_seq_length = False if max(seq_lengths) - min(seq_lengths) > 10 else True
    except ValueError:
        # No samples at all - max()/min() on an empty list
        hide_seq_length = True
    headers = OrderedDict()
    headers["percent_duplicates"] = {
        "title": "% Dups",
        "description": "% Duplicate Reads",
        "max": 100,
        "min": 0,
        "suffix": "%",
        "scale": "RdYlGn-rev",
    }
    headers["percent_gc"] = {
        "title": "% GC",
        "description": "Average % GC Content",
        "max": 100,
        "min": 0,
        "suffix": "%",
        "scale": "Set1",
        "format": "{:,.0f}",
    }
    headers["avg_sequence_length"] = {
        "title": "Length",
        "description": "Average Sequence Length (bp)",
        "min": 0,
        "suffix": " bp",
        "scale": "RdYlGn",
        "format": "{:,.0f}",
        "hidden": hide_seq_length,
    }
    headers["percent_fails"] = {
        "title": "% Failed",
        "description": "Percentage of modules failed in FastQC report (includes those not plotted here)",
        "max": 100,
        "min": 0,
        "suffix": "%",
        "scale": "Reds",
        "format": "{:,.0f}",
        "hidden": True,
    }
    headers["total_sequences"] = {
        "title": "{} Seqs".format(config.read_count_prefix),
        "description": "Total Sequences ({})".format(config.read_count_desc),
        "min": 0,
        "scale": "Blues",
        "modify": lambda x: x * config.read_count_multiplier,
        "shared_key": "read_count",
    }
    self.general_stats_addcols(data, headers)
|
https://github.com/ewels/MultiQC/issues/1129
|
[INFO ] multiqc : This is MultiQC v1.8
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : /home/bioinf/gbs_data/sample3/log
[INFO ] trimmomatic : Found 179 logs
[INFO ] fastqc : Found 368 reports
[ERROR ] multiqc : Oops! The 'fastqc' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
log/fastqc/SK-GBD-000435.1_fastqc.zip
============================================================
Module fastqc raised an exception: Traceback (most recent call last):
File "/home/bioinf/anaconda3/envs/gbs/lib/python3.6/site-packages/multiqc/multiqc.py", line 546, in run
output = mod()
File "/home/bioinf/anaconda3/envs/gbs/lib/python3.6/site-packages/multiqc/modules/fastqc/fastqc.py", line 94, in __init__
self.fastqc_general_stats()
File "/home/bioinf/anaconda3/envs/gbs/lib/python3.6/site-packages/multiqc/modules/fastqc/fastqc.py", line 204, in fastqc_general_stats
'avg_sequence_length': bs['avg_sequence_length'],
KeyError: 'avg_sequence_length'
|
KeyError
|
def gc_content_plot(self):
    """Create the HTML for the FastQC GC content plot.

    Builds both raw-count and percentage-normalised series per sample,
    optionally overlays a theoretical GC curve, and registers the plot
    as a report section. Returns None when no GC data was parsed.
    """
    data = dict()
    data_norm = dict()
    for s_name in self.fastqc_data:
        try:
            data[s_name] = {
                d["gc_content"]: d["count"]
                for d in self.fastqc_data[s_name]["per_sequence_gc_content"]
            }
        except KeyError:
            # This sample's report had no per-sequence GC section - skip it.
            pass
        else:
            # Normalise counts to percentages of the sample total.
            data_norm[s_name] = dict()
            total = sum([c for c in data[s_name].values()])
            for gc, count in data[s_name].items():
                try:
                    data_norm[s_name][gc] = (count / total) * 100
                except ZeroDivisionError:
                    # Zero-read sample: report 0% instead of crashing.
                    data_norm[s_name][gc] = 0
    if len(data) == 0:
        log.debug("per_sequence_gc_content not found in FastQC reports")
        return None
    pconfig = {
        "id": "fastqc_per_sequence_gc_content_plot",
        "title": "FastQC: Per Sequence GC Content",
        "xlab": "% GC",
        "ylab": "Percentage",
        "ymin": 0,
        "xmax": 100,
        "xmin": 0,
        "yDecimals": False,
        "tt_label": "<b>{point.x}% GC</b>: {point.y}",
        "colors": self.get_status_cols("per_sequence_gc_content"),
        "data_labels": [
            {"name": "Percentages", "ylab": "Percentage"},
            {"name": "Counts", "ylab": "Count"},
        ],
    }
    # Try to find and plot a theoretical GC line
    theoretical_gc = None
    theoretical_gc_raw = None
    theoretical_gc_name = None
    for f in self.find_log_files("fastqc/theoretical_gc"):
        if theoretical_gc_raw is not None:
            log.warning(
                "Multiple FastQC Theoretical GC Content files found, now using {}".format(
                    f["fn"]
                )
            )
        theoretical_gc_raw = f["f"]
        theoretical_gc_name = f["fn"]
    if theoretical_gc_raw is None:
        # No file found in the search - fall back to the config option,
        # which may name a bundled curve or point to a file path.
        tgc = getattr(config, "fastqc_config", {}).get("fastqc_theoretical_gc", None)
        if tgc is not None:
            theoretical_gc_name = os.path.basename(tgc)
            tgc_fn = "fastqc_theoretical_gc_{}.txt".format(tgc)
            tgc_path = os.path.join(
                os.path.dirname(__file__), "fastqc_theoretical_gc", tgc_fn
            )
            if not os.path.isfile(tgc_path):
                tgc_path = tgc
            try:
                with io.open(tgc_path, "r", encoding="utf-8") as f:
                    theoretical_gc_raw = f.read()
            except IOError:
                log.warning(
                    "Couldn't open FastQC Theoretical GC Content file {}".format(
                        tgc_path
                    )
                )
                theoretical_gc_raw = None
    if theoretical_gc_raw is not None:
        # Parse the curve file: '#' lines are comments (one may carry the
        # curve's display name); data lines are "<gc> <percentage>".
        theoretical_gc = list()
        for l in theoretical_gc_raw.splitlines():
            if "# FastQC theoretical GC content curve:" in l:
                theoretical_gc_name = l[39:]
            elif not l.startswith("#"):
                s = l.split()
                try:
                    theoretical_gc.append([float(s[0]), float(s[1])])
                except (TypeError, IndexError):
                    pass
    desc = """The average GC content of reads. Normal random library typically have a
    roughly normal distribution of GC content."""
    if theoretical_gc is not None:
        # Calculate the count version of the theoretical data based on the largest data store
        max_total = max([sum(d.values()) for d in data.values()])
        esconfig = {
            "name": "Theoretical GC Content",
            "dashStyle": "Dash",
            "lineWidth": 2,
            "color": "#000000",
            "marker": {"enabled": False},
            "enableMouseTracking": False,
            "showInLegend": False,
        }
        pconfig["extra_series"] = [[dict(esconfig)], [dict(esconfig)]]
        pconfig["extra_series"][0][0]["data"] = theoretical_gc
        pconfig["extra_series"][1][0]["data"] = [
            [d[0], (d[1] / 100.0) * max_total] for d in theoretical_gc
        ]
        desc = " **The dashed black line shows theoretical GC content:** `{}`".format(
            theoretical_gc_name
        )
    self.add_section(
        name="Per Sequence GC Content",
        anchor="fastqc_per_sequence_gc_content",
        description=desc,
        helptext="""
    From the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/5%20Per%20Sequence%20GC%20Content.html):
    _This module measures the GC content across the whole length of each sequence
    in a file and compares it to a modelled normal distribution of GC content._
    _In a normal random library you would expect to see a roughly normal distribution
    of GC content where the central peak corresponds to the overall GC content of
    the underlying genome. Since we don't know the the GC content of the genome the
    modal GC content is calculated from the observed data and used to build a
    reference distribution._
    _An unusually shaped distribution could indicate a contaminated library or
    some other kinds of biased subset. A normal distribution which is shifted
    indicates some systematic bias which is independent of base position. If there
    is a systematic bias which creates a shifted normal distribution then this won't
    be flagged as an error by the module since it doesn't know what your genome's
    GC content should be._
    """,
        plot=linegraph.plot([data_norm, data], pconfig),
    )
|
def gc_content_plot(self):
    """Create the HTML for the FastQC GC content plot.

    Builds both raw-count and percentage-normalised series per sample,
    optionally overlays a theoretical GC curve, and registers the plot
    as a report section. Returns None when no GC data was parsed.
    """
    data = dict()
    data_norm = dict()
    for s_name in self.fastqc_data:
        try:
            data[s_name] = {
                d["gc_content"]: d["count"]
                for d in self.fastqc_data[s_name]["per_sequence_gc_content"]
            }
        except KeyError:
            # This sample's report had no per-sequence GC section - skip it.
            pass
        else:
            data_norm[s_name] = dict()
            total = sum([c for c in data[s_name].values()])
            for gc, count in data[s_name].items():
                try:
                    data_norm[s_name][gc] = (count / total) * 100
                except ZeroDivisionError:
                    # Bug fix: a zero-read sample has total == 0, which
                    # previously crashed with ZeroDivisionError. Report 0%.
                    data_norm[s_name][gc] = 0
    if len(data) == 0:
        log.debug("per_sequence_gc_content not found in FastQC reports")
        return None
    pconfig = {
        "id": "fastqc_per_sequence_gc_content_plot",
        "title": "FastQC: Per Sequence GC Content",
        "xlab": "% GC",
        "ylab": "Percentage",
        "ymin": 0,
        "xmax": 100,
        "xmin": 0,
        "yDecimals": False,
        "tt_label": "<b>{point.x}% GC</b>: {point.y}",
        "colors": self.get_status_cols("per_sequence_gc_content"),
        "data_labels": [
            {"name": "Percentages", "ylab": "Percentage"},
            {"name": "Counts", "ylab": "Count"},
        ],
    }
    # Try to find and plot a theoretical GC line
    theoretical_gc = None
    theoretical_gc_raw = None
    theoretical_gc_name = None
    for f in self.find_log_files("fastqc/theoretical_gc"):
        if theoretical_gc_raw is not None:
            log.warning(
                "Multiple FastQC Theoretical GC Content files found, now using {}".format(
                    f["fn"]
                )
            )
        theoretical_gc_raw = f["f"]
        theoretical_gc_name = f["fn"]
    if theoretical_gc_raw is None:
        # No file found in the search - fall back to the config option,
        # which may name a bundled curve or point to a file path.
        tgc = getattr(config, "fastqc_config", {}).get("fastqc_theoretical_gc", None)
        if tgc is not None:
            theoretical_gc_name = os.path.basename(tgc)
            tgc_fn = "fastqc_theoretical_gc_{}.txt".format(tgc)
            tgc_path = os.path.join(
                os.path.dirname(__file__), "fastqc_theoretical_gc", tgc_fn
            )
            if not os.path.isfile(tgc_path):
                tgc_path = tgc
            try:
                with io.open(tgc_path, "r", encoding="utf-8") as f:
                    theoretical_gc_raw = f.read()
            except IOError:
                log.warning(
                    "Couldn't open FastQC Theoretical GC Content file {}".format(
                        tgc_path
                    )
                )
                theoretical_gc_raw = None
    if theoretical_gc_raw is not None:
        # Parse the curve file: '#' lines are comments (one may carry the
        # curve's display name); data lines are "<gc> <percentage>".
        theoretical_gc = list()
        for l in theoretical_gc_raw.splitlines():
            if "# FastQC theoretical GC content curve:" in l:
                theoretical_gc_name = l[39:]
            elif not l.startswith("#"):
                s = l.split()
                try:
                    theoretical_gc.append([float(s[0]), float(s[1])])
                except (TypeError, IndexError):
                    pass
    desc = """The average GC content of reads. Normal random library typically have a
    roughly normal distribution of GC content."""
    if theoretical_gc is not None:
        # Calculate the count version of the theoretical data based on the largest data store
        max_total = max([sum(d.values()) for d in data.values()])
        esconfig = {
            "name": "Theoretical GC Content",
            "dashStyle": "Dash",
            "lineWidth": 2,
            "color": "#000000",
            "marker": {"enabled": False},
            "enableMouseTracking": False,
            "showInLegend": False,
        }
        pconfig["extra_series"] = [[dict(esconfig)], [dict(esconfig)]]
        pconfig["extra_series"][0][0]["data"] = theoretical_gc
        pconfig["extra_series"][1][0]["data"] = [
            [d[0], (d[1] / 100.0) * max_total] for d in theoretical_gc
        ]
        desc = " **The dashed black line shows theoretical GC content:** `{}`".format(
            theoretical_gc_name
        )
    self.add_section(
        name="Per Sequence GC Content",
        anchor="fastqc_per_sequence_gc_content",
        description=desc,
        helptext="""
    From the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/5%20Per%20Sequence%20GC%20Content.html):
    _This module measures the GC content across the whole length of each sequence
    in a file and compares it to a modelled normal distribution of GC content._
    _In a normal random library you would expect to see a roughly normal distribution
    of GC content where the central peak corresponds to the overall GC content of
    the underlying genome. Since we don't know the the GC content of the genome the
    modal GC content is calculated from the observed data and used to build a
    reference distribution._
    _An unusually shaped distribution could indicate a contaminated library or
    some other kinds of biased subset. A normal distribution which is shifted
    indicates some systematic bias which is independent of base position. If there
    is a systematic bias which creates a shifted normal distribution then this won't
    be flagged as an error by the module since it doesn't know what your genome's
    GC content should be._
    """,
        plot=linegraph.plot([data_norm, data], pconfig),
    )
|
https://github.com/ewels/MultiQC/issues/1129
|
[INFO ] multiqc : This is MultiQC v1.8
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : /home/bioinf/gbs_data/sample3/log
[INFO ] trimmomatic : Found 179 logs
[INFO ] fastqc : Found 368 reports
[ERROR ] multiqc : Oops! The 'fastqc' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
log/fastqc/SK-GBD-000435.1_fastqc.zip
============================================================
Module fastqc raised an exception: Traceback (most recent call last):
File "/home/bioinf/anaconda3/envs/gbs/lib/python3.6/site-packages/multiqc/multiqc.py", line 546, in run
output = mod()
File "/home/bioinf/anaconda3/envs/gbs/lib/python3.6/site-packages/multiqc/modules/fastqc/fastqc.py", line 94, in __init__
self.fastqc_general_stats()
File "/home/bioinf/anaconda3/envs/gbs/lib/python3.6/site-packages/multiqc/modules/fastqc/fastqc.py", line 204, in fastqc_general_stats
'avg_sequence_length': bs['avg_sequence_length'],
KeyError: 'avg_sequence_length'
|
KeyError
|
def custom_module_classes():
    """
    MultiQC Custom Content class. This module does a lot of different
    things depending on the input and is as flexible as possible.
    NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES

    Returns a list of MultiqcModule instances, one per custom-content
    section found (config-supplied data plus files matched on disk).
    Raises UserWarning when nothing is found, which the caller treats
    as "no samples for this module".
    """
    # Dict to hold parsed data. Each key should contain a custom data type
    # eg. output from a particular script. Note that this script may pick
    # up many different types of data from many different sources.
    # Second level keys should be 'config' and 'data'. Data key should then
    # contain sample names, and finally data.
    cust_mods = defaultdict(lambda: defaultdict(lambda: OrderedDict()))
    # Dictionary to hold search patterns - start with those defined in the config
    search_patterns = ["custom_content"]
    # First - find files using patterns described in the config
    config_data = getattr(config, "custom_data", {})
    for k, f in config_data.items():
        # Check that we have a dictionary
        if type(f) != dict:
            log.debug("config.custom_data row was not a dictionary: {}".format(k))
            continue
        c_id = f.get("id", k)
        # Data supplied in with config (eg. from a multiqc_config.yaml file in working directory)
        if "data" in f:
            try:
                cust_mods[c_id]["data"].update(f["data"])
            except ValueError:
                # HTML plot type doesn't have a data sample-id key, so just take the whole chunk of data
                cust_mods[c_id]["data"] = f["data"]
            # NB: use != (value comparison), not 'is not' - identity comparison
            # against a string literal only works by interning accident
            cust_mods[c_id]["config"].update(
                {k: v for k, v in f.items() if k != "data"}
            )
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            continue
        # Custom Content ID has search patterns in the config
        if c_id in report.files:
            cust_mods[c_id]["config"] = f
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            search_patterns.append(c_id)
            continue
        # We should have had something by now
        log.warning(
            "Found section '{}' in config for under custom_data, but no data or search patterns.".format(
                c_id
            )
        )
    # Now go through each of the file search patterns
    bm = BaseMultiqcModule()
    for k in search_patterns:
        num_sp_found_files = 0
        for f in bm.find_log_files(k):
            num_sp_found_files += 1
            # Handle any exception without messing up for remaining custom content files
            try:
                f_extension = os.path.splitext(f["fn"])[1]
                # YAML and JSON files are the easiest
                parsed_data = None
                if f_extension == ".yaml" or f_extension == ".yml":
                    try:
                        parsed_data = yaml_ordered_load(f["f"])
                    except Exception as e:
                        log.warning(
                            "Error parsing YAML file '{}' (probably invalid YAML)".format(
                                f["fn"]
                            )
                        )
                        log.debug("YAML error: {}".format(e), exc_info=True)
                        # NOTE(review): 'break' aborts remaining files for this
                        # search pattern on one bad file - confirm intended
                        break
                elif f_extension == ".json":
                    try:
                        # Use OrderedDict for objects so that column order is honoured
                        parsed_data = json.loads(f["f"], object_pairs_hook=OrderedDict)
                    except Exception as e:
                        log.warning(
                            "Error parsing JSON file '{}' (probably invalid JSON)".format(
                                f["fn"]
                            )
                        )
                        log.warning("JSON error: {}".format(e))
                        break
                elif (
                    f_extension == ".png"
                    or f_extension == ".jpeg"
                    or f_extension == ".jpg"
                ):
                    # Embed images directly into the report as base64 data URIs
                    image_string = base64.b64encode(f["f"].read()).decode("utf-8")
                    image_format = "png" if f_extension == ".png" else "jpg"
                    img_html = '<div class="mqc-custom-content-image"><img src="data:image/{};base64,{}" /></div>'.format(
                        image_format, image_string
                    )
                    parsed_data = {
                        "id": f["s_name"],
                        "plot_type": "image",
                        "section_name": f["s_name"]
                        .replace("_", " ")
                        .replace("-", " ")
                        .replace(".", " "),
                        "description": "Embedded image <code>{}</code>".format(f["fn"]),
                        "data": img_html,
                    }
                if parsed_data is not None:
                    c_id = parsed_data.get("id", k)
                    if len(parsed_data.get("data", {})) > 0:
                        # HTML/image content is a raw string, not a sample dict
                        if type(parsed_data["data"]) == str:
                            cust_mods[c_id]["data"] = parsed_data["data"]
                        else:
                            cust_mods[c_id]["data"].update(parsed_data["data"])
                        cust_mods[c_id]["config"].update(
                            {j: k for j, k in parsed_data.items() if j != "data"}
                        )
                    else:
                        log.warning("No data found in {}".format(f["fn"]))
                # txt, csv, tsv etc
                else:
                    # Look for configuration details in the header
                    m_config = _find_file_header(f)
                    s_name = None
                    if m_config is not None:
                        c_id = m_config.get("id", k)
                        # Update the base config with anything parsed from the file
                        b_config = cust_mods.get(c_id, {}).get("config", {})
                        b_config.update(m_config)
                        # Now set the module config to the merged dict
                        m_config = dict(b_config)
                        s_name = m_config.get("sample_name")
                    else:
                        c_id = k
                        m_config = cust_mods.get(c_id, {}).get("config", {})
                    # Guess sample name if not given
                    if s_name is None:
                        s_name = bm.clean_s_name(f["s_name"], f["root"])
                    # Guess c_id if no information known
                    if k == "custom_content":
                        c_id = s_name
                    # Add information about the file to the config dict
                    if "files" not in m_config:
                        m_config["files"] = dict()
                    m_config["files"].update(
                        {s_name: {"fn": f["fn"], "root": f["root"]}}
                    )
                    # Guess file format if not given
                    if m_config.get("file_format") is None:
                        m_config["file_format"] = _guess_file_format(f)
                    # Parse data
                    try:
                        parsed_data, conf = _parse_txt(f, m_config)
                        if parsed_data is None or len(parsed_data) == 0:
                            log.warning(
                                "Not able to parse custom data in {}".format(f["fn"])
                            )
                        else:
                            # Did we get a new section id from the file?
                            if conf.get("id") is not None:
                                c_id = conf.get("id")
                            # heatmap - special data type
                            if type(parsed_data) == list:
                                cust_mods[c_id]["data"] = parsed_data
                            elif conf.get("plot_type") == "html":
                                cust_mods[c_id]["data"] = parsed_data
                            else:
                                cust_mods[c_id]["data"].update(parsed_data)
                            cust_mods[c_id]["config"].update(conf)
                    except (IndexError, AttributeError, TypeError):
                        log.error(
                            "Unexpected parsing error for {}".format(f["fn"]),
                            exc_info=True,
                        )
                        raise  # testing
            except Exception as e:
                log.error("Uncaught exception raised for file '{}'".format(f["fn"]))
                log.exception(e)
        # Give log message if no files found for search pattern
        if num_sp_found_files == 0 and k != "custom_content":
            log.debug("No samples found: custom content ({})".format(k))
    # Filter to strip out ignored sample names
    for k in cust_mods:
        cust_mods[k]["data"] = bm.ignore_samples(cust_mods[k]["data"])
    # Remove any configs that have no data
    remove_cids = [k for k in cust_mods if len(cust_mods[k]["data"]) == 0]
    for k in remove_cids:
        del cust_mods[k]
    if len(cust_mods) == 0:
        raise UserWarning
    # Go through each data type
    parsed_modules = list()
    for module_id, mod in cust_mods.items():
        # General Stats
        if mod["config"].get("plot_type") == "generalstats":
            gsheaders = mod["config"].get("pconfig")
            if gsheaders is None:
                # No header config given - build one from the union of all keys
                headers = set()
                for d in mod["data"].values():
                    headers.update(d.keys())
                headers = list(headers)
                headers.sort()
                gsheaders = OrderedDict()
                for h in headers:
                    gsheaders[h] = dict()
            # Headers is a list of dicts
            if type(gsheaders) == list:
                gsheaders_dict = OrderedDict()
                for gsheader in gsheaders:
                    for col_id, col_data in gsheader.items():
                        gsheaders_dict[col_id] = col_data
                gsheaders = gsheaders_dict
            # Add namespace and description if not specified
            for m_id in gsheaders:
                if "namespace" not in gsheaders[m_id]:
                    gsheaders[m_id]["namespace"] = mod["config"].get(
                        "namespace", module_id
                    )
            bm.general_stats_addcols(mod["data"], gsheaders)
        # Initialise this new module class and append to list
        else:
            parsed_modules.append(MultiqcModule(module_id, mod))
            if mod["config"].get("plot_type") == "html":
                log.info("{}: Found 1 sample (html)".format(module_id))
            elif mod["config"].get("plot_type") == "image":
                log.info("{}: Found 1 sample (image)".format(module_id))
            else:
                log.info(
                    "{}: Found {} samples ({})".format(
                        module_id, len(mod["data"]), mod["config"].get("plot_type")
                    )
                )
    # Sort sections if we have a config option for order
    mod_order = getattr(config, "custom_content", {}).get("order", [])
    sorted_modules = [
        parsed_mod
        for parsed_mod in parsed_modules
        if parsed_mod.anchor not in mod_order
    ]
    sorted_modules.extend(
        [
            parsed_mod
            for mod_id in mod_order
            for parsed_mod in parsed_modules
            if parsed_mod.anchor == mod_id
        ]
    )
    # If we only have General Stats columns then there are no module outputs
    if len(sorted_modules) == 0:
        raise UserWarning
    return sorted_modules
|
def custom_module_classes():
    """
    MultiQC Custom Content class. This module does a lot of different
    things depending on the input and is as flexible as possible.
    NB: THIS IS TOTALLY DIFFERENT TO ALL OTHER MODULES

    Returns a list of MultiqcModule instances, one per custom-content
    section found (config-supplied data plus files matched on disk).
    Raises UserWarning when nothing is found, which the caller treats
    as "no samples for this module".
    """
    # Dict to hold parsed data. Each key should contain a custom data type
    # eg. output from a particular script. Note that this script may pick
    # up many different types of data from many different sources.
    # Second level keys should be 'config' and 'data'. Data key should then
    # contain sample names, and finally data.
    cust_mods = defaultdict(lambda: defaultdict(lambda: OrderedDict()))
    # Dictionary to hold search patterns - start with those defined in the config
    search_patterns = ["custom_content"]
    # First - find files using patterns described in the config
    config_data = getattr(config, "custom_data", {})
    for k, f in config_data.items():
        # Check that we have a dictionary
        if type(f) != dict:
            log.debug("config.custom_data row was not a dictionary: {}".format(k))
            continue
        c_id = f.get("id", k)
        # Data supplied in with config (eg. from a multiqc_config.yaml file in working directory)
        if "data" in f:
            # dict.update() raises ValueError when the supplied data is a plain
            # string (HTML plot type has no sample-id keys) - fall back to
            # taking the whole chunk of data as-is
            try:
                cust_mods[c_id]["data"].update(f["data"])
            except ValueError:
                cust_mods[c_id]["data"] = f["data"]
            # NB: use != (value comparison), not 'is not' - identity comparison
            # against a string literal only works by interning accident
            cust_mods[c_id]["config"].update(
                {k: v for k, v in f.items() if k != "data"}
            )
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            continue
        # Custom Content ID has search patterns in the config
        if c_id in report.files:
            cust_mods[c_id]["config"] = f
            cust_mods[c_id]["config"]["id"] = cust_mods[c_id]["config"].get("id", c_id)
            search_patterns.append(c_id)
            continue
        # We should have had something by now
        log.warning(
            "Found section '{}' in config for under custom_data, but no data or search patterns.".format(
                c_id
            )
        )
    # Now go through each of the file search patterns
    bm = BaseMultiqcModule()
    for k in search_patterns:
        num_sp_found_files = 0
        for f in bm.find_log_files(k):
            num_sp_found_files += 1
            # Handle any exception without messing up for remaining custom content files
            try:
                f_extension = os.path.splitext(f["fn"])[1]
                # YAML and JSON files are the easiest
                parsed_data = None
                if f_extension == ".yaml" or f_extension == ".yml":
                    try:
                        parsed_data = yaml_ordered_load(f["f"])
                    except Exception as e:
                        log.warning(
                            "Error parsing YAML file '{}' (probably invalid YAML)".format(
                                f["fn"]
                            )
                        )
                        log.debug("YAML error: {}".format(e), exc_info=True)
                        break
                elif f_extension == ".json":
                    try:
                        # Use OrderedDict for objects so that column order is honoured
                        parsed_data = json.loads(f["f"], object_pairs_hook=OrderedDict)
                    except Exception as e:
                        log.warning(
                            "Error parsing JSON file '{}' (probably invalid JSON)".format(
                                f["fn"]
                            )
                        )
                        log.warning("JSON error: {}".format(e))
                        break
                elif (
                    f_extension == ".png"
                    or f_extension == ".jpeg"
                    or f_extension == ".jpg"
                ):
                    # Embed images directly into the report as base64 data URIs
                    image_string = base64.b64encode(f["f"].read()).decode("utf-8")
                    image_format = "png" if f_extension == ".png" else "jpg"
                    img_html = '<div class="mqc-custom-content-image"><img src="data:image/{};base64,{}" /></div>'.format(
                        image_format, image_string
                    )
                    parsed_data = {
                        "id": f["s_name"],
                        "plot_type": "image",
                        "section_name": f["s_name"]
                        .replace("_", " ")
                        .replace("-", " ")
                        .replace(".", " "),
                        "description": "Embedded image <code>{}</code>".format(f["fn"]),
                        "data": img_html,
                    }
                if parsed_data is not None:
                    c_id = parsed_data.get("id", k)
                    if len(parsed_data.get("data", {})) > 0:
                        # HTML/image content is a raw string, not a sample dict
                        if type(parsed_data["data"]) == str:
                            cust_mods[c_id]["data"] = parsed_data["data"]
                        else:
                            cust_mods[c_id]["data"].update(parsed_data["data"])
                        cust_mods[c_id]["config"].update(
                            {j: k for j, k in parsed_data.items() if j != "data"}
                        )
                    else:
                        log.warning("No data found in {}".format(f["fn"]))
                # txt, csv, tsv etc
                else:
                    # Look for configuration details in the header
                    m_config = _find_file_header(f)
                    s_name = None
                    if m_config is not None:
                        c_id = m_config.get("id", k)
                        # Update the base config with anything parsed from the file
                        b_config = cust_mods.get(c_id, {}).get("config", {})
                        b_config.update(m_config)
                        # Now set the module config to the merged dict
                        m_config = dict(b_config)
                        s_name = m_config.get("sample_name")
                    else:
                        c_id = k
                        m_config = cust_mods.get(c_id, {}).get("config", {})
                    # Guess sample name if not given
                    if s_name is None:
                        s_name = bm.clean_s_name(f["s_name"], f["root"])
                    # Guess c_id if no information known
                    if k == "custom_content":
                        c_id = s_name
                    # Add information about the file to the config dict
                    if "files" not in m_config:
                        m_config["files"] = dict()
                    m_config["files"].update(
                        {s_name: {"fn": f["fn"], "root": f["root"]}}
                    )
                    # Guess file format if not given
                    if m_config.get("file_format") is None:
                        m_config["file_format"] = _guess_file_format(f)
                    # Parse data
                    try:
                        parsed_data, conf = _parse_txt(f, m_config)
                        if parsed_data is None or len(parsed_data) == 0:
                            log.warning(
                                "Not able to parse custom data in {}".format(f["fn"])
                            )
                        else:
                            # Did we get a new section id from the file?
                            if conf.get("id") is not None:
                                c_id = conf.get("id")
                            # heatmap - special data type
                            if type(parsed_data) == list:
                                cust_mods[c_id]["data"] = parsed_data
                            elif conf.get("plot_type") == "html":
                                cust_mods[c_id]["data"] = parsed_data
                            else:
                                cust_mods[c_id]["data"].update(parsed_data)
                            cust_mods[c_id]["config"].update(conf)
                    except (IndexError, AttributeError, TypeError):
                        log.error(
                            "Unexpected parsing error for {}".format(f["fn"]),
                            exc_info=True,
                        )
                        raise  # testing
            except Exception as e:
                log.error("Uncaught exception raised for file '{}'".format(f["fn"]))
                log.exception(e)
        # Give log message if no files found for search pattern
        if num_sp_found_files == 0 and k != "custom_content":
            log.debug("No samples found: custom content ({})".format(k))
    # Filter to strip out ignored sample names
    for k in cust_mods:
        cust_mods[k]["data"] = bm.ignore_samples(cust_mods[k]["data"])
    # Remove any configs that have no data
    remove_cids = [k for k in cust_mods if len(cust_mods[k]["data"]) == 0]
    for k in remove_cids:
        del cust_mods[k]
    if len(cust_mods) == 0:
        raise UserWarning
    # Go through each data type
    parsed_modules = list()
    for module_id, mod in cust_mods.items():
        # General Stats
        if mod["config"].get("plot_type") == "generalstats":
            gsheaders = mod["config"].get("pconfig")
            if gsheaders is None:
                # No header config given - build one from the union of all keys
                headers = set()
                for d in mod["data"].values():
                    headers.update(d.keys())
                headers = list(headers)
                headers.sort()
                gsheaders = OrderedDict()
                for h in headers:
                    gsheaders[h] = dict()
            # Headers is a list of dicts
            if type(gsheaders) == list:
                gsheaders_dict = OrderedDict()
                for gsheader in gsheaders:
                    for col_id, col_data in gsheader.items():
                        gsheaders_dict[col_id] = col_data
                gsheaders = gsheaders_dict
            # Add namespace and description if not specified
            for m_id in gsheaders:
                if "namespace" not in gsheaders[m_id]:
                    gsheaders[m_id]["namespace"] = mod["config"].get(
                        "namespace", module_id
                    )
            bm.general_stats_addcols(mod["data"], gsheaders)
        # Initialise this new module class and append to list
        else:
            parsed_modules.append(MultiqcModule(module_id, mod))
            if mod["config"].get("plot_type") == "html":
                log.info("{}: Found 1 sample (html)".format(module_id))
            elif mod["config"].get("plot_type") == "image":
                log.info("{}: Found 1 sample (image)".format(module_id))
            else:
                log.info(
                    "{}: Found {} samples ({})".format(
                        module_id, len(mod["data"]), mod["config"].get("plot_type")
                    )
                )
    # Sort sections if we have a config option for order
    mod_order = getattr(config, "custom_content", {}).get("order", [])
    sorted_modules = [
        parsed_mod
        for parsed_mod in parsed_modules
        if parsed_mod.anchor not in mod_order
    ]
    sorted_modules.extend(
        [
            parsed_mod
            for mod_id in mod_order
            for parsed_mod in parsed_modules
            if parsed_mod.anchor == mod_id
        ]
    )
    # If we only have General Stats columns then there are no module outputs
    if len(sorted_modules) == 0:
        raise UserWarning
    return sorted_modules
|
https://github.com/ewels/MultiQC/issues/1071
|
[INFO ] multiqc : This is MultiQC v1.8
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching : /home/sande/Dropbox/Studie/PhD/snakemake-workflows/some_data.out
[ERROR ] multiqc : Oops! The 'custom_content' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
None
============================================================
Module custom_content raised an exception: Traceback (most recent call last):
File "/home/sande/anaconda3/envs/snakemake-workflows/envs/multiqc/lib/python3.6/site-packages/multiqc/multiqc.py", line 546, in run
output = mod()
File "/home/sande/anaconda3/envs/snakemake-workflows/envs/multiqc/lib/python3.6/site-packages/multiqc/modules/custom_content/custom_content.py", line 64, in custom_module_classes
cust_mods[c_id]['data'].update( f['data'] )
ValueError: need more than 1 value to unpack
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
ValueError
|
def write_data_file(data, fn, sort_cols=False, data_format=None):
    """Write a data file to the report directory. Will not do anything
    if config.data_dir is not set.
    :param: data - a 2D dict, first key sample name (row header),
            second key field (column header).
    :param: fn - Desired filename. Directory will be prepended automatically.
    :param: sort_cols - Sort columns alphabetically
    :param: data_format - Output format. Defaults to config.data_format (usually tsv)
    :return: None"""
    if config.data_dir is not None:
        # Add relevant file extension to filename
        if data_format is None:
            data_format = config.data_format
        fn = "{}.{}".format(fn, config.data_format_extensions[data_format])

        # JSON encoder class to handle lambda functions
        class MQCJSONEncoder(json.JSONEncoder):
            def default(self, obj):
                if callable(obj):
                    try:
                        # Call with a dummy value so lambdas serialise to a sample output
                        return obj(1)
                    except:
                        return None
                return json.JSONEncoder.default(self, obj)

        # Save file
        with io.open(os.path.join(config.data_dir, fn), "w", encoding="utf-8") as f:
            if data_format == "json":
                jsonstr = json.dumps(
                    data, indent=4, cls=MQCJSONEncoder, ensure_ascii=False
                )
                print(jsonstr.encode("utf-8", "ignore").decode("utf-8"), file=f)
            elif data_format == "yaml":
                yaml.dump(data, f, default_flow_style=False)
            else:
                # Default - tab separated output
                # Convert keys to strings so that mixed-type sample names
                # (e.g. str and float) can be sorted without a TypeError
                data = {str(k): v for k, v in data.items()}
                # Get all headers
                h = ["Sample"]
                for sn in sorted(data.keys()):
                    for k in data[sn].keys():
                        if type(data[sn][k]) is not dict and k not in h:
                            h.append(str(k))
                if sort_cols:
                    # Keep the 'Sample' header in the first column so it stays
                    # aligned with the sample-name cell each row starts with
                    h = [h[0]] + sorted(h[1:])
                # Get the rows
                rows = ["\t".join(h)]
                for sn in sorted(data.keys()):
                    # Make a list starting with the sample name, then each field in order of the header cols
                    l = [str(sn)] + [str(data[sn].get(k, "")) for k in h[1:]]
                    rows.append("\t".join(l))
                body = "\n".join(rows)
                print(body.encode("utf-8", "ignore").decode("utf-8"), file=f)
|
def write_data_file(data, fn, sort_cols=False, data_format=None):
    """Write a data file to the report directory. Will not do anything
    if config.data_dir is not set.
    :param: data - a 2D dict, first key sample name (row header),
            second key field (column header).
    :param: fn - Desired filename. Directory will be prepended automatically.
    :param: sort_cols - Sort columns alphabetically
    :param: data_format - Output format. Defaults to config.data_format (usually tsv)
    :return: None"""
    if config.data_dir is not None:
        # Add relevant file extension to filename
        if data_format is None:
            data_format = config.data_format
        fn = "{}.{}".format(fn, config.data_format_extensions[data_format])

        # JSON encoder class to handle lambda functions
        class MQCJSONEncoder(json.JSONEncoder):
            def default(self, obj):
                if callable(obj):
                    try:
                        # Call with a dummy value so lambdas serialise to a sample output
                        return obj(1)
                    except:
                        return None
                return json.JSONEncoder.default(self, obj)

        # Save file
        with io.open(os.path.join(config.data_dir, fn), "w", encoding="utf-8") as f:
            if data_format == "json":
                jsonstr = json.dumps(
                    data, indent=4, cls=MQCJSONEncoder, ensure_ascii=False
                )
                print(jsonstr.encode("utf-8", "ignore").decode("utf-8"), file=f)
            elif data_format == "yaml":
                yaml.dump(data, f, default_flow_style=False)
            else:
                # Default - tab separated output
                # Convert keys to strings so that mixed-type sample names
                # (e.g. str and float) can be sorted without a TypeError
                data = {str(k): v for k, v in data.items()}
                # Get all headers
                h = ["Sample"]
                for sn in sorted(data.keys()):
                    for k in data[sn].keys():
                        if type(data[sn][k]) is not dict and k not in h:
                            h.append(str(k))
                if sort_cols:
                    # Keep the 'Sample' header in the first column so it stays
                    # aligned with the sample-name cell each row starts with
                    h = [h[0]] + sorted(h[1:])
                # Get the rows
                rows = ["\t".join(h)]
                for sn in sorted(data.keys()):
                    # Make a list starting with the sample name, then each field in order of the header cols
                    l = [str(sn)] + [str(data[sn].get(k, "")) for k in h[1:]]
                    rows.append("\t".join(l))
                body = "\n".join(rows)
                print(body.encode("utf-8", "ignore").decode("utf-8"), file=f)
|
https://github.com/ewels/MultiQC/issues/1091
|
Module custom_content raised an exception: Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/multiqc-1.8.dev0-py3.6.egg/EGG-INFO/scripts/multiqc", line 440, in multiqc
output = mod()
File "/usr/lib/python3.6/site-packages/multiqc-1.8.dev0-py3.6.egg/multiqc/modules/custom_content/custom_content.py", line 226, in custom_module_classes
parsed_modules.append( MultiqcModule(module_id, mod) )
File "/usr/lib/python3.6/site-packages/multiqc-1.8.dev0-py3.6.egg/multiqc/modules/custom_content/custom_content.py", line 272, in __init__
self.write_data_file( mod['data'], "multiqc_{}".format(modname.lower().replace(' ', '_')) )
File "/usr/lib/python3.6/site-packages/multiqc-1.8.dev0-py3.6.egg/multiqc/modules/base_module.py", line 332, in write_data_file
util_functions.write_data_file(data, fn, sort_cols, data_format)
File "/usr/lib/python3.6/site-packages/multiqc-1.8.dev0-py3.6.egg/multiqc/utils/util_functions.py", line 78, in write_data_file
for sn in sorted(data.keys()):
TypeError: '<' not supported between instances of 'str' and 'float'
|
TypeError
|
def run(
analysis_dir,
dirs=False,
dirs_depth=None,
no_clean_sname=False,
title=None,
report_comment=None,
template=None,
module_tag=(),
module=(),
exclude=(),
outdir=None,
ignore=(),
ignore_samples=(),
sample_names=None,
sample_filters=None,
file_list=False,
filename=None,
make_data_dir=False,
no_data_dir=False,
data_format=None,
zip_data_dir=False,
force=True,
ignore_symlinks=False,
export_plots=False,
plots_flat=False,
plots_interactive=False,
lint=False,
make_pdf=False,
no_megaqc_upload=False,
config_file=(),
cl_config=(),
verbose=0,
quiet=False,
no_ansi=False,
kwargs={},
):
"""MultiQC aggregates results from bioinformatics analyses across many samples into a single report.
It searches a given directory for analysis logs and compiles a HTML report.
It's a general use tool, perfect for summarising the output from numerous
bioinformatics tools.
To run, supply with one or more directory to scan for analysis results.
To run here, use 'multiqc .'
See http://multiqc.info for more details.
Author: Phil Ewels (http://phil.ewels.co.uk)
"""
# Set up logging level
loglevel = log.LEVELS.get(min(verbose, 1), "INFO")
if quiet:
loglevel = "WARNING"
log.init_log(logger, loglevel=loglevel, no_ansi=no_ansi)
# Load config files
plugin_hooks.mqc_trigger("before_config")
config.mqc_load_userconfig(config_file)
plugin_hooks.mqc_trigger("config_loaded")
# Command-line config YAML
if len(cl_config) > 0:
config.mqc_cl_config(cl_config)
# Log the command used to launch MultiQC
report.multiqc_command = " ".join(sys.argv)
logger.debug("Command used: {}".format(report.multiqc_command))
# Check that we're running the latest version of MultiQC
if config.no_version_check is not True:
try:
response = urlopen(
"http://multiqc.info/version.php?v={}".format(config.short_version),
timeout=5,
)
remote_version = response.read().decode("utf-8").strip()
if version.StrictVersion(
re.sub("[^0-9\.]", "", remote_version)
) > version.StrictVersion(re.sub("[^0-9\.]", "", config.short_version)):
logger.warn("MultiQC Version {} now available!".format(remote_version))
else:
logger.debug("Latest MultiQC version is {}".format(remote_version))
except Exception as e:
logger.debug(
"Could not connect to multiqc.info for version check: {}".format(e)
)
# Set up key variables (overwrite config vars from command line)
if template is not None:
config.template = template
if title is not None:
config.title = title
if report_comment is not None:
config.report_comment = report_comment
if dirs is True:
config.prepend_dirs = dirs
if dirs_depth is not None:
config.prepend_dirs = True
config.prepend_dirs_depth = dirs_depth
config.analysis_dir = analysis_dir
if outdir is not None:
config.output_dir = outdir
if no_clean_sname:
config.fn_clean_sample_names = False
logger.info("Not cleaning sample names")
if make_data_dir:
config.make_data_dir = True
if no_data_dir:
config.make_data_dir = False
if force:
config.force = True
if ignore_symlinks:
config.ignore_symlinks = True
if zip_data_dir:
config.zip_data_dir = True
if data_format is not None:
config.data_format = data_format
if export_plots:
config.export_plots = True
if plots_flat:
config.plots_force_flat = True
if plots_interactive:
config.plots_force_interactive = True
if lint:
config.lint = True
lint_helpers.run_tests()
if make_pdf:
config.template = "simple"
if no_megaqc_upload:
config.megaqc_upload = False
else:
config.megaqc_upload = True
if sample_names:
config.load_sample_names(sample_names)
config.load_show_hide(sample_filters)
if module_tag is not None:
config.module_tag = module_tag
if len(module) > 0:
config.run_modules = module
if len(exclude) > 0:
config.exclude_modules = exclude
config.kwargs = kwargs # Plugin command line options
# Clean up analysis_dir if a string (interactive environment only)
if isinstance(config.analysis_dir, str):
config.analysis_dir = [config.analysis_dir]
plugin_hooks.mqc_trigger("execution_start")
logger.info("This is MultiQC v{}".format(config.version))
logger.debug("Command : {}".format(" ".join(sys.argv)))
logger.debug("Working dir : {}".format(os.getcwd()))
if make_pdf:
logger.info("--pdf specified. Using non-interactive HTML template.")
logger.info("Template : {}".format(config.template))
if lint:
logger.info("--lint specified. Being strict with validation.")
# Throw a warning if we are running on Python 2
if sys.version_info[0] < 3:
logger.warn(
"You are running MultiQC with Python {}.{}.{}".format(
sys.version_info[0], sys.version_info[1], sys.version_info[2]
)
)
logger.warn("Please upgrade! MultiQC will soon drop support for Python < 3.6")
else:
logger.debug("Running Python {}".format(sys.version.replace("\n", " ")))
# Add files if --file-list option is given
if file_list:
if len(analysis_dir) > 1:
raise ValueError(
"If --file-list is giving, analysis_dir should have only one plain text file."
)
config.analysis_dir = []
with open(analysis_dir[0]) as in_handle:
for line in in_handle:
if os.path.exists(line.strip()):
path = os.path.abspath(line.strip())
config.analysis_dir.append(path)
if len(config.analysis_dir) == 0:
logger.error(
"No files or directories were added from {} using --file-list option.".format(
analysis_dir[0]
)
)
logger.error(
"Please, check that {} contains correct paths.".format(analysis_dir[0])
)
raise ValueError("Any files or directories to be searched.")
if len(ignore) > 0:
logger.debug(
"Ignoring files, directories and paths that match: {}".format(
", ".join(ignore)
)
)
config.fn_ignore_files.extend(ignore)
config.fn_ignore_dirs.extend(ignore)
config.fn_ignore_paths.extend(ignore)
if len(ignore_samples) > 0:
logger.debug(
"Ignoring sample names that match: {}".format(", ".join(ignore_samples))
)
config.sample_names_ignore.extend(ignore_samples)
if filename == "stdout":
config.output_fn = sys.stdout
logger.info("Printing report to stdout")
else:
if title is not None and filename is None:
filename = re.sub("[^\w\.-]", "", re.sub("[-\s]+", "-", title)).strip()
filename += "_multiqc_report"
if filename is not None:
if filename.endswith(".html"):
filename = filename[:-5]
config.output_fn_name = filename
config.data_dir_name = "{}_data".format(filename)
config.plots_dir_name = "{}_plots".format(filename)
if not config.output_fn_name.endswith(".html"):
config.output_fn_name = "{}.html".format(config.output_fn_name)
# Print some status updates
if config.title is not None:
logger.info("Report title: {}".format(config.title))
if dirs:
logger.info("Prepending directory to sample names")
for d in config.analysis_dir:
logger.info("Searching : {}".format(os.path.abspath(d)))
# Prep module configs
config.top_modules = [m if type(m) is dict else {m: {}} for m in config.top_modules]
config.module_order = [
m if type(m) is dict else {m: {}} for m in config.module_order
]
mod_keys = [list(m.keys())[0] for m in config.module_order]
# Lint the module configs
if config.lint:
for m in config.avail_modules.keys():
if m not in mod_keys:
errmsg = "LINT: Module '{}' not found in config.module_order".format(m)
logger.error(errmsg)
report.lint_errors.append(errmsg)
else:
for mo in config.module_order:
if (
m != "custom_content"
and m in mo.keys()
and "module_tag" not in mo[m]
):
errmsg = "LINT: Module '{}' in config.module_order did not have 'module_tag' config".format(
m
)
logger.error(errmsg)
report.lint_errors.append(errmsg)
# Get the avaiable tags to decide which modules to run.
modules_from_tags = set()
if config.module_tag is not None:
tags = config.module_tag
for m in config.module_order:
module_name = list(m.keys())[0] # only one name in each dict
for tag in tags:
for t in m[module_name].get("module_tag", []):
if tag.lower() == t.lower():
modules_from_tags.add(module_name)
# Get the list of modules we want to run, in the order that we want them
run_modules = [
m
for m in config.top_modules
if list(m.keys())[0] in config.avail_modules.keys()
]
run_modules.extend(
[
{m: {}}
for m in config.avail_modules.keys()
if m not in mod_keys and m not in run_modules
]
)
run_modules.extend(
[
m
for m in config.module_order
if list(m.keys())[0] in config.avail_modules.keys()
and list(m.keys())[0] not in [list(rm.keys())[0] for rm in run_modules]
]
)
if len(getattr(config, "run_modules", {})) > 0:
run_modules = [
m for m in run_modules if list(m.keys())[0] in config.run_modules
]
logger.info("Only using modules {}".format(", ".join(config.run_modules)))
elif modules_from_tags:
run_modules = [m for m in run_modules if list(m.keys())[0] in modules_from_tags]
logger.info("Only using modules with '{}' tag".format(", ".join(module_tag)))
if len(getattr(config, "exclude_modules", {})) > 0:
logger.info(
"Excluding modules '{}'".format("', '".join(config.exclude_modules))
)
if "general_stats" in config.exclude_modules:
config.skip_generalstats = True
config.exclude_modules = tuple(
x for x in config.exclude_modules if x != "general_stats"
)
run_modules = [
m for m in run_modules if list(m.keys())[0] not in config.exclude_modules
]
if len(run_modules) == 0:
logger.critical("No analysis modules specified!")
sys.exit(1)
run_module_names = [list(m.keys())[0] for m in run_modules]
logger.debug("Analysing modules: {}".format(", ".join(run_module_names)))
# Create the temporary working directories
tmp_dir = tempfile.mkdtemp()
logger.debug("Using temporary directory for creating report: {}".format(tmp_dir))
config.data_tmp_dir = os.path.join(tmp_dir, "multiqc_data")
if filename != "stdout" and config.make_data_dir == True:
config.data_dir = config.data_tmp_dir
os.makedirs(config.data_dir)
else:
config.data_dir = None
config.plots_tmp_dir = os.path.join(tmp_dir, "multiqc_plots")
if filename != "stdout" and config.export_plots == True:
config.plots_dir = config.plots_tmp_dir
os.makedirs(config.plots_dir)
else:
config.plots_dir = None
# Load the template
template_mod = config.avail_templates[config.template].load()
# Add an output subdirectory if specified by template
try:
config.output_dir = os.path.join(config.output_dir, template_mod.output_subdir)
except AttributeError:
pass # No subdirectory variable given
# Add custom content section names
try:
if "custom_content" in run_module_names:
run_module_names.extend(config.custom_data.keys())
except AttributeError:
pass # custom_data not in config
# Get the list of files to search
report.get_filelist(run_module_names)
# Run the modules!
plugin_hooks.mqc_trigger("before_modules")
report.modules_output = list()
sys_exit_code = 0
for mod_dict in run_modules:
try:
this_module = list(mod_dict.keys())[0]
mod_cust_config = list(mod_dict.values())[0]
mod = config.avail_modules[this_module].load()
mod.mod_cust_config = (
mod_cust_config # feels bad doing this, but seems to work
)
output = mod()
if type(output) != list:
output = [output]
for m in output:
report.modules_output.append(m)
# Copy over css & js files if requested by the theme
try:
for to, path in report.modules_output[-1].css.items():
copy_to = os.path.join(tmp_dir, to)
os.makedirs(os.path.dirname(copy_to))
shutil.copyfile(path, copy_to)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
except AttributeError:
pass
try:
for to, path in report.modules_output[-1].js.items():
copy_to = os.path.join(tmp_dir, to)
os.makedirs(os.path.dirname(copy_to))
shutil.copyfile(path, copy_to)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
except AttributeError:
pass
except UserWarning:
logger.debug("No samples found: {}".format(list(mod_dict.keys())[0]))
except KeyboardInterrupt:
shutil.rmtree(tmp_dir)
logger.critical(
"User Cancelled Execution!\n{eq}\n{tb}{eq}\n".format(
eq=("=" * 60), tb=traceback.format_exc()
)
+ "User Cancelled Execution!\nExiting MultiQC..."
)
sys.exit(1)
except:
# Flag the error, but carry on
logger.error(
"Oops! The '{}' MultiQC module broke... \n".format(this_module)
+ " Please copy the following traceback and report it at "
+ "https://github.com/ewels/MultiQC/issues \n"
+ " If possible, please include a log file that triggers the error - "
+ "the last file found was:\n"
+ " {}\n".format(report.last_found_file)
+ ("=" * 60)
+ "\nModule {} raised an exception: {}".format(
this_module, traceback.format_exc()
)
+ ("=" * 60)
)
sys_exit_code = 1
# Did we find anything?
if len(report.modules_output) == 0:
logger.warn("No analysis results found. Cleaning up..")
shutil.rmtree(tmp_dir)
logger.info("MultiQC complete")
# Exit with an error code if a module broke
sys.exit(sys_exit_code)
# Sort the report module output if we have a config
if len(getattr(config, "report_section_order", {})) > 0:
section_id_order = {}
idx = 10
for mod in reversed(report.modules_output):
section_id_order[mod.anchor] = idx
idx += 10
for anchor, ss in config.report_section_order.items():
if anchor not in section_id_order.keys():
logger.debug(
"Reordering sections: anchor '{}' not found.".format(anchor)
)
continue
if ss.get("order") is not None:
section_id_order[anchor] = ss["order"]
if ss.get("after") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["after"]] + 1
if ss.get("before") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["before"]] - 1
sorted_ids = sorted(section_id_order, key=section_id_order.get)
report.modules_output = [
mod
for i in reversed(sorted_ids)
for mod in report.modules_output
if mod.anchor == i
]
# Sort the report sections if we have a config
# Basically the same as above, but sections within a module
if len(getattr(config, "report_section_order", {})) > 0:
# Go through each module
for midx, mod in enumerate(report.modules_output):
section_id_order = {}
# Get a list of the section anchors
idx = 10
for s in mod.sections:
section_id_order[s["anchor"]] = idx
idx += 10
# Go through each section to be reordered
for anchor, ss in config.report_section_order.items():
# Section to be moved is not in this module
if anchor not in section_id_order.keys():
logger.debug(
"Reordering sections: anchor '{}' not found for module '{}'.".format(
anchor, mod.name
)
)
continue
if ss == "remove":
section_id_order[anchor] = False
continue
if ss.get("order") is not None:
section_id_order[anchor] = ss["order"]
if ss.get("after") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["after"]] + 1
if ss.get("before") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["before"]] - 1
# Remove module sections
section_id_order = {
s: o for s, o in section_id_order.items() if o is not False
}
# Sort the module sections
sorted_ids = sorted(section_id_order, key=section_id_order.get)
report.modules_output[midx].sections = [
s for i in sorted_ids for s in mod.sections if s["anchor"] == i
]
plugin_hooks.mqc_trigger("after_modules")
# Remove empty data sections from the General Stats table
empty_keys = [i for i, d in enumerate(report.general_stats_data[:]) if len(d) == 0]
empty_keys.sort(reverse=True)
for i in empty_keys:
del report.general_stats_data[i]
del report.general_stats_headers[i]
# Add general-stats IDs to table row headers
for idx, h in enumerate(report.general_stats_headers):
for k in h.keys():
if "rid" not in h[k]:
h[k]["rid"] = re.sub(r"\W+", "_", k).strip().strip("_")
ns_html = re.sub(r"\W+", "_", h[k]["namespace"]).strip().strip("_").lower()
report.general_stats_headers[idx][k]["rid"] = report.save_htmlid(
"mqc-generalstats-{}-{}".format(ns_html, h[k]["rid"])
)
# Generate the General Statistics HTML & write to file
if len(report.general_stats_data) > 0:
pconfig = {
"id": "general_stats_table",
"table_title": "General Statistics",
"save_file": True,
"raw_data_fn": "multiqc_general_stats",
}
report.general_stats_html = table.plot(
report.general_stats_data, report.general_stats_headers, pconfig
)
else:
config.skip_generalstats = True
# Write the report sources to disk
if config.data_dir is not None:
report.data_sources_tofile()
# Compress the report plot JSON data
logger.info("Compressing plot data")
report.plot_compressed_json = report.compress_json(report.plot_data)
plugin_hooks.mqc_trigger("before_report_generation")
# Data Export / MegaQC integration - save report data to file or send report data to an API endpoint
if (config.data_dump_file or config.megaqc_url) and config.megaqc_upload:
multiqc_json_dump = megaqc.multiqc_dump_json(report)
if config.data_dump_file:
util_functions.write_data_file(
multiqc_json_dump, "multiqc_data", False, "json"
)
if config.megaqc_url:
megaqc.multiqc_api_post(multiqc_json_dump)
# Make the final report path & data directories
if filename != "stdout":
config.output_fn = os.path.join(config.output_dir, config.output_fn_name)
config.data_dir = os.path.join(config.output_dir, config.data_dir_name)
config.plots_dir = os.path.join(config.output_dir, config.plots_dir_name)
# Check for existing reports and remove if -f was specified
if (
os.path.exists(config.output_fn)
or (config.make_data_dir and os.path.exists(config.data_dir))
or (config.export_plots and os.path.exists(config.plots_dir))
):
if config.force:
if os.path.exists(config.output_fn):
logger.warning(
"Deleting : {} (-f was specified)".format(
os.path.relpath(config.output_fn)
)
)
os.remove(config.output_fn)
if config.make_data_dir and os.path.exists(config.data_dir):
logger.warning(
"Deleting : {} (-f was specified)".format(
os.path.relpath(config.data_dir)
)
)
shutil.rmtree(config.data_dir)
if config.export_plots and os.path.exists(config.plots_dir):
logger.warning(
"Deleting : {} (-f was specified)".format(
os.path.relpath(config.plots_dir)
)
)
shutil.rmtree(config.plots_dir)
else:
# Set up the base names of the report and the data dir
report_num = 1
report_base, report_ext = os.path.splitext(config.output_fn_name)
dir_base = os.path.basename(config.data_dir)
plots_base = os.path.basename(config.plots_dir)
# Iterate through appended numbers until we find one that's free
while (
os.path.exists(config.output_fn)
or (config.make_data_dir and os.path.exists(config.data_dir))
or (config.export_plots and os.path.exists(config.plots_dir))
):
config.output_fn = os.path.join(
config.output_dir,
"{}_{}{}".format(report_base, report_num, report_ext),
)
config.data_dir = os.path.join(
config.output_dir, "{}_{}".format(dir_base, report_num)
)
config.plots_dir = os.path.join(
config.output_dir, "{}_{}".format(plots_base, report_num)
)
report_num += 1
config.output_fn_name = os.path.basename(config.output_fn)
config.data_dir_name = os.path.basename(config.data_dir)
config.plots_dir_name = os.path.basename(config.plots_dir)
logger.warning("Previous MultiQC output found! Adjusting filenames..")
logger.warning(
"Use -f or --force to overwrite existing reports instead"
)
# Make directories for report if needed
if not os.path.exists(os.path.dirname(config.output_fn)):
os.makedirs(os.path.dirname(config.output_fn))
logger.info("Report : {}".format(os.path.relpath(config.output_fn)))
if config.make_data_dir == False:
logger.info("Data : None")
else:
# Make directories for data_dir
logger.info("Data : {}".format(os.path.relpath(config.data_dir)))
# Modules have run, so data directory should be complete by now. Move its contents.
logger.debug(
"Moving data file from '{}' to '{}'".format(
config.data_tmp_dir, config.data_dir
)
)
copy_tree(config.data_tmp_dir, config.data_dir)
shutil.rmtree(config.data_tmp_dir)
# Copy across the static plot images if requested
if config.export_plots:
config.plots_dir = os.path.join(config.output_dir, config.plots_dir_name)
if os.path.exists(config.plots_dir):
if config.force:
logger.warning(
"Deleting : {} (-f was specified)".format(
os.path.relpath(config.plots_dir)
)
)
shutil.rmtree(config.plots_dir)
else:
logger.error(
"Output directory {} already exists.".format(config.plots_dir)
)
logger.info("Use -f or --force to overwrite existing reports")
shutil.rmtree(tmp_dir)
sys.exit(1)
logger.info("Plots : {}".format(os.path.relpath(config.plots_dir)))
# Modules have run, so plots directory should be complete by now. Move its contents.
logger.debug(
"Moving plots directory from '{}' to '{}'".format(
config.plots_tmp_dir, config.plots_dir
)
)
copy_tree(config.plots_tmp_dir, config.plots_dir)
shutil.rmtree(config.plots_tmp_dir)
plugin_hooks.mqc_trigger("before_template")
# Load in parent template files first if a child theme
try:
parent_template = config.avail_templates[template_mod.template_parent].load()
copy_tree(parent_template.template_dir, tmp_dir)
except AttributeError:
pass # Not a child theme
# Copy the template files to the tmp directory (distutils overwrites parent theme files)
copy_tree(template_mod.template_dir, tmp_dir)
# Function to include file contents in Jinja template
def include_file(name, fdir=tmp_dir, b64=False):
try:
if fdir is None:
fdir = ""
if b64:
with io.open(os.path.join(fdir, name), "rb") as f:
return base64.b64encode(f.read()).decode("utf-8")
else:
with io.open(os.path.join(fdir, name), "r", encoding="utf-8") as f:
return f.read()
except (OSError, IOError) as e:
logger.error("Could not include file '{}': {}".format(name, e))
# Load the report template
try:
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmp_dir))
env.globals["include_file"] = include_file
j_template = env.get_template(template_mod.base_fn)
except:
raise IOError(
"Could not load {} template file '{}'".format(
config.template, template_mod.base_fn
)
)
# Use jinja2 to render the template and overwrite
config.analysis_dir = [os.path.realpath(d) for d in config.analysis_dir]
report_output = j_template.render(report=report, config=config)
if filename == "stdout":
print(report_output.encode("utf-8"), file=sys.stdout)
else:
try:
with io.open(config.output_fn, "w", encoding="utf-8") as f:
print(report_output, file=f)
except IOError as e:
raise IOError(
"Could not print report to '{}' - {}".format(
config.output_fn, IOError(e)
)
)
# Copy over files if requested by the theme
try:
for f in template_mod.copy_files:
fn = os.path.join(tmp_dir, f)
dest_dir = os.path.join(os.path.dirname(config.output_fn), f)
copy_tree(fn, dest_dir)
except AttributeError:
pass # No files to copy
# Clean up temporary directory
shutil.rmtree(tmp_dir)
# Zip the data directory if requested
if config.zip_data_dir and config.data_dir is not None:
shutil.make_archive(config.data_dir, "zip", config.data_dir)
shutil.rmtree(config.data_dir)
# Try to create a PDF if requested
if make_pdf:
try:
pdf_fn_name = config.output_fn.replace(".html", ".pdf")
pandoc_call = [
"pandoc",
"--standalone",
config.output_fn,
"--output",
pdf_fn_name,
"--pdf-engine=xelatex",
"-V",
"documentclass=article",
"-V",
"geometry=margin=1in",
"-V",
"title=",
]
if config.pandoc_template is not None:
pandoc_call.append("--template={}".format(config.pandoc_template))
logger.debug(
"Attempting Pandoc conversion to PDF with following command:\n{}".format(
" ".join(pandoc_call)
)
)
pdf_exit_code = subprocess.call(pandoc_call)
if pdf_exit_code != 0:
logger.error(
"Error creating PDF! Pandoc returned a non-zero exit code."
)
else:
logger.info("PDF Report : {}".format(pdf_fn_name))
except OSError as e:
if e.errno == os.errno.ENOENT:
logger.error(
"Error creating PDF - pandoc not found. Is it installed? http://pandoc.org/"
)
else:
logger.error(
"Error creating PDF! Something went wrong when creating the PDF\n"
+ ("=" * 60)
+ "\n{}\n".format(traceback.format_exc())
+ ("=" * 60)
)
plugin_hooks.mqc_trigger("execution_finish")
logger.info("MultiQC complete")
if lint and len(report.lint_errors) > 0:
logger.error(
"Found {} linting errors!\n{}".format(
len(report.lint_errors), "\n".join(report.lint_errors)
)
)
sys_exit_code = 1
# Move the log file into the data directory
log.move_tmp_log(logger)
# Return the appropriate error code (eg. 1 if a module broke, 0 on success)
return sys_exit_code
|
def run(
analysis_dir,
dirs=False,
dirs_depth=None,
no_clean_sname=False,
title=None,
report_comment=None,
template=None,
module_tag=(),
module=(),
exclude=(),
outdir=None,
ignore=(),
ignore_samples=(),
sample_names=None,
sample_filters=None,
file_list=False,
filename=None,
make_data_dir=False,
no_data_dir=False,
data_format=None,
zip_data_dir=False,
force=True,
ignore_symlinks=False,
export_plots=False,
plots_flat=False,
plots_interactive=False,
lint=False,
make_pdf=False,
no_megaqc_upload=False,
config_file=(),
cl_config=(),
verbose=0,
quiet=False,
no_ansi=False,
kwargs={},
):
"""MultiQC aggregates results from bioinformatics analyses across many samples into a single report.
It searches a given directory for analysis logs and compiles a HTML report.
It's a general use tool, perfect for summarising the output from numerous
bioinformatics tools.
To run, supply with one or more directory to scan for analysis results.
To run here, use 'multiqc .'
See http://multiqc.info for more details.
Author: Phil Ewels (http://phil.ewels.co.uk)
"""
# Set up logging level
loglevel = log.LEVELS.get(min(verbose, 1), "INFO")
if quiet:
loglevel = "WARNING"
log.init_log(logger, loglevel=loglevel, no_ansi=no_ansi)
# Load config files
plugin_hooks.mqc_trigger("before_config")
config.mqc_load_userconfig(config_file)
plugin_hooks.mqc_trigger("config_loaded")
# Command-line config YAML
if len(cl_config) > 0:
config.mqc_cl_config(cl_config)
# Log the command used to launch MultiQC
report.multiqc_command = " ".join(sys.argv)
logger.debug("Command used: {}".format(report.multiqc_command))
# Check that we're running the latest version of MultiQC
if config.no_version_check is not True:
try:
response = urlopen(
"http://multiqc.info/version.php?v={}".format(config.short_version),
timeout=5,
)
remote_version = response.read().decode("utf-8").strip()
if version.StrictVersion(
re.sub("[^0-9\.]", "", remote_version)
) > version.StrictVersion(re.sub("[^0-9\.]", "", config.short_version)):
logger.warn("MultiQC Version {} now available!".format(remote_version))
else:
logger.debug("Latest MultiQC version is {}".format(remote_version))
except Exception as e:
logger.debug(
"Could not connect to multiqc.info for version check: {}".format(e)
)
# Set up key variables (overwrite config vars from command line)
if template is not None:
config.template = template
if title is not None:
config.title = title
if report_comment is not None:
config.report_comment = report_comment
if dirs is True:
config.prepend_dirs = dirs
if dirs_depth is not None:
config.prepend_dirs = True
config.prepend_dirs_depth = dirs_depth
config.analysis_dir = analysis_dir
if outdir is not None:
config.output_dir = outdir
if no_clean_sname:
config.fn_clean_sample_names = False
logger.info("Not cleaning sample names")
if make_data_dir:
config.make_data_dir = True
if no_data_dir:
config.make_data_dir = False
if force:
config.force = True
if ignore_symlinks:
config.ignore_symlinks = True
if zip_data_dir:
config.zip_data_dir = True
if data_format is not None:
config.data_format = data_format
if export_plots:
config.export_plots = True
if plots_flat:
config.plots_force_flat = True
if plots_interactive:
config.plots_force_interactive = True
if lint:
config.lint = True
lint_helpers.run_tests()
if make_pdf:
config.template = "simple"
if no_megaqc_upload:
config.megaqc_upload = False
else:
config.megaqc_upload = True
if sample_names:
config.load_sample_names(sample_names)
config.load_show_hide(sample_filters)
if module_tag is not None:
config.module_tag = module_tag
if len(module) > 0:
config.run_modules = module
if len(exclude) > 0:
config.exclude_modules = exclude
config.kwargs = kwargs # Plugin command line options
# Clean up analysis_dir if a string (interactive environment only)
if isinstance(config.analysis_dir, str):
config.analysis_dir = [config.analysis_dir]
plugin_hooks.mqc_trigger("execution_start")
logger.info("This is MultiQC v{}".format(config.version))
logger.debug("Command : {}".format(" ".join(sys.argv)))
logger.debug("Working dir : {}".format(os.getcwd()))
if make_pdf:
logger.info("--pdf specified. Using non-interactive HTML template.")
logger.info("Template : {}".format(config.template))
if lint:
logger.info("--lint specified. Being strict with validation.")
# Throw a warning if we are running on Python 2
if sys.version_info[0] < 3:
logger.warn(
"You are running MultiQC with Python {}.{}.{}".format(
sys.version_info[0], sys.version_info[1], sys.version_info[2]
)
)
logger.warn("Please upgrade! MultiQC will soon drop support for Python < 3.6")
else:
logger.debug("Running Python {}".format(sys.version.replace("\n", " ")))
# Add files if --file-list option is given
if file_list:
if len(analysis_dir) > 1:
raise ValueError(
"If --file-list is giving, analysis_dir should have only one plain text file."
)
config.analysis_dir = []
with open(analysis_dir[0]) as in_handle:
for line in in_handle:
if os.path.exists(line.strip()):
path = os.path.abspath(line.strip())
config.analysis_dir.append(path)
if len(config.analysis_dir) == 0:
logger.error(
"No files or directories were added from {} using --file-list option.".format(
analysis_dir[0]
)
)
logger.error(
"Please, check that {} contains correct paths.".format(analysis_dir[0])
)
raise ValueError("Any files or directories to be searched.")
if len(ignore) > 0:
logger.debug(
"Ignoring files, directories and paths that match: {}".format(
", ".join(ignore)
)
)
config.fn_ignore_files.extend(ignore)
config.fn_ignore_dirs.extend(ignore)
config.fn_ignore_paths.extend(ignore)
if len(ignore_samples) > 0:
logger.debug(
"Ignoring sample names that match: {}".format(", ".join(ignore_samples))
)
config.sample_names_ignore.extend(ignore_samples)
if filename == "stdout":
config.output_fn = sys.stdout
logger.info("Printing report to stdout")
else:
if title is not None and filename is None:
filename = re.sub("[^\w\.-]", "", re.sub("[-\s]+", "-", title)).strip()
filename += "_multiqc_report"
if filename is not None:
if filename.endswith(".html"):
filename = filename[:-5]
config.output_fn_name = filename
config.data_dir_name = "{}_data".format(filename)
if not config.output_fn_name.endswith(".html"):
config.output_fn_name = "{}.html".format(config.output_fn_name)
# Print some status updates
if config.title is not None:
logger.info("Report title: {}".format(config.title))
if dirs:
logger.info("Prepending directory to sample names")
for d in config.analysis_dir:
logger.info("Searching : {}".format(os.path.abspath(d)))
# Prep module configs
config.top_modules = [m if type(m) is dict else {m: {}} for m in config.top_modules]
config.module_order = [
m if type(m) is dict else {m: {}} for m in config.module_order
]
mod_keys = [list(m.keys())[0] for m in config.module_order]
# Lint the module configs
if config.lint:
for m in config.avail_modules.keys():
if m not in mod_keys:
errmsg = "LINT: Module '{}' not found in config.module_order".format(m)
logger.error(errmsg)
report.lint_errors.append(errmsg)
else:
for mo in config.module_order:
if (
m != "custom_content"
and m in mo.keys()
and "module_tag" not in mo[m]
):
errmsg = "LINT: Module '{}' in config.module_order did not have 'module_tag' config".format(
m
)
logger.error(errmsg)
report.lint_errors.append(errmsg)
# Get the avaiable tags to decide which modules to run.
modules_from_tags = set()
if config.module_tag is not None:
tags = config.module_tag
for m in config.module_order:
module_name = list(m.keys())[0] # only one name in each dict
for tag in tags:
for t in m[module_name].get("module_tag", []):
if tag.lower() == t.lower():
modules_from_tags.add(module_name)
# Get the list of modules we want to run, in the order that we want them
run_modules = [
m
for m in config.top_modules
if list(m.keys())[0] in config.avail_modules.keys()
]
run_modules.extend(
[
{m: {}}
for m in config.avail_modules.keys()
if m not in mod_keys and m not in run_modules
]
)
run_modules.extend(
[
m
for m in config.module_order
if list(m.keys())[0] in config.avail_modules.keys()
and list(m.keys())[0] not in [list(rm.keys())[0] for rm in run_modules]
]
)
if len(getattr(config, "run_modules", {})) > 0:
run_modules = [
m for m in run_modules if list(m.keys())[0] in config.run_modules
]
logger.info("Only using modules {}".format(", ".join(config.run_modules)))
elif modules_from_tags:
run_modules = [m for m in run_modules if list(m.keys())[0] in modules_from_tags]
logger.info("Only using modules with '{}' tag".format(", ".join(module_tag)))
if len(getattr(config, "exclude_modules", {})) > 0:
logger.info(
"Excluding modules '{}'".format("', '".join(config.exclude_modules))
)
if "general_stats" in config.exclude_modules:
config.skip_generalstats = True
config.exclude_modules = tuple(
x for x in config.exclude_modules if x != "general_stats"
)
run_modules = [
m for m in run_modules if list(m.keys())[0] not in config.exclude_modules
]
if len(run_modules) == 0:
logger.critical("No analysis modules specified!")
sys.exit(1)
run_module_names = [list(m.keys())[0] for m in run_modules]
logger.debug("Analysing modules: {}".format(", ".join(run_module_names)))
# Create the temporary working directories
tmp_dir = tempfile.mkdtemp()
logger.debug("Using temporary directory for creating report: {}".format(tmp_dir))
config.data_tmp_dir = os.path.join(tmp_dir, "multiqc_data")
if filename != "stdout" and config.make_data_dir == True:
config.data_dir = config.data_tmp_dir
os.makedirs(config.data_dir)
else:
config.data_dir = None
config.plots_tmp_dir = os.path.join(tmp_dir, "multiqc_plots")
if filename != "stdout" and config.export_plots == True:
config.plots_dir = config.plots_tmp_dir
os.makedirs(config.plots_dir)
# Load the template
template_mod = config.avail_templates[config.template].load()
# Add an output subdirectory if specified by template
try:
config.output_dir = os.path.join(config.output_dir, template_mod.output_subdir)
except AttributeError:
pass # No subdirectory variable given
# Add custom content section names
try:
if "custom_content" in run_module_names:
run_module_names.extend(config.custom_data.keys())
except AttributeError:
pass # custom_data not in config
# Get the list of files to search
report.get_filelist(run_module_names)
# Run the modules!
plugin_hooks.mqc_trigger("before_modules")
report.modules_output = list()
sys_exit_code = 0
for mod_dict in run_modules:
try:
this_module = list(mod_dict.keys())[0]
mod_cust_config = list(mod_dict.values())[0]
mod = config.avail_modules[this_module].load()
mod.mod_cust_config = (
mod_cust_config # feels bad doing this, but seems to work
)
output = mod()
if type(output) != list:
output = [output]
for m in output:
report.modules_output.append(m)
# Copy over css & js files if requested by the theme
try:
for to, path in report.modules_output[-1].css.items():
copy_to = os.path.join(tmp_dir, to)
os.makedirs(os.path.dirname(copy_to))
shutil.copyfile(path, copy_to)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
except AttributeError:
pass
try:
for to, path in report.modules_output[-1].js.items():
copy_to = os.path.join(tmp_dir, to)
os.makedirs(os.path.dirname(copy_to))
shutil.copyfile(path, copy_to)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
except AttributeError:
pass
except UserWarning:
logger.debug("No samples found: {}".format(list(mod_dict.keys())[0]))
except KeyboardInterrupt:
shutil.rmtree(tmp_dir)
logger.critical(
"User Cancelled Execution!\n{eq}\n{tb}{eq}\n".format(
eq=("=" * 60), tb=traceback.format_exc()
)
+ "User Cancelled Execution!\nExiting MultiQC..."
)
sys.exit(1)
except:
# Flag the error, but carry on
logger.error(
"Oops! The '{}' MultiQC module broke... \n".format(this_module)
+ " Please copy the following traceback and report it at "
+ "https://github.com/ewels/MultiQC/issues \n"
+ " If possible, please include a log file that triggers the error - "
+ "the last file found was:\n"
+ " {}\n".format(report.last_found_file)
+ ("=" * 60)
+ "\nModule {} raised an exception: {}".format(
this_module, traceback.format_exc()
)
+ ("=" * 60)
)
sys_exit_code = 1
# Did we find anything?
if len(report.modules_output) == 0:
logger.warn("No analysis results found. Cleaning up..")
shutil.rmtree(tmp_dir)
logger.info("MultiQC complete")
# Exit with an error code if a module broke
sys.exit(sys_exit_code)
# Sort the report module output if we have a config
if len(getattr(config, "report_section_order", {})) > 0:
section_id_order = {}
idx = 10
for mod in reversed(report.modules_output):
section_id_order[mod.anchor] = idx
idx += 10
for anchor, ss in config.report_section_order.items():
if anchor not in section_id_order.keys():
logger.debug(
"Reordering sections: anchor '{}' not found.".format(anchor)
)
continue
if ss.get("order") is not None:
section_id_order[anchor] = ss["order"]
if ss.get("after") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["after"]] + 1
if ss.get("before") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["before"]] - 1
sorted_ids = sorted(section_id_order, key=section_id_order.get)
report.modules_output = [
mod
for i in reversed(sorted_ids)
for mod in report.modules_output
if mod.anchor == i
]
# Sort the report sections if we have a config
# Basically the same as above, but sections within a module
if len(getattr(config, "report_section_order", {})) > 0:
# Go through each module
for midx, mod in enumerate(report.modules_output):
section_id_order = {}
# Get a list of the section anchors
idx = 10
for s in mod.sections:
section_id_order[s["anchor"]] = idx
idx += 10
# Go through each section to be reordered
for anchor, ss in config.report_section_order.items():
# Section to be moved is not in this module
if anchor not in section_id_order.keys():
logger.debug(
"Reordering sections: anchor '{}' not found for module '{}'.".format(
anchor, mod.name
)
)
continue
if ss == "remove":
section_id_order[anchor] = False
continue
if ss.get("order") is not None:
section_id_order[anchor] = ss["order"]
if ss.get("after") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["after"]] + 1
if ss.get("before") in section_id_order.keys():
section_id_order[anchor] = section_id_order[ss["before"]] - 1
# Remove module sections
section_id_order = {
s: o for s, o in section_id_order.items() if o is not False
}
# Sort the module sections
sorted_ids = sorted(section_id_order, key=section_id_order.get)
report.modules_output[midx].sections = [
s for i in sorted_ids for s in mod.sections if s["anchor"] == i
]
plugin_hooks.mqc_trigger("after_modules")
# Remove empty data sections from the General Stats table
empty_keys = [i for i, d in enumerate(report.general_stats_data[:]) if len(d) == 0]
empty_keys.sort(reverse=True)
for i in empty_keys:
del report.general_stats_data[i]
del report.general_stats_headers[i]
# Add general-stats IDs to table row headers
for idx, h in enumerate(report.general_stats_headers):
for k in h.keys():
if "rid" not in h[k]:
h[k]["rid"] = re.sub(r"\W+", "_", k).strip().strip("_")
ns_html = re.sub(r"\W+", "_", h[k]["namespace"]).strip().strip("_").lower()
report.general_stats_headers[idx][k]["rid"] = report.save_htmlid(
"mqc-generalstats-{}-{}".format(ns_html, h[k]["rid"])
)
# Generate the General Statistics HTML & write to file
if len(report.general_stats_data) > 0:
pconfig = {
"id": "general_stats_table",
"table_title": "General Statistics",
"save_file": True,
"raw_data_fn": "multiqc_general_stats",
}
report.general_stats_html = table.plot(
report.general_stats_data, report.general_stats_headers, pconfig
)
else:
config.skip_generalstats = True
# Write the report sources to disk
if config.data_dir is not None:
report.data_sources_tofile()
# Compress the report plot JSON data
logger.info("Compressing plot data")
report.plot_compressed_json = report.compress_json(report.plot_data)
plugin_hooks.mqc_trigger("before_report_generation")
# Data Export / MegaQC integration - save report data to file or send report data to an API endpoint
if (config.data_dump_file or config.megaqc_url) and config.megaqc_upload:
multiqc_json_dump = megaqc.multiqc_dump_json(report)
if config.data_dump_file:
util_functions.write_data_file(
multiqc_json_dump, "multiqc_data", False, "json"
)
if config.megaqc_url:
megaqc.multiqc_api_post(multiqc_json_dump)
# Make the final report path & data directories
if filename != "stdout":
config.output_fn = os.path.join(config.output_dir, config.output_fn_name)
config.data_dir = os.path.join(config.output_dir, config.data_dir_name)
# Check for existing reports and remove if -f was specified
if os.path.exists(config.output_fn) or (
config.make_data_dir and os.path.exists(config.data_dir)
):
if config.force:
if os.path.exists(config.output_fn):
logger.warning(
"Deleting : {} (-f was specified)".format(
os.path.relpath(config.output_fn)
)
)
os.remove(config.output_fn)
if config.make_data_dir and os.path.exists(config.data_dir):
logger.warning(
"Deleting : {} (-f was specified)".format(
os.path.relpath(config.data_dir)
)
)
shutil.rmtree(config.data_dir)
else:
# Set up the base names of the report and the data dir
report_num = 1
report_base, report_ext = os.path.splitext(config.output_fn_name)
dir_base = os.path.basename(config.data_dir)
# Iterate through appended numbers until we find one that's free
while os.path.exists(config.output_fn) or (
config.make_data_dir and os.path.exists(config.data_dir)
):
config.output_fn = os.path.join(
config.output_dir,
"{}_{}{}".format(report_base, report_num, report_ext),
)
config.data_dir = os.path.join(
config.output_dir, "{}_{}".format(dir_base, report_num)
)
report_num += 1
config.output_fn_name = os.path.basename(config.output_fn)
config.data_dir_name = os.path.basename(config.data_dir)
logger.warning("Previous MultiQC output found! Adjusting filenames..")
logger.warning(
"Use -f or --force to overwrite existing reports instead"
)
# Make directories for report if needed
if not os.path.exists(os.path.dirname(config.output_fn)):
os.makedirs(os.path.dirname(config.output_fn))
logger.info("Report : {}".format(os.path.relpath(config.output_fn)))
if config.make_data_dir == False:
logger.info("Data : None")
else:
# Make directories for data_dir
logger.info("Data : {}".format(os.path.relpath(config.data_dir)))
if not os.path.exists(config.data_dir):
os.makedirs(config.data_dir)
# Modules have run, so data directory should be complete by now. Move its contents.
for f in os.listdir(config.data_tmp_dir):
fn = os.path.join(config.data_tmp_dir, f)
logger.debug(
"Moving data file from '{}' to '{}'".format(fn, config.data_dir)
)
shutil.move(fn, config.data_dir)
# Copy across the static plot images if requested
if config.export_plots:
config.plots_dir = os.path.join(config.output_dir, config.plots_dir_name)
if os.path.exists(config.plots_dir):
if config.force:
logger.warning(
"Deleting : {} (-f was specified)".format(
os.path.relpath(config.plots_dir)
)
)
shutil.rmtree(config.plots_dir)
else:
logger.error(
"Output directory {} already exists.".format(config.plots_dir)
)
logger.info("Use -f or --force to overwrite existing reports")
shutil.rmtree(tmp_dir)
sys.exit(1)
os.makedirs(config.plots_dir)
logger.info("Plots : {}".format(os.path.relpath(config.plots_dir)))
# Modules have run, so plots directory should be complete by now. Move its contents.
for f in os.listdir(config.plots_tmp_dir):
fn = os.path.join(config.plots_tmp_dir, f)
logger.debug(
"Moving plots directory from '{}' to '{}'".format(
fn, config.plots_dir
)
)
shutil.move(fn, config.plots_dir)
plugin_hooks.mqc_trigger("before_template")
# Load in parent template files first if a child theme
try:
parent_template = config.avail_templates[template_mod.template_parent].load()
copy_tree(parent_template.template_dir, tmp_dir)
except AttributeError:
pass # Not a child theme
# Copy the template files to the tmp directory (distutils overwrites parent theme files)
copy_tree(template_mod.template_dir, tmp_dir)
# Function to include file contents in Jinja template
def include_file(name, fdir=tmp_dir, b64=False):
try:
if fdir is None:
fdir = ""
if b64:
with io.open(os.path.join(fdir, name), "rb") as f:
return base64.b64encode(f.read()).decode("utf-8")
else:
with io.open(os.path.join(fdir, name), "r", encoding="utf-8") as f:
return f.read()
except (OSError, IOError) as e:
logger.error("Could not include file '{}': {}".format(name, e))
# Load the report template
try:
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmp_dir))
env.globals["include_file"] = include_file
j_template = env.get_template(template_mod.base_fn)
except:
raise IOError(
"Could not load {} template file '{}'".format(
config.template, template_mod.base_fn
)
)
# Use jinja2 to render the template and overwrite
config.analysis_dir = [os.path.realpath(d) for d in config.analysis_dir]
report_output = j_template.render(report=report, config=config)
if filename == "stdout":
print(report_output.encode("utf-8"), file=sys.stdout)
else:
try:
with io.open(config.output_fn, "w", encoding="utf-8") as f:
print(report_output, file=f)
except IOError as e:
raise IOError(
"Could not print report to '{}' - {}".format(
config.output_fn, IOError(e)
)
)
# Copy over files if requested by the theme
try:
for f in template_mod.copy_files:
fn = os.path.join(tmp_dir, f)
dest_dir = os.path.join(os.path.dirname(config.output_fn), f)
copy_tree(fn, dest_dir)
except AttributeError:
pass # No files to copy
# Clean up temporary directory
shutil.rmtree(tmp_dir)
# Zip the data directory if requested
if config.zip_data_dir and config.data_dir is not None:
shutil.make_archive(config.data_dir, "zip", config.data_dir)
shutil.rmtree(config.data_dir)
# Try to create a PDF if requested
if make_pdf:
try:
pdf_fn_name = config.output_fn.replace(".html", ".pdf")
pandoc_call = [
"pandoc",
"--standalone",
config.output_fn,
"--output",
pdf_fn_name,
"--pdf-engine=xelatex",
"-V",
"documentclass=article",
"-V",
"geometry=margin=1in",
"-V",
"title=",
]
if config.pandoc_template is not None:
pandoc_call.append("--template={}".format(config.pandoc_template))
logger.debug(
"Attempting Pandoc conversion to PDF with following command:\n{}".format(
" ".join(pandoc_call)
)
)
pdf_exit_code = subprocess.call(pandoc_call)
if pdf_exit_code != 0:
logger.error(
"Error creating PDF! Pandoc returned a non-zero exit code."
)
else:
logger.info("PDF Report : {}".format(pdf_fn_name))
except OSError as e:
if e.errno == os.errno.ENOENT:
logger.error(
"Error creating PDF - pandoc not found. Is it installed? http://pandoc.org/"
)
else:
logger.error(
"Error creating PDF! Something went wrong when creating the PDF\n"
+ ("=" * 60)
+ "\n{}\n".format(traceback.format_exc())
+ ("=" * 60)
)
plugin_hooks.mqc_trigger("execution_finish")
logger.info("MultiQC complete")
if lint and len(report.lint_errors) > 0:
logger.error(
"Found {} linting errors!\n{}".format(
len(report.lint_errors), "\n".join(report.lint_errors)
)
)
sys_exit_code = 1
# Move the log file into the data directory
log.move_tmp_log(logger)
# Return the appropriate error code (eg. 1 if a module broke, 0 on success)
return sys_exit_code
|
https://github.com/ewels/MultiQC/issues/806
|
[2018-07-26 14:06:51,065] multiqc.modules.fastqc.fastqc [INFO ] Found 2 reports
[2018-07-26 14:06:51,132] multiqc [DEBUG ] No samples found: clusterflow
[2018-07-26 14:06:51,135] multiqc [DEBUG ] No samples found: bcl2fastq
[2018-07-26 14:06:51,139] multiqc [DEBUG ] No samples found: interop
[2018-07-26 14:06:51,171] multiqc [INFO ] Compressing plot data
[2018-07-26 14:06:51,673] multiqc [INFO ] Report : qc/LB18-0784-Sequencing-Analysis_multiqc_report.html
[2018-07-26 14:06:51,673] multiqc [INFO ] Data : qc/LB18-0784-Sequencing-Analysis_multiqc_report_data
[2018-07-26 14:06:51,674] multiqc [DEBUG ] Moving data file from '/tmp/tmp7gzkuz5a/multiqc_data/multiqc_sources.txt' to 'qc/LB18-0784-Sequencing-Analysis_multiqc_report_data'
Traceback (most recent call last):
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 544, in move
os.rename(src, real_dst)
OSError: [Errno 18] Invalid cross-device link: '/tmp/tmp7gzkuz5a/multiqc_data/multiqc_sources.txt' -> 'qc/LB18-0784-Sequencing-Analysis_multiqc_report_data/multiqc_sources.txt'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/bin/multiqc", line 767, in <module>
multiqc()
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/bin/multiqc", line 611, in multiqc
shutil.move(fn, config.data_dir)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 558, in move
copy_function(src, real_dst)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 258, in copy2
copystat(src, dst, follow_symlinks=follow_symlinks)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 201, in copystat
follow_symlinks=follow)
PermissionError: [Errno 1] Operation not permitted
|
OSError
|
def move_tmp_log(logger):
    """Copy the temporary log file into the MultiQC data directory
    (if one exists) and clean up the temporary copies."""
    try:
        # Shut logging down first so the log file handle is released
        # (https://stackoverflow.com/questions/15435652/python-does-not-release-filehandles-to-logfile)
        logging.shutdown()
        dest = os.path.join(config.data_dir, "multiqc.log")
        shutil.copy(log_tmp_fn, dest)
        os.remove(log_tmp_fn)
        util_functions.robust_rmtree(log_tmp_dir)
    except (AttributeError, TypeError, IOError):
        # No data dir configured / logging never initialised - nothing to do
        pass
|
def move_tmp_log(logger):
    """Move the temporary log file to the MultiQC data directory
    if it exists."""
    try:
        # https://stackoverflow.com/questions/15435652/python-does-not-release-filehandles-to-logfile
        logging.shutdown()
        # Copy + remove instead of shutil.move(): a cross-device move falls
        # back to copy2()/copystat(), and copystat() can raise
        # PermissionError (Errno 1) on filesystems that reject chmod/chown
        # (e.g. some network / container mounts). shutil.copy() skips the
        # metadata step entirely.
        shutil.copy(log_tmp_fn, os.path.join(config.data_dir, "multiqc.log"))
        os.remove(log_tmp_fn)
        util_functions.robust_rmtree(log_tmp_dir)
    except (AttributeError, TypeError, IOError):
        # No data dir configured / logging never initialised - nothing to do
        pass
|
https://github.com/ewels/MultiQC/issues/806
|
[2018-07-26 14:06:51,065] multiqc.modules.fastqc.fastqc [INFO ] Found 2 reports
[2018-07-26 14:06:51,132] multiqc [DEBUG ] No samples found: clusterflow
[2018-07-26 14:06:51,135] multiqc [DEBUG ] No samples found: bcl2fastq
[2018-07-26 14:06:51,139] multiqc [DEBUG ] No samples found: interop
[2018-07-26 14:06:51,171] multiqc [INFO ] Compressing plot data
[2018-07-26 14:06:51,673] multiqc [INFO ] Report : qc/LB18-0784-Sequencing-Analysis_multiqc_report.html
[2018-07-26 14:06:51,673] multiqc [INFO ] Data : qc/LB18-0784-Sequencing-Analysis_multiqc_report_data
[2018-07-26 14:06:51,674] multiqc [DEBUG ] Moving data file from '/tmp/tmp7gzkuz5a/multiqc_data/multiqc_sources.txt' to 'qc/LB18-0784-Sequencing-Analysis_multiqc_report_data'
Traceback (most recent call last):
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 544, in move
os.rename(src, real_dst)
OSError: [Errno 18] Invalid cross-device link: '/tmp/tmp7gzkuz5a/multiqc_data/multiqc_sources.txt' -> 'qc/LB18-0784-Sequencing-Analysis_multiqc_report_data/multiqc_sources.txt'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/bin/multiqc", line 767, in <module>
multiqc()
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/bin/multiqc", line 611, in multiqc
shutil.move(fn, config.data_dir)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 558, in move
copy_function(src, real_dst)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 258, in copy2
copystat(src, dst, follow_symlinks=follow_symlinks)
File "/opt/conda/miniconda3/envs/HUM-NGSDiag/lib/python3.5/shutil.py", line 201, in copystat
follow_symlinks=follow)
PermissionError: [Errno 1] Operation not permitted
|
OSError
|
def parse_plotProfile(self):
    """Find plotProfile output.

    Adds a line-graph section for the per-sample read-distribution
    profiles (with TSS/TES shading when those labels are present) and
    returns the number of samples found.
    """
    self.deeptools_plotProfile = dict()
    for f in self.find_log_files("deeptools/plotProfile", filehandles=False):
        parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotProfile:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotProfile[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section="plotProfile")
    # NOTE(review): bin_labels / converted_bin_labels leak out of the loop
    # above, so the plot bands below reflect the *last* file parsed -
    # presumably all files share the same binning; confirm.
    if len(self.deeptools_plotProfile) > 0:
        # Try to do plot bands but don't crash if the labels aren't as we expect
        xPlotBands = []
        xPlotLines = []
        plotBandHelp = ""
        try:
            # Shaded region downstream of the TES (pink); .index() raises
            # ValueError when 'TES' is missing, absorbed by the except below
            xPlotBands.append(
                {
                    "from": converted_bin_labels[bin_labels.index("TES")],
                    "to": converted_bin_labels[-1],
                    "color": "#f7cfcf",
                }
            )
            # Gene body between TSS and TES (yellow)
            xPlotBands.append(
                {
                    "from": converted_bin_labels[bin_labels.index("TSS")],
                    "to": converted_bin_labels[bin_labels.index("TES")],
                    "color": "#ffffe2",
                }
            )
            # Upstream of the TSS (green)
            xPlotBands.append(
                {
                    "from": converted_bin_labels[0],
                    "to": converted_bin_labels[bin_labels.index("TSS")],
                    "color": "#e5fce0",
                }
            )
            # Dashed vertical markers at the TES and TSS positions
            xPlotLines.append(
                {
                    "width": 1,
                    "value": converted_bin_labels[bin_labels.index("TES")],
                    "dashStyle": "Dash",
                    "color": "#000000",
                }
            )
            xPlotLines.append(
                {
                    "width": 1,
                    "value": converted_bin_labels[bin_labels.index("TSS")],
                    "dashStyle": "Dash",
                    "color": "#000000",
                }
            )
            plotBandHelp = """
            * Green: {} upstream of gene to {}
            * Yellow: {} to {}
            * Pink: {} to {} downstream of gene
            """.format(
                list(filter(None, bin_labels))[0],
                list(filter(None, bin_labels))[1],
                list(filter(None, bin_labels))[1],
                list(filter(None, bin_labels))[2],
                list(filter(None, bin_labels))[2],
                list(filter(None, bin_labels))[3],
            )
        except ValueError:
            # 'TSS'/'TES' labels absent - plot without bands/lines
            pass
        config = {
            "id": "read_distribution_profile",
            "title": "deeptools: Read Distribution Profile after Annotation",
            "ylab": "Occurrence",
            "xlab": None,
            "smooth_points": 100,
            "xPlotBands": xPlotBands,
            "xPlotLines": xPlotLines,
        }
        self.add_section(
            name="Read Distribution Profile after Annotation",
            anchor="read_distribution_profile_plot",
            description="""
            Accumulated view of the distribution of sequence reads related to the closest annotated gene.
            All annotated genes have been normalized to the same size.
            {}""".format(plotBandHelp),
            plot=linegraph.plot(self.deeptools_plotProfile, config),
        )
    return len(self.deeptools_plotProfile)
|
def parse_plotProfile(self):
    """Find plotProfile output.

    Adds a line-graph section for the per-sample read-distribution
    profiles and returns the number of samples found.

    Fixes two defects in the previous implementation:
    - ``bin_labels.index('TSS'/'TES')`` raised an uncaught ValueError for
      profiles without those labels (e.g. reference-point matrices,
      MultiQC issue #1011); band/line setup is now best-effort.
    - the return value counted the wrong attribute
      (``deeptools_bamPEFragmentSizeDistribution``).
    """
    self.deeptools_plotProfile = dict()
    for f in self.find_log_files("deeptools/plotProfile", filehandles=False):
        parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotProfile:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotProfile[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section="plotProfile")
    if len(self.deeptools_plotProfile) > 0:
        # Try to do plot bands but don't crash if the labels aren't as we expect
        xPlotBands = []
        xPlotLines = []
        plotBandHelp = ""
        try:
            xPlotBands.append(
                {
                    "from": converted_bin_labels[bin_labels.index("TES")],
                    "to": converted_bin_labels[-1],
                    "color": "#f7cfcf",
                }
            )
            xPlotBands.append(
                {
                    "from": converted_bin_labels[bin_labels.index("TSS")],
                    "to": converted_bin_labels[bin_labels.index("TES")],
                    "color": "#ffffe2",
                }
            )
            xPlotBands.append(
                {
                    "from": converted_bin_labels[0],
                    "to": converted_bin_labels[bin_labels.index("TSS")],
                    "color": "#e5fce0",
                }
            )
            xPlotLines.append(
                {
                    "width": 1,
                    "value": converted_bin_labels[bin_labels.index("TES")],
                    "dashStyle": "Dash",
                    "color": "#000000",
                }
            )
            xPlotLines.append(
                {
                    "width": 1,
                    "value": converted_bin_labels[bin_labels.index("TSS")],
                    "dashStyle": "Dash",
                    "color": "#000000",
                }
            )
            plotBandHelp = """
            * Green: {} upstream of gene to {}
            * Yellow: {} to {}
            * Pink: {} to {} downstream of gene
            """.format(
                list(filter(None, bin_labels))[0],
                list(filter(None, bin_labels))[1],
                list(filter(None, bin_labels))[1],
                list(filter(None, bin_labels))[2],
                list(filter(None, bin_labels))[2],
                list(filter(None, bin_labels))[3],
            )
        except ValueError:
            # 'TSS'/'TES' labels absent - plot without bands/lines
            pass
        config = {
            "id": "read_distribution_profile",
            "title": "deeptools: Read Distribution Profile after Annotation",
            "ylab": "Occurrence",
            "xlab": None,
            "smooth_points": 100,
            "xPlotBands": xPlotBands,
            "xPlotLines": xPlotLines,
        }
        self.add_section(
            name="Read Distribution Profile after Annotation",
            anchor="read_distribution_profile_plot",
            description="""
            Accumulated view of the distribution of sequence reads related to the closest annotated gene.
            All annotated genes have been normalized to the same size.
            {}""".format(plotBandHelp),
            plot=linegraph.plot(self.deeptools_plotProfile, config),
        )
    return len(self.deeptools_plotProfile)
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parseBamPEFDistributionFile(self, f):
    """Parse a bamPEFragmentSize raw fragment-length table into
    {sample_name: {fragment_size: count}}."""
    data = dict()
    prev_sample = []
    for raw_line in f["f"].splitlines():
        fields = raw_line.rstrip().split("\t")
        # Skip the tool banner and the column-title row
        if fields[0] in ("#bamPEFragmentSize", "Size"):
            continue
        # Sample name comes from the file path in the third column
        sample = self.clean_s_name(fields[2].rstrip().split("/")[-1], f["root"])
        if sample != prev_sample:
            data[sample] = dict()
            prev_sample = sample
        data[sample][self._int(fields[0])] = self._int(fields[1])
    return data
|
def parseBamPEFDistributionFile(self, f):
    """Parse a bamPEFragmentSize raw fragment-length table into
    {sample_name: {fragment_size: count}}."""
    d = dict()
    lastsample = []
    for line in f["f"].splitlines():
        cols = line.rstrip().split("\t")
        if cols[0] == "#bamPEFragmentSize":
            continue
        elif cols[0] == "Size":
            continue
        else:
            # Sample name comes from the file path in the third column
            s_name = self.clean_s_name(cols[2].rstrip().split("/")[-1], f["root"])
            if s_name != lastsample:
                d[s_name] = dict()
                lastsample = s_name
            # Use the module's tolerant int parser instead of plain int():
            # deeptools sometimes emits counts in float notation
            # (e.g. "123.0"), which int() rejects with ValueError.
            d[s_name].update({self._int(cols[0]): self._int(cols[1])})
    return d
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parseBamPEFile(self, f):
    """Parse the bamPEFragmentSize summary table into
    {sample_name: OrderedDict(metric -> value)}."""
    results = {}
    header = None
    for raw in f["f"].splitlines():
        fields = raw.rstrip().split("\t")
        if header is None:
            # First row is the column header
            header = fields
            continue
        sample = None
        for col_idx, col_name in enumerate(header):
            if col_idx == 0:
                # First column names the sample
                sample = self.clean_s_name(fields[0], f["root"])
                if sample in results:
                    log.debug("Replacing duplicate sample {}.".format(sample))
                results[sample] = OrderedDict()
                continue
            if col_idx < 19 and fields[1] == "0":
                # Don't store fragment metrics for SE datasets, they're just 0.
                continue
            try:
                # Integer where possible, float otherwise
                results[sample][col_name] = self._int(fields[col_idx])
            except ValueError:
                results[sample][col_name] = float(fields[col_idx])
    return results
|
def parseBamPEFile(self, f):
    """Parse the bamPEFragmentSize summary table into
    {sample_name: OrderedDict(metric -> value)}."""
    d = {}
    headers = None
    for line in f["f"].splitlines():
        cols = line.rstrip().split("\t")
        if headers is None:
            # First row is the column header
            headers = cols
        else:
            s_name = None
            for idx, h in enumerate(headers):
                if idx == 0:
                    # First column names the sample
                    s_name = self.clean_s_name(cols[0], f["root"])
                    if s_name in d:
                        log.debug("Replacing duplicate sample {}.".format(s_name))
                    d[s_name] = OrderedDict()
                else:
                    if idx < 19 and cols[1] == "0":
                        # Don't store fragment metrics for SE datasets, they're just 0.
                        continue
                    try:
                        # Use the module's tolerant int parser: plain int()
                        # rejects float-formatted whole numbers (e.g. "123.0"),
                        # silently demoting them to float via the fallback.
                        d[s_name][h] = self._int(cols[idx])
                    except ValueError:
                        d[s_name][h] = float(cols[idx])
    return d
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parseEstimateReadFilteringFile(self, f):
    """Parse estimateReadFiltering tabular output.

    Returns {sample_name: metrics dict}, or an empty dict if the file
    does not match the expected 12-column layout.
    """
    d = {}
    firstLine = True
    for line in f["f"].splitlines():
        if firstLine:
            # Skip the column-header row
            firstLine = False
            continue
        cols = line.strip().split("\t")
        if len(cols) != 12:
            # This is not really the output from estimateReadFiltering!
            log.warning(
                "{} was initially flagged as the tabular output from estimateReadFiltering, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
        s_name = self.clean_s_name(cols[0], f["root"])
        if s_name in d:
            log.debug("Replacing duplicate sample {}.".format(s_name))
        d[s_name] = dict()
        try:
            d[s_name]["total"] = self._int(cols[1])
            d[s_name]["mapped"] = self._int(cols[2])
            d[s_name]["blacklisted"] = self._int(cols[3])
            d[s_name]["filtered"] = float(cols[4])
            d[s_name]["mapq"] = float(cols[5])
            d[s_name]["required flags"] = float(cols[6])
            d[s_name]["excluded flags"] = float(cols[7])
            d[s_name]["internal dupes"] = float(cols[8])
            d[s_name]["dupes"] = float(cols[9])
            d[s_name]["singletons"] = float(cols[10])
            d[s_name]["strand"] = float(cols[11])
        except (ValueError, IndexError):
            # Narrowed from a bare `except:` so that KeyboardInterrupt /
            # SystemExit cannot be swallowed; only parse errors are expected.
            # Obviously this isn't really the output from estimateReadFiltering
            log.warning(
                "{} was initially flagged as the output from estimateReadFiltering, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
    return d
|
def parseEstimateReadFilteringFile(self, f):
    """Parse estimateReadFiltering tabular output.

    Returns {sample_name: metrics dict}, or an empty dict if the file
    does not match the expected 12-column layout.
    """
    d = {}
    firstLine = True
    for line in f["f"].splitlines():
        if firstLine:
            # Skip the column-header row
            firstLine = False
            continue
        cols = line.strip().split("\t")
        if len(cols) != 12:
            # This is not really the output from estimateReadFiltering!
            log.warning(
                "{} was initially flagged as the tabular output from estimateReadFiltering, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
        s_name = self.clean_s_name(cols[0], f["root"])
        if s_name in d:
            log.debug("Replacing duplicate sample {}.".format(s_name))
        d[s_name] = dict()
        try:
            # self._int tolerates float-formatted integers (e.g. "123.0")
            # that plain int() rejects with ValueError
            d[s_name]["total"] = self._int(cols[1])
            d[s_name]["mapped"] = self._int(cols[2])
            d[s_name]["blacklisted"] = self._int(cols[3])
            d[s_name]["filtered"] = float(cols[4])
            d[s_name]["mapq"] = float(cols[5])
            d[s_name]["required flags"] = float(cols[6])
            d[s_name]["excluded flags"] = float(cols[7])
            d[s_name]["internal dupes"] = float(cols[8])
            d[s_name]["dupes"] = float(cols[9])
            d[s_name]["singletons"] = float(cols[10])
            d[s_name]["strand"] = float(cols[11])
        except (ValueError, IndexError):
            # Narrowed from a bare `except:` so that KeyboardInterrupt /
            # SystemExit cannot be swallowed; only parse errors are expected.
            # Obviously this isn't really the output from estimateReadFiltering
            log.warning(
                "{} was initially flagged as the output from estimateReadFiltering, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
    return d
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parsePlotEnrichment(self, f):
    """Parse plotEnrichment --outRawCounts output into
    {sample_name: {featureType: {"percent": float, "count": int}}}.

    Returns an empty dict if the file does not match the expected
    5-column layout.
    """
    d = {}
    firstLine = True
    for line in f["f"].splitlines():
        if firstLine:
            # Skip the column-header row
            firstLine = False
            continue
        cols = line.strip().split("\t")
        if len(cols) != 5:
            log.warning(
                "{} was initially flagged as the output from plotEnrichment, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
        s_name = self.clean_s_name(cols[0], f["root"])
        if s_name not in d:
            d[s_name] = dict()
        # (cols entries from str.split() are already str - the old
        # `cols[1] = str(cols[1])` re-cast was a no-op and has been removed)
        if cols[1] in d[s_name]:
            log.warning(
                "Replacing duplicate sample:featureType {}:{}.".format(s_name, cols[1])
            )
        d[s_name][cols[1]] = dict()
        try:
            d[s_name][cols[1]]["percent"] = float(cols[2])
            d[s_name][cols[1]]["count"] = self._int(cols[3])
        except (ValueError, IndexError):
            # Narrowed from a bare `except:` so that KeyboardInterrupt /
            # SystemExit cannot be swallowed; only parse errors are expected.
            log.warning(
                "{} was initially flagged as the output from plotEnrichment, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
    return d
|
def parsePlotEnrichment(self, f):
    """Parse plotEnrichment --outRawCounts output into
    {sample_name: {featureType: {"percent": float, "count": int}}}.

    Returns an empty dict if the file does not match the expected
    5-column layout.
    """
    d = {}
    firstLine = True
    for line in f["f"].splitlines():
        if firstLine:
            # Skip the column-header row
            firstLine = False
            continue
        cols = line.strip().split("\t")
        if len(cols) != 5:
            log.warning(
                "{} was initially flagged as the output from plotEnrichment, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
        s_name = self.clean_s_name(cols[0], f["root"])
        if s_name not in d:
            d[s_name] = dict()
        cols[1] = str(cols[1])
        if cols[1] in d[s_name]:
            log.warning(
                "Replacing duplicate sample:featureType {}:{}.".format(s_name, cols[1])
            )
        d[s_name][cols[1]] = dict()
        try:
            d[s_name][cols[1]]["percent"] = float(cols[2])
            # self._int tolerates float-formatted counts (e.g. "123.0")
            # that plain int() rejects with ValueError
            d[s_name][cols[1]]["count"] = self._int(cols[3])
        except (ValueError, IndexError):
            # Narrowed from a bare `except:` so that KeyboardInterrupt /
            # SystemExit cannot be swallowed; only parse errors are expected.
            log.warning(
                "{} was initially flagged as the output from plotEnrichment, but that seems to not be the case. Skipping...".format(
                    f["fn"]
                )
            )
            return dict()
    return d
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parsePlotFingerprintOutRawCounts(self, f):
    """Parse plotFingerprint --outRawCounts output into normalised
    cumulative-coverage curves (one per sample) for a fingerprint plot.

    Returns {sample_name: {fraction_of_bins: fraction_of_coverage}}.
    """
    d = dict()
    samples = []
    firstLine = True
    for line in f["f"].splitlines():
        cols = line.strip().split("\t")
        if cols[0] == "#plotFingerprint --outRawCounts":
            # Tool banner line - nothing to parse
            continue
        if firstLine:
            # Header row: columns are quoted sample names
            for c in cols:
                c = str(c).strip("'")
                s_name = self.clean_s_name(c, f["root"])
                d[s_name] = []
                samples.append(s_name)
            firstLine = False
            continue
        # Data rows: one per-bin count per sample column
        for idx, c in enumerate(cols):
            d[samples[idx]].append(self._int(c))
    # Switch to numpy, get the normalized cumsum
    x = np.linspace(
        0, len(d[samples[0]]) - 1, endpoint=True, num=100, dtype=int
    )  # The indices into the vectors that we'll actually return for plotting
    # Fractional x positions (0..1) for each bin index
    xp = np.arange(len(d[samples[0]]) + 1) / float(len(d[samples[0]]) + 1)
    for k, v in d.items():
        v = np.array(v)
        v = np.sort(v)
        cs = np.cumsum(v)
        cs = cs / float(cs[-1])  # normalise so the curve ends at 1.0
        # Convert for plotting
        v2 = dict()
        v2[0.0] = 0.0
        for _ in x:
            v2[xp[_]] = cs[_]
        # Re-assigning an existing key while iterating d.items() is safe:
        # the dict's size does not change
        d[k] = v2
    return d
|
def parsePlotFingerprintOutRawCounts(self, f):
    """Parse plotFingerprint --outRawCounts output into normalised
    cumulative-coverage curves (one per sample) for a fingerprint plot.

    Returns {sample_name: {fraction_of_bins: fraction_of_coverage}}.
    """
    d = dict()
    samples = []
    firstLine = True
    for line in f["f"].splitlines():
        cols = line.strip().split("\t")
        if cols[0] == "#plotFingerprint --outRawCounts":
            continue
        if firstLine:
            # Header row: columns are quoted sample names
            for c in cols:
                c = str(c).strip("'")
                s_name = self.clean_s_name(c, f["root"])
                d[s_name] = []
                samples.append(s_name)
            firstLine = False
            continue
        for idx, c in enumerate(cols):
            # Use the module's tolerant int parser: plain int() raises
            # ValueError on float-formatted counts (e.g. "123.0")
            d[samples[idx]].append(self._int(c))
    # Switch to numpy, get the normalized cumsum
    x = np.linspace(
        0, len(d[samples[0]]) - 1, endpoint=True, num=100, dtype=int
    )  # The indices into the vectors that we'll actually return for plotting
    xp = np.arange(len(d[samples[0]]) + 1) / float(len(d[samples[0]]) + 1)
    for k, v in d.items():
        v = np.array(v)
        v = np.sort(v)
        cs = np.cumsum(v)
        cs = cs / float(cs[-1])
        # Convert for plotting
        v2 = dict()
        v2[0.0] = 0.0
        for _ in x:
            v2[xp[_]] = cs[_]
        d[k] = v2
    return d
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parsePlotPCAData(self, f):
    """Parse plotPCA --outFileNameData output into
    {sample_name: {component_number: loading}}."""
    results = dict()
    sample_order = []
    for raw in f["f"].splitlines():
        fields = raw.strip().split("\t")
        if fields[0] == "#plotPCA --outFileNameData":
            # Tool banner line - nothing to parse
            continue
        if fields[0] == "Component":
            # Header row: the middle columns are quoted sample names
            for label in fields[1 : (len(fields) - 1)]:
                cleaned = self.clean_s_name(str(label).strip("'"), f["root"])
                results[cleaned] = {}
                sample_order.append(cleaned)
        else:
            # Data row: first column is the component number, the middle
            # columns hold one loading per sample
            component = fields[0]
            for pos, value in enumerate(fields[1 : (len(fields) - 1)]):
                results[sample_order[pos]][self._int(component)] = float(value)
    return results
|
def parsePlotPCAData(self, f):
    """Parse plotPCA --outFileNameData output into
    {sample_name: {component_number: loading}}."""
    d = dict()
    samples = []
    for line in f["f"].splitlines():
        cols = line.strip().split("\t")
        if cols[0] == "#plotPCA --outFileNameData":
            continue
        elif cols[0] == "Component":
            # Header row: the middle columns are quoted sample names
            for c in cols[1 : (len(cols) - 1)]:
                c = str(c).strip("'")
                s_name = self.clean_s_name(c, f["root"])
                d[s_name] = {}
                samples.append(s_name)
        else:
            idx = 0
            compo = cols[0]
            for c in cols[1 : (len(cols) - 1)]:
                # self._int tolerates float-formatted component numbers
                # (e.g. "1.0"), which plain int() rejects with ValueError
                d[samples[idx]][self._int(compo)] = float(c)
                idx += 1
    return d
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parsePlotProfileData(self, f):
    """Parse plotProfile --outFileNameData output.

    Returns a tuple ``(d, bin_labels, converted_bin_labels)`` where ``d``
    maps sample_name -> {genomic position: coverage}.
    """
    d = dict()
    bin_labels = []
    bins = []
    for line in f["f"].splitlines():
        cols = line.rstrip().split("\t")
        if cols[0] == "bin labels":
            # Collect labels until the first repeat (labels repeat once
            # per sample row in the file)
            for col in cols[2 : len(cols)]:
                if col not in list(filter(None, bin_labels)):
                    bin_labels.append(col)
                else:
                    break
        elif cols[0] == "bins":
            # One bin index per unique label
            for col in cols[2 : len(cols)]:
                if len(bins) != len(bin_labels):
                    bins.append(self._int(col))
                else:
                    break
        else:
            # Per-sample data row
            s_name = self.clean_s_name(cols[0], f["root"])
            d[s_name] = dict()
            factors = {"Kb": 1e3, "Mb": 1e6, "Gb": 1e9}
            convert_factor = 1
            for k, v in factors.items():
                if k in bin_labels[0]:
                    convert_factor *= v
            # NOTE(review): `k` leaks from the factors loop above and
            # .strip(k) removes a *character set*, not a suffix - this works
            # only because the unit characters don't appear in the number;
            # confirm against real label values.
            start = float(bin_labels[0].strip(k)) * convert_factor
            # NOTE(review): .index('TSS') raises an uncaught ValueError for
            # profiles without a 'TSS' label - callers appear to rely on
            # this not happening here; confirm.
            step = self._int(abs(start / bin_labels.index("TSS")))
            end = step * (len(bin_labels) - bin_labels.index("TSS") - 1)
            converted_bin_labels = range(
                (self._int(start) + step), (self._int(end) + step), step
            )
            for i in bins:
                d[s_name].update({converted_bin_labels[i - 1]: float(cols[i + 1])})
    # NOTE(review): converted_bin_labels is only bound inside the sample-row
    # branch - a file with no sample rows would raise NameError here
    return d, bin_labels, converted_bin_labels
|
def parsePlotProfileData(self, f):
    """Parse plotProfile --outFileNameData output.

    Returns a tuple ``(d, bin_labels, converted_bin_labels)`` where ``d``
    maps sample_name -> {genomic position: coverage}.
    """
    d = dict()
    bin_labels = []
    bins = []
    for line in f["f"].splitlines():
        cols = line.rstrip().split("\t")
        if cols[0] == "bin labels":
            # Collect labels until the first repeat (labels repeat once
            # per sample row in the file)
            for col in cols[2 : len(cols)]:
                if col not in list(filter(None, bin_labels)):
                    bin_labels.append(col)
                else:
                    break
        elif cols[0] == "bins":
            for col in cols[2 : len(cols)]:
                if len(bins) != len(bin_labels):
                    # self._int tolerates float-formatted bin indices
                    # (e.g. "100.0"), which plain int() rejects
                    bins.append(self._int(col))
                else:
                    break
        else:
            # Per-sample data row
            s_name = self.clean_s_name(cols[0], f["root"])
            d[s_name] = dict()
            factors = {"Kb": 1e3, "Mb": 1e6, "Gb": 1e9}
            convert_factor = 1
            for k, v in factors.items():
                if k in bin_labels[0]:
                    convert_factor *= v
            start = float(bin_labels[0].strip(k)) * convert_factor
            step = self._int(abs(start / bin_labels.index("TSS")))
            end = step * (len(bin_labels) - bin_labels.index("TSS") - 1)
            converted_bin_labels = range(
                (self._int(start) + step), (self._int(end) + step), step
            )
            for i in bins:
                d[s_name].update({converted_bin_labels[i - 1]: float(cols[i + 1])})
    return d, bin_labels, converted_bin_labels
|
https://github.com/ewels/MultiQC/issues/1011
|
Module deeptools raised an exception: Traceback (most recent call last):
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/bin/multiqc", line 440, in multiqc
output = mod()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/deeptools.py", line 75, in __init__
n['plotProfile'] = self.parse_plotProfile()
File "/sandbox/users/foucal-a/miniconda3/envs/deeptools/lib/python3.6/site-packages/multiqc/modules/deeptools/plotProfile.py", line 36, in parse_plotProfile
{'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
ValueError: 'TES' is not in list
|
ValueError
|
def parse_logs(self, file_type, root, s_name, fn, f, **kw):
    """Parse one BBMap/BBTools output file of the given ``file_type``.

    Header lines start with '#'; the one matching the expected first
    column name is the table header, the others are key/value pairs.
    Remaining lines are tab-separated data rows. Results are stored in
    ``self.mod_data[file_type][s_name]`` as ``{"data": ..., "kv": ...}``.
    Returns True on success; False if the file type is unknown or
    unimplemented, the table header doesn't match, or no data was found.
    """
    log.debug("Parsing %s/%s", root, fn)
    if not file_type in file_types:
        log.error("Unknown output type '%s'. Error in config?", file_type)
        return False
    log_descr = file_types[file_type]
    if "not_implemented" in log_descr:
        log.debug("Can't parse '%s' -- implementation missing", file_type)
        return False
    # Expected column names; may be an OrderedDict mapping name -> value type.
    cols = log_descr["cols"]
    if isinstance(cols, OrderedDict):
        cols = list(cols.keys())
    kv = {}
    data = {}
    for line_number, line in enumerate(f, start=1):
        line = line.strip().split("\t")
        try:
            header_row = line[0][0] == "#"
        except IndexError:
            continue # The table is probably empty
        if header_row:
            line[0] = line[0][1:] # remove leading '#'
            if line[0] != cols[0]:
                # It's not the table header, it must be a key-value row
                if len(line) == 3 and file_type == "stats":
                    # This is a special case for the 'stats' file type:
                    # The first line _might_ have three columns if processing paired-end reads,
                    # but we don't care about the first line.
                    # The third line is always three columns, which is what we really want.
                    if line[0] == "File":
                        continue
                    kv["Percent filtered"] = float(line[2].strip("%"))
                    kv[line[0]] = line[1]
                elif len(line) != 2:
                    # Not two items? Wrong!
                    log.error(
                        "Expected key value pair in %s/%s:%d but found '%s'",
                        root,
                        s_name,
                        line_number,
                        repr(line),
                    )
                    log.error("Table header should begin with '%s'", cols[0])
                else:
                    # save key value pair
                    kv[line[0]] = line[1]
            else:
                # It should be the table header. Verify:
                if line != cols:
                    if line != cols + list(log_descr["extracols"].keys()):
                        log.error(
                            "Table headers do not match those 'on file'. %s != %s",
                            repr(line),
                            repr(cols),
                        )
                        return False
        else:
            # Data row: convert fields with the declared per-column types if
            # available, otherwise assume all-integer columns. The first
            # column becomes the row key.
            if isinstance(log_descr["cols"], OrderedDict):
                line = [
                    value_type(value)
                    for value_type, value in zip(log_descr["cols"].values(), line)
                ]
            else:
                line = list(map(int, line))
            data[line[0]] = line[1:]
    if not data:
        # Nothing plottable: don't register the sample at all.
        log.warning("File %s appears to contain no data for plotting, ignoring...", fn)
        return False
    if s_name in self.mod_data[file_type]:
        log.debug("Duplicate sample name found! Overwriting: %s", s_name)
    self.mod_data[file_type][s_name] = {"data": data, "kv": kv}
    log.debug(
        "Found %s output for sample %s with %d rows", file_type, s_name, len(data)
    )
    return True
|
def parse_logs(self, file_type, root, s_name, fn, f, **kw):
    """Parse one BBMap/BBTools output file of the given ``file_type``.

    Header lines start with '#'; the one matching the expected first
    column name is the table header, the others are key/value pairs.
    Remaining lines are tab-separated data rows. Results are stored in
    ``self.mod_data[file_type][s_name]`` as ``{"data": ..., "kv": ...}``.
    Returns True on success; False if the file type is unknown or
    unimplemented, the table header doesn't match, or no data was found.
    """
    log.debug("Parsing %s/%s", root, fn)
    if file_type not in file_types:
        log.error("Unknown output type '%s'. Error in config?", file_type)
        return False
    log_descr = file_types[file_type]
    if "not_implemented" in log_descr:
        log.debug("Can't parse '%s' -- implementation missing", file_type)
        return False
    # Expected column names; may be an OrderedDict mapping name -> value type.
    cols = log_descr["cols"]
    if isinstance(cols, OrderedDict):
        cols = list(cols.keys())
    kv = {}
    data = {}
    for line_number, line in enumerate(f, start=1):
        line = line.strip().split("\t")
        try:
            header_row = line[0][0] == "#"
        except IndexError:
            # Bug fix: blank lines (or an entirely empty table) used to
            # raise IndexError on line[0][0]; skip them instead.
            continue
        if header_row:
            line[0] = line[0][1:]  # remove leading '#'
            if line[0] != cols[0]:
                # It's not the table header, it must be a key-value row
                if len(line) == 3 and file_type == "stats":
                    # This is a special case for the 'stats' file type:
                    # The first line _might_ have three columns if processing paired-end reads,
                    # but we don't care about the first line.
                    # The third line is always three columns, which is what we really want.
                    if line[0] == "File":
                        continue
                    kv["Percent filtered"] = float(line[2].strip("%"))
                    kv[line[0]] = line[1]
                elif len(line) != 2:
                    # Not two items? Wrong!
                    log.error(
                        "Expected key value pair in %s/%s:%d but found '%s'",
                        root,
                        s_name,
                        line_number,
                        repr(line),
                    )
                    log.error("Table header should begin with '%s'", cols[0])
                else:
                    # save key value pair
                    kv[line[0]] = line[1]
            else:
                # It should be the table header. Verify:
                if line != cols:
                    if line != cols + list(log_descr["extracols"].keys()):
                        log.error(
                            "Table headers do not match those 'on file'. %s != %s",
                            repr(line),
                            repr(cols),
                        )
                        return False
        else:
            # Data row: convert fields with the declared per-column types if
            # available, otherwise assume all-integer columns. The first
            # column becomes the row key.
            if isinstance(log_descr["cols"], OrderedDict):
                line = [
                    value_type(value)
                    for value_type, value in zip(log_descr["cols"].values(), line)
                ]
            else:
                line = list(map(int, line))
            data[line[0]] = line[1:]
    if not data:
        # Bug fix: previously an empty table still registered the sample,
        # which later crashed the plotting code on empty data.
        log.warning("File %s appears to contain no data for plotting, ignoring...", fn)
        return False
    if s_name in self.mod_data[file_type]:
        log.debug("Duplicate sample name found! Overwriting: %s", s_name)
    self.mod_data[file_type][s_name] = {"data": data, "kv": kv}
    log.debug(
        "Found %s output for sample %s with %d rows", file_type, s_name, len(data)
    )
    return True
|
https://github.com/ewels/MultiQC/issues/952
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching 'qc/'
Searching 252 files.. [####################################] 100%
[INFO ] preseq : Found 24 reports
[INFO ] rseqc : Found 12 read_distribution reports
[INFO ] rseqc : Found 12 read_duplication reports
[INFO ] rseqc : Found 12 infer_experiment reports
[INFO ] samtools : Found 12 flagstat reports
[INFO ] bbmap : Found 12 reports
[ERROR ] multiqc : Oops! The 'bbmap' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
qc/trimstats/41_DMSO_rep_2_S12_R1_001.refstats.txt
============================================================
Module bbmap raised an exception: Traceback (most recent call last):
File "/Users/magr0763/.local/bin/multiqc", line 440, in multiqc
output = mod()
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 62, in __init__
plot = self.plot(file_type)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 155, in plot
plot_params=plot_params)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/plot_basic_hist.py", line 37, in plot_basic_hist
'xmax': xmax
UnboundLocalError: local variable 'xmax' referenced before assignment
============================================================
[INFO ] fastqc : Found 24 reports
[INFO ] multiqc : Compressing plot data
|
UnboundLocalError
|
def plot_aqhist(samples, file_type, **plot_args):
    """Create line graph plot of histogram data for BBMap 'aqhist' output.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total of read counts; the x axis is truncated where 99.9% of
    # the data is covered so long sparse tails don't flatten the plot.
    total_reads = 0
    for s in samples:
        for pos in samples[s]["data"]:
            total_reads += int(samples[s]["data"][pos][0])
    remaining = total_reads * 0.999
    all_x = set()
    found_cutoff = False
    merged_rows = sorted(chain(*[samples[s]["data"].items() for s in samples]))
    for x_val, row in merged_rows:
        all_x.add(x_val)
        remaining -= row[0]
        if remaining < 0:
            xmax = x_val
            found_cutoff = True
            break
    if not found_cutoff:
        # Cutoff never reached: show the full x range.
        xmax = max(all_x)
    columns_to_plot = {
        "Counts": {0: "Read1", 2: "Read2"},
        "Proportions": {1: "Read1", 3: "Read2"},
    }
    # One dataset per column type, one series per (sample, read) pair.
    plot_data = []
    for column_type in columns_to_plot:
        series = {}
        for sample in samples:
            sample_data = samples[sample]["data"]
            for column, column_name in columns_to_plot[column_type].items():
                series[sample + "." + column_name] = {
                    x: sample_data[x][column] if x in sample_data else 0
                    for x in all_x
                }
        plot_data.append(series)
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
        "xlab": "Quality score",
        "data_labels": [
            {"name": "Count data", "ylab": "Read count"},
            {"name": "Proportion data", "ylab": "Proportion of reads"},
        ],
    }
    plot_params.update(plot_args["plot_params"])
    return linegraph.plot(plot_data, plot_params)
|
def plot_aqhist(samples, file_type, **plot_args):
    """Create line graph plot of histogram data for BBMap 'aqhist' output.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total of read counts; the x axis is truncated where 99.9% of
    # the data is covered so long sparse tails don't flatten the plot.
    sumy = sum(
        [
            int(samples[sample]["data"][x][0])
            for sample in samples
            for x in samples[sample]["data"]
        ]
    )
    cutoff = sumy * 0.999
    all_x = set()
    for item in sorted(chain(*[samples[sample]["data"].items() for sample in samples])):
        all_x.add(item[0])
        cutoff -= item[1][0]
        if cutoff < 0:
            xmax = item[0]
            break
    else:
        # Bug fix: when the cutoff was never reached, xmax was left unset,
        # raising UnboundLocalError below. Fall back to the largest x seen.
        xmax = max(all_x)
    columns_to_plot = {
        "Counts": {0: "Read1", 2: "Read2"},
        "Proportions": {1: "Read1", 3: "Read2"},
    }
    plot_data = []
    for column_type in columns_to_plot:
        plot_data.append(
            {
                sample + "." + column_name: {
                    x: samples[sample]["data"][x][column]
                    if x in samples[sample]["data"]
                    else 0
                    for x in all_x
                }
                for sample in samples
                for column, column_name in columns_to_plot[column_type].items()
            }
        )
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
        "xlab": "Quality score",
        "data_labels": [
            {"name": "Count data", "ylab": "Read count"},
            {"name": "Proportion data", "ylab": "Proportion of reads"},
        ],
    }
    plot_params.update(plot_args["plot_params"])
    plot = linegraph.plot(plot_data, plot_params)
    return plot
|
https://github.com/ewels/MultiQC/issues/952
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching 'qc/'
Searching 252 files.. [####################################] 100%
[INFO ] preseq : Found 24 reports
[INFO ] rseqc : Found 12 read_distribution reports
[INFO ] rseqc : Found 12 read_duplication reports
[INFO ] rseqc : Found 12 infer_experiment reports
[INFO ] samtools : Found 12 flagstat reports
[INFO ] bbmap : Found 12 reports
[ERROR ] multiqc : Oops! The 'bbmap' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
qc/trimstats/41_DMSO_rep_2_S12_R1_001.refstats.txt
============================================================
Module bbmap raised an exception: Traceback (most recent call last):
File "/Users/magr0763/.local/bin/multiqc", line 440, in multiqc
output = mod()
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 62, in __init__
plot = self.plot(file_type)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 155, in plot
plot_params=plot_params)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/plot_basic_hist.py", line 37, in plot_basic_hist
'xmax': xmax
UnboundLocalError: local variable 'xmax' referenced before assignment
============================================================
[INFO ] fastqc : Found 24 reports
[INFO ] multiqc : Compressing plot data
|
UnboundLocalError
|
def plot_basic_hist(samples, file_type, **plot_args):
    """Create line graph plot for basic histogram data for 'file_type'.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99.9%
    # of the data is covered.
    total = 0
    for s in samples:
        for pos in samples[s]["data"]:
            total += int(samples[s]["data"][pos][0])
    remaining = total * 0.999
    all_x = set()
    found_cutoff = False
    merged_rows = sorted(chain(*[samples[s]["data"].items() for s in samples]))
    for x_val, row in merged_rows:
        all_x.add(x_val)
        remaining -= row[0]
        if remaining < 0:
            xmax = x_val
            found_cutoff = True
            break
    if not found_cutoff:
        # Cutoff never reached: show the full x range.
        xmax = max(all_x)
    # One series per sample; missing x positions are filled with zero.
    data = {}
    for sample in samples:
        sample_data = samples[sample]["data"]
        data[sample] = {
            x: sample_data[x][0] if x in sample_data else 0 for x in all_x
        }
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
    }
    plot_params.update(plot_args["plot_params"])
    return linegraph.plot(data, plot_params)
|
def plot_basic_hist(samples, file_type, **plot_args):
    """Create line graph plot for basic histogram data for 'file_type'.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99.9%
    # of the data is covered.
    sumy = sum(
        [
            int(samples[sample]["data"][x][0])
            for sample in samples
            for x in samples[sample]["data"]
        ]
    )
    cutoff = sumy * 0.999
    all_x = set()
    for item in sorted(chain(*[samples[sample]["data"].items() for sample in samples])):
        all_x.add(item[0])
        cutoff -= item[1][0]
        if cutoff < 0:
            xmax = item[0]
            break
    else:
        # Bug fix: when the cutoff was never reached, xmax was left unset,
        # raising UnboundLocalError below. Fall back to the largest x seen.
        xmax = max(all_x)
    data = {
        sample: {
            x: samples[sample]["data"][x][0] if x in samples[sample]["data"] else 0
            for x in all_x
        }
        for sample in samples
    }
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
    }
    plot_params.update(plot_args["plot_params"])
    plot = linegraph.plot(data, plot_params)
    return plot
|
https://github.com/ewels/MultiQC/issues/952
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching 'qc/'
Searching 252 files.. [####################################] 100%
[INFO ] preseq : Found 24 reports
[INFO ] rseqc : Found 12 read_distribution reports
[INFO ] rseqc : Found 12 read_duplication reports
[INFO ] rseqc : Found 12 infer_experiment reports
[INFO ] samtools : Found 12 flagstat reports
[INFO ] bbmap : Found 12 reports
[ERROR ] multiqc : Oops! The 'bbmap' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
qc/trimstats/41_DMSO_rep_2_S12_R1_001.refstats.txt
============================================================
Module bbmap raised an exception: Traceback (most recent call last):
File "/Users/magr0763/.local/bin/multiqc", line 440, in multiqc
output = mod()
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 62, in __init__
plot = self.plot(file_type)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 155, in plot
plot_params=plot_params)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/plot_basic_hist.py", line 37, in plot_basic_hist
'xmax': xmax
UnboundLocalError: local variable 'xmax' referenced before assignment
============================================================
[INFO ] fastqc : Found 24 reports
[INFO ] multiqc : Compressing plot data
|
UnboundLocalError
|
def plot_covhist(samples, file_type, **plot_args):
    """Create line graph plot for basic histogram data for 'covhist'.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99.9%
    # of the data is covered.
    total = 0
    for s in samples:
        for depth in samples[s]["data"]:
            total += int(samples[s]["data"][depth][0])
    remaining = total * 0.999
    all_x = set()
    found_cutoff = False
    merged_rows = sorted(chain(*[samples[s]["data"].items() for s in samples]))
    for x_val, row in merged_rows:
        all_x.add(x_val)
        remaining -= row[0]
        if remaining < 0:
            xmax = x_val
            found_cutoff = True
            break
    if not found_cutoff:
        # Cutoff never reached: show the full x range.
        xmax = max(all_x)
    # One series per sample; missing coverage depths are filled with zero.
    data = {}
    for sample in samples:
        sample_data = samples[sample]["data"]
        data[sample] = {
            x: sample_data[x][0] if x in sample_data else 0 for x in all_x
        }
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "smooth_points": 400,
        "xmax": xmax,
        "xlab": "Coverage (depth)",
        "ylab": "Number of occurences",
    }
    plot_params.update(plot_args["plot_params"])
    return linegraph.plot(data, plot_params)
|
def plot_covhist(samples, file_type, **plot_args):
    """Create line graph plot for basic histogram data for 'covhist'.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99.9%
    # of the data is covered.
    sumy = sum(
        [
            int(samples[sample]["data"][x][0])
            for sample in samples
            for x in samples[sample]["data"]
        ]
    )
    cutoff = sumy * 0.999
    all_x = set()
    for item in sorted(chain(*[samples[sample]["data"].items() for sample in samples])):
        all_x.add(item[0])
        cutoff -= item[1][0]
        if cutoff < 0:
            xmax = item[0]
            break
    else:
        # Bug fix: when the cutoff was never reached, xmax was left unset,
        # raising UnboundLocalError below. Fall back to the largest x seen.
        xmax = max(all_x)
    data = {
        sample: {
            x: samples[sample]["data"][x][0] if x in samples[sample]["data"] else 0
            for x in all_x
        }
        for sample in samples
    }
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "smooth_points": 400,
        "xmax": xmax,
        "xlab": "Coverage (depth)",
        "ylab": "Number of occurences",
    }
    plot_params.update(plot_args["plot_params"])
    plot = linegraph.plot(data, plot_params)
    return plot
|
https://github.com/ewels/MultiQC/issues/952
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching 'qc/'
Searching 252 files.. [####################################] 100%
[INFO ] preseq : Found 24 reports
[INFO ] rseqc : Found 12 read_distribution reports
[INFO ] rseqc : Found 12 read_duplication reports
[INFO ] rseqc : Found 12 infer_experiment reports
[INFO ] samtools : Found 12 flagstat reports
[INFO ] bbmap : Found 12 reports
[ERROR ] multiqc : Oops! The 'bbmap' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
qc/trimstats/41_DMSO_rep_2_S12_R1_001.refstats.txt
============================================================
Module bbmap raised an exception: Traceback (most recent call last):
File "/Users/magr0763/.local/bin/multiqc", line 440, in multiqc
output = mod()
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 62, in __init__
plot = self.plot(file_type)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 155, in plot
plot_params=plot_params)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/plot_basic_hist.py", line 37, in plot_basic_hist
'xmax': xmax
UnboundLocalError: local variable 'xmax' referenced before assignment
============================================================
[INFO ] fastqc : Found 24 reports
[INFO ] multiqc : Compressing plot data
|
UnboundLocalError
|
def plot_ihist(samples, file_type, **plot_args):
    """Create line graph plot for basic histogram data for 'ihist'.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99%
    # of the data is covered (looser cutoff than the other histograms).
    total = 0
    for s in samples:
        for size in samples[s]["data"]:
            total += int(samples[s]["data"][size][0])
    remaining = total * 0.99
    all_x = set()
    found_cutoff = False
    merged_rows = sorted(chain(*[samples[s]["data"].items() for s in samples]))
    for x_val, row in merged_rows:
        all_x.add(x_val)
        remaining -= row[0]
        if remaining < 0:
            xmax = x_val
            found_cutoff = True
            break
    if not found_cutoff:
        # Cutoff never reached: show the full x range.
        xmax = max(all_x)
    # One series per sample; missing insert sizes are filled with zero.
    data = {}
    for sample in samples:
        sample_data = samples[sample]["data"]
        data[sample] = {
            x: sample_data[x][0] if x in sample_data else 0 for x in all_x
        }
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
        "xlab": "Insert size (base pairs)",
        "ylab": "Read count",
    }
    plot_params.update(plot_args["plot_params"])
    return linegraph.plot(data, plot_params)
|
def plot_ihist(samples, file_type, **plot_args):
    """Create line graph plot for basic histogram data for 'ihist'.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99%
    # of the data is covered (looser cutoff than the other histograms).
    sumy = sum(
        [
            int(samples[sample]["data"][x][0])
            for sample in samples
            for x in samples[sample]["data"]
        ]
    )
    cutoff = sumy * 0.99
    all_x = set()
    for item in sorted(chain(*[samples[sample]["data"].items() for sample in samples])):
        all_x.add(item[0])
        cutoff -= item[1][0]
        if cutoff < 0:
            xmax = item[0]
            break
    else:
        # Bug fix: when the cutoff was never reached, xmax was left unset,
        # raising UnboundLocalError below. Fall back to the largest x seen.
        xmax = max(all_x)
    data = {
        sample: {
            x: samples[sample]["data"][x][0] if x in samples[sample]["data"] else 0
            for x in all_x
        }
        for sample in samples
    }
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
        "xlab": "Insert size (base pairs)",
        "ylab": "Read count",
    }
    plot_params.update(plot_args["plot_params"])
    plot = linegraph.plot(data, plot_params)
    return plot
|
https://github.com/ewels/MultiQC/issues/952
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching 'qc/'
Searching 252 files.. [####################################] 100%
[INFO ] preseq : Found 24 reports
[INFO ] rseqc : Found 12 read_distribution reports
[INFO ] rseqc : Found 12 read_duplication reports
[INFO ] rseqc : Found 12 infer_experiment reports
[INFO ] samtools : Found 12 flagstat reports
[INFO ] bbmap : Found 12 reports
[ERROR ] multiqc : Oops! The 'bbmap' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
qc/trimstats/41_DMSO_rep_2_S12_R1_001.refstats.txt
============================================================
Module bbmap raised an exception: Traceback (most recent call last):
File "/Users/magr0763/.local/bin/multiqc", line 440, in multiqc
output = mod()
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 62, in __init__
plot = self.plot(file_type)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 155, in plot
plot_params=plot_params)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/plot_basic_hist.py", line 37, in plot_basic_hist
'xmax': xmax
UnboundLocalError: local variable 'xmax' referenced before assignment
============================================================
[INFO ] fastqc : Found 24 reports
[INFO ] multiqc : Compressing plot data
|
UnboundLocalError
|
def plot_qhist(samples, file_type, **plot_args):
    """Create line graph plot of histogram data for BBMap 'qhist' output.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99.9%
    # of the data is covered.
    total = 0
    for s in samples:
        for pos in samples[s]["data"]:
            total += int(samples[s]["data"][pos][0])
    remaining = total * 0.999
    all_x = set()
    found_cutoff = False
    merged_rows = sorted(chain(*[samples[s]["data"].items() for s in samples]))
    for x_val, row in merged_rows:
        all_x.add(x_val)
        remaining -= row[0]
        if remaining < 0:
            xmax = x_val
            found_cutoff = True
            break
    if not found_cutoff:
        # Cutoff never reached: show the full x range.
        xmax = max(all_x)
    # The columns_to_plot dictionary should be replaced with an OrderedDict,
    # not to rely on the ordered-by-default implementation details on Python 3.6
    columns_to_plot = {
        "Linear": {0: "Read1", 3: "Read2"},
        "Logarithmic": {1: "Read1", 4: "Read2"},
        "Measured": {2: "Read1", 5: "Read2"},
    }
    # One dataset per column type, one series per (sample, read) pair.
    plot_data = []
    for column_type in columns_to_plot:
        series = {}
        for sample in samples:
            sample_data = samples[sample]["data"]
            for column, column_name in columns_to_plot[column_type].items():
                series[sample + "." + column_name] = {
                    x: sample_data[x][column] if x in sample_data else 0
                    for x in all_x
                }
        plot_data.append(series)
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
        "xlab": "Position in read",
        "data_labels": [
            {"name": "Linear", "ylab": "Quality score"},
            {"name": "Logarithmic", "ylab": "Log score"},
            {"name": "Measured", "ylab": "Measured quality value"},
        ],
    }
    plot_params.update(plot_args["plot_params"])
    return linegraph.plot(plot_data, plot_params)
|
def plot_qhist(samples, file_type, **plot_args):
    """Create line graph plot of histogram data for BBMap 'qhist' output.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Grand total over all samples; the x axis is truncated where 99.9%
    # of the data is covered.
    sumy = sum(
        [
            int(samples[sample]["data"][x][0])
            for sample in samples
            for x in samples[sample]["data"]
        ]
    )
    cutoff = sumy * 0.999
    all_x = set()
    for item in sorted(chain(*[samples[sample]["data"].items() for sample in samples])):
        all_x.add(item[0])
        cutoff -= item[1][0]
        if cutoff < 0:
            xmax = item[0]
            break
    else:
        # Bug fix: when the cutoff was never reached, xmax was left unset,
        # raising UnboundLocalError below. Fall back to the largest x seen.
        xmax = max(all_x)
    # The columns_to_plot dictionary should be replaced with an OrderedDict,
    # not to rely on the ordered-by-default implementation details on Python 3.6
    columns_to_plot = {
        "Linear": {0: "Read1", 3: "Read2"},
        "Logarithmic": {1: "Read1", 4: "Read2"},
        "Measured": {2: "Read1", 5: "Read2"},
    }
    plot_data = []
    for column_type in columns_to_plot:
        plot_data.append(
            {
                sample + "." + column_name: {
                    x: samples[sample]["data"][x][column]
                    if x in samples[sample]["data"]
                    else 0
                    for x in all_x
                }
                for sample in samples
                for column, column_name in columns_to_plot[column_type].items()
            }
        )
    plot_params = {
        "id": "bbmap-" + file_type + "_plot",
        "title": "BBTools: " + plot_args["plot_title"],
        "xmax": xmax,
        "xlab": "Position in read",
        "data_labels": [
            {"name": "Linear", "ylab": "Quality score"},
            {"name": "Logarithmic", "ylab": "Log score"},
            {"name": "Measured", "ylab": "Measured quality value"},
        ],
    }
    plot_params.update(plot_args["plot_params"])
    plot = linegraph.plot(plot_data, plot_params)
    return plot
|
https://github.com/ewels/MultiQC/issues/952
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching 'qc/'
Searching 252 files.. [####################################] 100%
[INFO ] preseq : Found 24 reports
[INFO ] rseqc : Found 12 read_distribution reports
[INFO ] rseqc : Found 12 read_duplication reports
[INFO ] rseqc : Found 12 infer_experiment reports
[INFO ] samtools : Found 12 flagstat reports
[INFO ] bbmap : Found 12 reports
[ERROR ] multiqc : Oops! The 'bbmap' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
qc/trimstats/41_DMSO_rep_2_S12_R1_001.refstats.txt
============================================================
Module bbmap raised an exception: Traceback (most recent call last):
File "/Users/magr0763/.local/bin/multiqc", line 440, in multiqc
output = mod()
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 62, in __init__
plot = self.plot(file_type)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/bbmap.py", line 155, in plot
plot_params=plot_params)
File "/Users/magr0763/.local/lib/python3.6/site-packages/multiqc/modules/bbmap/plot_basic_hist.py", line 37, in plot_basic_hist
'xmax': xmax
UnboundLocalError: local variable 'xmax' referenced before assignment
============================================================
[INFO ] fastqc : Found 24 reports
[INFO ] multiqc : Compressing plot data
|
UnboundLocalError
|
def __init__(self):
    """Set up the Peddy MultiQC module: find, parse and plot peddy outputs."""
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="Peddy",
        anchor="peddy",
        href="https://github.com/brentp/peddy",
        info="calculates genotype :: pedigree correspondence checks, ancestry checks and sex checks using VCF files.",
    )
    # Find and load any Peddy reports
    self.peddy_data = dict()
    # NOTE(review): the three *_length_* dicts are initialised but not
    # populated anywhere in this method — possibly vestigial; confirm usage.
    self.peddy_length_counts = dict()
    self.peddy_length_exp = dict()
    self.peddy_length_obsexp = dict()
    # parse peddy summary file
    for f in self.find_log_files("peddy/summary_table"):
        parsed_data = self.parse_peddy_summary(f)
        if parsed_data is not None:
            for s_name in parsed_data:
                s_name = self.clean_s_name(s_name, f["root"])
                try:
                    self.peddy_data[s_name].update(parsed_data[s_name])
                except KeyError:
                    self.peddy_data[s_name] = parsed_data[s_name]
    # parse peddy CSV files
    for pattern in ["het_check", "ped_check", "sex_check"]:
        sp_key = "peddy/{}".format(pattern)
        for f in self.find_log_files(sp_key):
            # some columns have the same name in het_check and sex_check (median_depth)
            # pass pattern to parse_peddy_csv so the column names can include pattern to
            # avoid being overwritten
            parsed_data = self.parse_peddy_csv(f, pattern)
            if parsed_data is not None:
                # Sample names are already cleaned inside parse_peddy_csv,
                # so the parsed keys can be used directly here.
                for s_name in parsed_data:
                    try:
                        self.peddy_data[s_name].update(parsed_data[s_name])
                    except KeyError:
                        self.peddy_data[s_name] = parsed_data[s_name]
    # parse background PCA JSON file, this is identitical for all peddy runs,
    # so just parse the first one we find
    for f in self.find_log_files("peddy/background_pca"):
        background = json.loads(f["f"])
        PC1 = [x["PC1"] for x in background]
        PC2 = [x["PC2"] for x in background]
        ancestry = [x["ancestry"] for x in background]
        self.peddy_data["background_pca"] = {
            "PC1": PC1,
            "PC2": PC2,
            "ancestry": ancestry,
        }
        break
    # Filter to strip out ignored sample names
    self.peddy_data = self.ignore_samples(self.peddy_data)
    if len(self.peddy_data) == 0:
        # No reports found: UserWarning tells the MultiQC core to skip
        # this module cleanly.
        raise UserWarning
    log.info("Found {} reports".format(len(self.peddy_data)))
    # Write parsed report data to a file
    self.write_data_file(self.peddy_data, "multiqc_peddy")
    # Basic Stats Table
    self.peddy_general_stats_table()
    # PCA plot
    self.peddy_pca_plot()
    # Relatedness plot
    self.peddy_relatedness_plot()
    # hetcheck plot
    self.peddy_het_check_plot()
    self.peddy_sex_check_plot()
|
def __init__(self):
    """Set up the Peddy MultiQC module: find, parse and plot peddy outputs."""
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="Peddy",
        anchor="peddy",
        href="https://github.com/brentp/peddy",
        info="calculates genotype :: pedigree correspondence checks, ancestry checks and sex checks using VCF files.",
    )
    # Find and load any Peddy reports
    self.peddy_data = dict()
    self.peddy_length_counts = dict()
    self.peddy_length_exp = dict()
    self.peddy_length_obsexp = dict()
    # parse peddy summary file
    for f in self.find_log_files("peddy/summary_table"):
        parsed_data = self.parse_peddy_summary(f)
        if parsed_data is not None:
            for s_name_raw in parsed_data:
                # Bug fix: clean the name for display/merging, but keep
                # indexing parsed_data by its raw key. Previously the cleaned
                # name was used for both, raising KeyError whenever cleaning
                # changed the name (e.g. 'ts_bcbio' -> 'ts').
                s_name = self.clean_s_name(s_name_raw, f["root"])
                try:
                    self.peddy_data[s_name].update(parsed_data[s_name_raw])
                except KeyError:
                    self.peddy_data[s_name] = parsed_data[s_name_raw]
    # parse peddy CSV files
    for pattern in ["het_check", "ped_check", "sex_check"]:
        sp_key = "peddy/{}".format(pattern)
        for f in self.find_log_files(sp_key):
            # some columns have the same name in het_check and sex_check (median_depth)
            # pass pattern to parse_peddy_csv so the column names can include pattern to
            # avoid being overwritten
            parsed_data = self.parse_peddy_csv(f, pattern)
            if parsed_data is not None:
                for s_name_raw in parsed_data:
                    # Same raw-key/clean-name split as above (this was the
                    # loop crashing with KeyError: 'ts' in issue #1024).
                    s_name = self.clean_s_name(s_name_raw, f["root"])
                    try:
                        self.peddy_data[s_name].update(parsed_data[s_name_raw])
                    except KeyError:
                        self.peddy_data[s_name] = parsed_data[s_name_raw]
    # parse background PCA JSON file, this is identitical for all peddy runs,
    # so just parse the first one we find
    for f in self.find_log_files("peddy/background_pca"):
        background = json.loads(f["f"])
        PC1 = [x["PC1"] for x in background]
        PC2 = [x["PC2"] for x in background]
        ancestry = [x["ancestry"] for x in background]
        self.peddy_data["background_pca"] = {
            "PC1": PC1,
            "PC2": PC2,
            "ancestry": ancestry,
        }
        break
    # Filter to strip out ignored sample names
    self.peddy_data = self.ignore_samples(self.peddy_data)
    if len(self.peddy_data) == 0:
        # No reports found: UserWarning tells the MultiQC core to skip
        # this module cleanly.
        raise UserWarning
    log.info("Found {} reports".format(len(self.peddy_data)))
    # Write parsed report data to a file
    self.write_data_file(self.peddy_data, "multiqc_peddy")
    # Basic Stats Table
    self.peddy_general_stats_table()
    # PCA plot
    self.peddy_pca_plot()
    # Relatedness plot
    self.peddy_relatedness_plot()
    # hetcheck plot
    self.peddy_het_check_plot()
    self.peddy_sex_check_plot()
|
https://github.com/ewels/MultiQC/issues/1024
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/target_info.yaml'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/contamination/ts_bcbio-verifybamid.failed'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_N_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_quality.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_GC_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_quality_scores.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Duplication_Levels.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Length_Distribution.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_data.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_report.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/ts_bcbio.zip'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.background_pca.json'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.het_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.rel-difference.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.peddy.ped'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio-idxstats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats_germline.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/viral/ts_bcbio-gdc-viral-completeness.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/ts_bcbio_bcbio.txt'
[INFO ] bcbio : Found 1 reports
/bcbio/anaconda/lib/python3.6/site-packages/multiqc_bcbio/bcbio.py:77: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
add_project_info(yaml.load(target_infos[0]['f']))
[INFO ] samtools : Found 1 idxstats reports
[ERROR ] multiqc : Oops! The 'peddy' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv
============================================================
Module peddy raised an exception: Traceback (most recent call last):
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 58, in __init__
self.peddy_data[s_name].update(parsed_data[s_name])
KeyError: 'ts'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/bcbio/tools/bin/multiqc", line 440, in multiqc
output = mod()
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 60, in __init__
self.peddy_data[s_name] = parsed_data[s_name]
KeyError: 'ts'
============================================================
[INFO ] bcftools : Found 1 stats reports
[INFO ] bcftools : Found 1 stats reports
[INFO ] fastqc : Found 1 reports
[INFO ] multiqc : Compressing plot data
[INFO ] multiqc : Report : ../../bcbiotx/tmp1n5rgkh7/multiqc_report.html
[INFO ] multiqc : Data : ../../bcbiotx/tmp1n5rgkh7/multiqc_data
[INFO ] multiqc : MultiQC complete
' returned non-zero exit status 1.
|
KeyError
|
def parse_peddy_csv(self, f, pattern):
    """Parse csv output from peddy.

    Args:
        f: MultiQC file dict with keys 'f' (contents), 'fn' (filename)
           and 'root' (directory).
        pattern: which peddy output this is ('sex_check', 'het_check',
           'ped_check'); appended as a suffix to every parsed column key.

    Returns:
        dict mapping cleaned sample name -> {column_pattern: value},
        or None if no sample-name column or no data rows were found.
    """
    parsed_data = dict()
    headers = None
    s_name_idx = None
    for line in f["f"].splitlines():
        s = line.split(",")
        if headers is None:
            # First row: locate the column(s) holding the sample name.
            headers = s
            try:
                s_name_idx = [headers.index("sample_id")]
            except ValueError:
                try:
                    # ped_check files identify a *pair* of samples instead.
                    s_name_idx = [headers.index("sample_a"), headers.index("sample_b")]
                except ValueError:
                    # log.warn is a deprecated alias of log.warning
                    log.warning(
                        "Could not find sample name in Peddy output: {}".format(f["fn"])
                    )
                    return None
        else:
            s_name = "-".join([s[idx] for idx in s_name_idx])
            # Normalise the sample name so it matches the other peddy outputs.
            s_name = self.clean_s_name(s_name, f["root"])
            parsed_data[s_name] = dict()
            for i, v in enumerate(s):
                if i not in s_name_idx:
                    if headers[i] == "error" and pattern == "sex_check":
                        # Invert so the flag reads as pass/fail in the report.
                        v = "True" if v == "False" else "False"
                    try:
                        # add the pattern as a suffix to key
                        parsed_data[s_name][headers[i] + "_" + pattern] = float(v)
                    except ValueError:
                        # Non-numeric values are kept verbatim.
                        parsed_data[s_name][headers[i] + "_" + pattern] = v
    if len(parsed_data) == 0:
        return None
    return parsed_data
|
def parse_peddy_csv(self, f, pattern):
"""Parse csv output from peddy"""
parsed_data = dict()
headers = None
s_name_idx = None
for l in f["f"].splitlines():
s = l.split(",")
if headers is None:
headers = s
try:
s_name_idx = [headers.index("sample_id")]
except ValueError:
try:
s_name_idx = [headers.index("sample_a"), headers.index("sample_b")]
except ValueError:
log.warn(
"Could not find sample name in Peddy output: {}".format(f["fn"])
)
return None
else:
s_name = "-".join([s[idx] for idx in s_name_idx])
parsed_data[s_name] = dict()
for i, v in enumerate(s):
if i not in s_name_idx:
if headers[i] == "error" and pattern == "sex_check":
v = "True" if v == "False" else "False"
try:
# add the pattern as a suffix to key
parsed_data[s_name][headers[i] + "_" + pattern] = float(v)
except ValueError:
# add the pattern as a suffix to key
parsed_data[s_name][headers[i] + "_" + pattern] = v
if len(parsed_data) == 0:
return None
return parsed_data
|
https://github.com/ewels/MultiQC/issues/1024
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/target_info.yaml'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/contamination/ts_bcbio-verifybamid.failed'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_N_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_quality.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_GC_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_quality_scores.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Duplication_Levels.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Length_Distribution.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_data.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_report.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/ts_bcbio.zip'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.background_pca.json'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.het_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.rel-difference.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.peddy.ped'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio-idxstats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats_germline.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/viral/ts_bcbio-gdc-viral-completeness.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/ts_bcbio_bcbio.txt'
[INFO ] bcbio : Found 1 reports
/bcbio/anaconda/lib/python3.6/site-packages/multiqc_bcbio/bcbio.py:77: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
add_project_info(yaml.load(target_infos[0]['f']))
[INFO ] samtools : Found 1 idxstats reports
[ERROR ] multiqc : Oops! The 'peddy' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv
============================================================
Module peddy raised an exception: Traceback (most recent call last):
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 58, in __init__
self.peddy_data[s_name].update(parsed_data[s_name])
KeyError: 'ts'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/bcbio/tools/bin/multiqc", line 440, in multiqc
output = mod()
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 60, in __init__
self.peddy_data[s_name] = parsed_data[s_name]
KeyError: 'ts'
============================================================
[INFO ] bcftools : Found 1 stats reports
[INFO ] bcftools : Found 1 stats reports
[INFO ] fastqc : Found 1 reports
[INFO ] multiqc : Compressing plot data
[INFO ] multiqc : Report : ../../bcbiotx/tmp1n5rgkh7/multiqc_report.html
[INFO ] multiqc : Data : ../../bcbiotx/tmp1n5rgkh7/multiqc_data
[INFO ] multiqc : MultiQC complete
' returned non-zero exit status 1.
|
KeyError
|
def peddy_pca_plot(self):
    """Add a PCA scatter section: samples overlaid on the background PCA points."""
    ancestry_colors = {
        "SAS": "rgb(68,1,81,1)",
        "EAS": "rgb(59,81,139,1)",
        "AMR": "rgb(33,144,141,1)",
        "AFR": "rgb(92,200,99,1)",
        "EUR": "rgb(253,231,37,1)",
    }
    background_ancestry_colors = {
        "SAS": "rgb(68,1,81,0.1)",
        "EAS": "rgb(59,81,139,0.1)",
        "AMR": "rgb(33,144,141,0.1)",
        "AFR": "rgb(92,200,99,0.1)",
        "EUR": "rgb(253,231,37,0.1)",
    }
    default_color = "#000000"
    default_background_color = "rgb(211,211,211,0.05)"
    data = OrderedDict()
    # Background points are inserted first so real samples are drawn on top.
    bg = self.peddy_data.pop("background_pca", {})
    if bg:
        points = []
        for pc1, pc2, ancestry in zip(bg["PC1"], bg["PC2"], bg["ancestry"]):
            points.append(
                {
                    "x": pc1,
                    "y": pc2,
                    "color": default_background_color,
                    "name": ancestry,
                    "marker_size": 1,
                }
            )
        data["background"] = points
    for s_name, sample in self.peddy_data.items():
        if "PC1_het_check" not in sample or "PC2_het_check" not in sample:
            continue
        data[s_name] = {"x": sample["PC1_het_check"], "y": sample["PC2_het_check"]}
        # Colour by predicted ancestry only when peddy reported one.
        if "ancestry-prediction" in sample:
            data[s_name]["color"] = ancestry_colors.get(
                sample["ancestry-prediction"], default_color
            )
    pconfig = {
        "id": "peddy_pca_plot",
        "title": "Peddy: PCA Plot",
        "xlab": "PC1",
        "ylab": "PC2",
        "marker_size": 5,
        "marker_line_width": 0,
    }
    if data:
        self.add_section(
            name="PCA Plot", anchor="peddy-pca-plot", plot=scatter.plot(data, pconfig)
        )
|
def peddy_pca_plot(self):
    """Add a PCA scatter section: samples overlaid on the background PCA points.

    Fix: 'ancestry-prediction' is now treated as optional. Previously
    ``d["ancestry-prediction"]`` raised an uncaught KeyError whenever a
    sample had PC coordinates but no ancestry prediction, crashing the
    whole module.
    """
    ancestry_colors = {
        "SAS": "rgb(68,1,81,1)",
        "EAS": "rgb(59,81,139,1)",
        "AMR": "rgb(33,144,141,1)",
        "AFR": "rgb(92,200,99,1)",
        "EUR": "rgb(253,231,37,1)",
    }
    background_ancestry_colors = {
        "SAS": "rgb(68,1,81,0.1)",
        "EAS": "rgb(59,81,139,0.1)",
        "AMR": "rgb(33,144,141,0.1)",
        "AFR": "rgb(92,200,99,0.1)",
        "EUR": "rgb(253,231,37,0.1)",
    }
    default_color = "#000000"
    default_background_color = "rgb(211,211,211,0.05)"
    data = OrderedDict()
    # plot the background data first, so it doesn't hide the actual data points
    d = self.peddy_data.pop("background_pca", {})
    if d:
        background = [
            {
                "x": pc1,
                "y": pc2,
                "color": default_background_color,
                "name": ancestry,
                "marker_size": 1,
            }
            for pc1, pc2, ancestry in zip(d["PC1"], d["PC2"], d["ancestry"])
        ]
        data["background"] = background
    for s_name, d in self.peddy_data.items():
        if "PC1_het_check" in d and "PC2_het_check" in d:
            data[s_name] = {"x": d["PC1_het_check"], "y": d["PC2_het_check"]}
            # The ancestry prediction may be absent; only colour when present.
            if "ancestry-prediction" in d:
                data[s_name]["color"] = ancestry_colors.get(
                    d["ancestry-prediction"], default_color
                )
    pconfig = {
        "id": "peddy_pca_plot",
        "title": "Peddy: PCA Plot",
        "xlab": "PC1",
        "ylab": "PC2",
        "marker_size": 5,
        "marker_line_width": 0,
    }
    if len(data) > 0:
        self.add_section(
            name="PCA Plot", anchor="peddy-pca-plot", plot=scatter.plot(data, pconfig)
        )
|
https://github.com/ewels/MultiQC/issues/1024
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/target_info.yaml'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/contamination/ts_bcbio-verifybamid.failed'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_N_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_quality.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_GC_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_quality_scores.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Duplication_Levels.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Length_Distribution.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_data.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_report.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/ts_bcbio.zip'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.background_pca.json'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.het_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.rel-difference.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.peddy.ped'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio-idxstats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats_germline.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/viral/ts_bcbio-gdc-viral-completeness.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/ts_bcbio_bcbio.txt'
[INFO ] bcbio : Found 1 reports
/bcbio/anaconda/lib/python3.6/site-packages/multiqc_bcbio/bcbio.py:77: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
add_project_info(yaml.load(target_infos[0]['f']))
[INFO ] samtools : Found 1 idxstats reports
[ERROR ] multiqc : Oops! The 'peddy' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv
============================================================
Module peddy raised an exception: Traceback (most recent call last):
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 58, in __init__
self.peddy_data[s_name].update(parsed_data[s_name])
KeyError: 'ts'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/bcbio/tools/bin/multiqc", line 440, in multiqc
output = mod()
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 60, in __init__
self.peddy_data[s_name] = parsed_data[s_name]
KeyError: 'ts'
============================================================
[INFO ] bcftools : Found 1 stats reports
[INFO ] bcftools : Found 1 stats reports
[INFO ] fastqc : Found 1 reports
[INFO ] multiqc : Compressing plot data
[INFO ] multiqc : Report : ../../bcbiotx/tmp1n5rgkh7/multiqc_report.html
[INFO ] multiqc : Data : ../../bcbiotx/tmp1n5rgkh7/multiqc_data
[INFO ] multiqc : MultiQC complete
' returned non-zero exit status 1.
|
KeyError
|
def peddy_het_check_plot(self):
    """Plot the het_check scatter: median depth vs proportion of het calls."""
    # Keep only samples carrying both het_check metrics.
    data = {
        s_name: {
            "x": metrics["median_depth_het_check"],
            "y": metrics["het_ratio_het_check"],
        }
        for s_name, metrics in self.peddy_data.items()
        if "median_depth_het_check" in metrics and "het_ratio_het_check" in metrics
    }
    pconfig = {
        "id": "peddy_het_check_plot",
        "title": "Peddy: Het Check",
        "xlab": "median depth",
        "ylab": "proportion het calls",
    }
    if data:
        self.add_section(
            name="Het Check",
            description="Proportion of sites that were heterozygous against median depth.",
            helptext="""
            A high proportion of heterozygous sites suggests contamination, a low proportion suggests consanguinity.
            See [the main peddy documentation](https://peddy.readthedocs.io/en/latest/output.html#het-check) for more details about the `het_check` command.
            """,
            anchor="peddy-hetcheck-plot",
            plot=scatter.plot(data, pconfig),
        )
|
def peddy_het_check_plot(self):
    """plot the het_check scatter plot

    Fix: the report section is now only added when at least one sample has
    het_check data. Previously ``add_section`` (and ``scatter.plot``) ran
    unconditionally, producing an empty section when no sample carried the
    required metrics.
    """
    # empty dictionary to add sample names, and dictionary of values
    data = {}
    # for each sample, and list in self.peddy_data
    for s_name, d in self.peddy_data.items():
        # check the sample contains the required columns
        if "median_depth_het_check" in d and "het_ratio_het_check" in d:
            # add sample to dictionary with value as a dictionary of points to plot
            data[s_name] = {
                "x": d["median_depth_het_check"],
                "y": d["het_ratio_het_check"],
            }
    pconfig = {
        "id": "peddy_het_check_plot",
        "title": "Peddy: Het Check",
        "xlab": "median depth",
        "ylab": "proportion het calls",
    }
    # Skip the section entirely when there is nothing to plot.
    if len(data) > 0:
        self.add_section(
            name="Het Check",
            description="Proportion of sites that were heterozygous against median depth.",
            helptext="""
            A high proportion of heterozygous sites suggests contamination, a low proportion suggests consanguinity.
            See [the main peddy documentation](https://peddy.readthedocs.io/en/latest/output.html#het-check) for more details about the `het_check` command.
            """,
            anchor="peddy-hetcheck-plot",
            plot=scatter.plot(data, pconfig),
        )
|
https://github.com/ewels/MultiQC/issues/1024
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/target_info.yaml'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/contamination/ts_bcbio-verifybamid.failed'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_N_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_quality.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_GC_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_quality_scores.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Duplication_Levels.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Length_Distribution.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_data.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_report.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/ts_bcbio.zip'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.background_pca.json'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.het_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.rel-difference.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.peddy.ped'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio-idxstats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats_germline.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/viral/ts_bcbio-gdc-viral-completeness.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/ts_bcbio_bcbio.txt'
[INFO ] bcbio : Found 1 reports
/bcbio/anaconda/lib/python3.6/site-packages/multiqc_bcbio/bcbio.py:77: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
add_project_info(yaml.load(target_infos[0]['f']))
[INFO ] samtools : Found 1 idxstats reports
[ERROR ] multiqc : Oops! The 'peddy' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv
============================================================
Module peddy raised an exception: Traceback (most recent call last):
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 58, in __init__
self.peddy_data[s_name].update(parsed_data[s_name])
KeyError: 'ts'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/bcbio/tools/bin/multiqc", line 440, in multiqc
output = mod()
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 60, in __init__
self.peddy_data[s_name] = parsed_data[s_name]
KeyError: 'ts'
============================================================
[INFO ] bcftools : Found 1 stats reports
[INFO ] bcftools : Found 1 stats reports
[INFO ] fastqc : Found 1 reports
[INFO ] multiqc : Compressing plot data
[INFO ] multiqc : Report : ../../bcbiotx/tmp1n5rgkh7/multiqc_report.html
[INFO ] multiqc : Data : ../../bcbiotx/tmp1n5rgkh7/multiqc_data
[INFO ] multiqc : MultiQC complete
' returned non-zero exit status 1.
|
KeyError
|
def peddy_sex_check_plot(self):
    """Plot PED-declared sex against the observed heterozygosity ratio."""
    sex_index = {"female": 0, "male": 1, "unknown": 2}
    data = {}
    for s_name, metrics in self.peddy_data.items():
        if "sex_het_ratio" not in metrics or "ped_sex_sex_check" not in metrics:
            continue
        # Unrecognised sex labels fall into the 'Unknown' category (index 2).
        data[s_name] = {
            "x": sex_index.get(metrics["ped_sex_sex_check"], 2),
            "y": metrics["sex_het_ratio"],
        }
    pconfig = {
        "id": "peddy_sex_check_plot",
        "title": "Peddy: Sex Check",
        "xlab": "Sex From Ped",
        "ylab": "Sex Het Ratio",
        "categories": ["Female", "Male", "Unknown"],
    }
    if data:
        self.add_section(
            name="Sex Check",
            description="Predicted sex against heterozygosity ratio",
            helptext="""
            Higher values of Sex Het Ratio suggests the sample is female, low values suggest male.
            See [the main peddy documentation](http://peddy.readthedocs.io/en/latest/#sex-check) for more details about the `het_check` command.
            """,
            anchor="peddy-sexcheck-plot",
            plot=scatter.plot(data, pconfig),
        )
|
def peddy_sex_check_plot(self):
    """Plot PED-declared sex against the observed heterozygosity ratio.

    Fix: the report section is now only added when at least one sample has
    sex_check data. Previously ``add_section`` (and ``scatter.plot``) ran
    unconditionally, producing an empty section when no sample carried the
    required metrics.
    """
    data = {}
    sex_index = {"female": 0, "male": 1, "unknown": 2}
    for s_name, d in self.peddy_data.items():
        if "sex_het_ratio" in d and "ped_sex_sex_check" in d:
            # Unrecognised sex labels fall into the 'Unknown' category.
            data[s_name] = {
                "x": sex_index.get(d["ped_sex_sex_check"], 2),
                "y": d["sex_het_ratio"],
            }
    pconfig = {
        "id": "peddy_sex_check_plot",
        "title": "Peddy: Sex Check",
        "xlab": "Sex From Ped",
        "ylab": "Sex Het Ratio",
        "categories": ["Female", "Male", "Unknown"],
    }
    # Skip the section entirely when there is nothing to plot.
    if len(data) > 0:
        self.add_section(
            name="Sex Check",
            description="Predicted sex against heterozygosity ratio",
            helptext="""
            Higher values of Sex Het Ratio suggests the sample is female, low values suggest male.
            See [the main peddy documentation](http://peddy.readthedocs.io/en/latest/#sex-check) for more details about the `het_check` command.
            """,
            anchor="peddy-sexcheck-plot",
            plot=scatter.plot(data, pconfig),
        )
|
https://github.com/ewels/MultiQC/issues/1024
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/target_info.yaml'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/contamination/ts_bcbio-verifybamid.failed'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_N_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_base_sequence_quality.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_GC_content.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Per_sequence_quality_scores.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Duplication_Levels.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/Sequence_Length_Distribution.tsv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_data.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/fastqc_report.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/fastqc/ts_bcbio.zip'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.background_pca.json'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.het_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.html'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.ped_check.rel-difference.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.peddy.ped'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio-idxstats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/samtools/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/variants/ts_bcbio_bcftools_stats_germline.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/viral/ts_bcbio-gdc-viral-completeness.txt'
[INFO ] multiqc : Searching '/home/ubuntu/naumenko/sample_b2/work/qc/multiqc/report/metrics/ts_bcbio_bcbio.txt'
[INFO ] bcbio : Found 1 reports
/bcbio/anaconda/lib/python3.6/site-packages/multiqc_bcbio/bcbio.py:77: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
add_project_info(yaml.load(target_infos[0]['f']))
[INFO ] samtools : Found 1 idxstats reports
[ERROR ] multiqc : Oops! The 'peddy' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
/home/ubuntu/naumenko/sample_b2/work/qc/ts_bcbio/peddy/ts_bcbio.sex_check.csv
============================================================
Module peddy raised an exception: Traceback (most recent call last):
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 58, in __init__
self.peddy_data[s_name].update(parsed_data[s_name])
KeyError: 'ts'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/bcbio/tools/bin/multiqc", line 440, in multiqc
output = mod()
File "/bcbio/anaconda/lib/python3.6/site-packages/multiqc/modules/peddy/peddy.py", line 60, in __init__
self.peddy_data[s_name] = parsed_data[s_name]
KeyError: 'ts'
============================================================
[INFO ] bcftools : Found 1 stats reports
[INFO ] bcftools : Found 1 stats reports
[INFO ] fastqc : Found 1 reports
[INFO ] multiqc : Compressing plot data
[INFO ] multiqc : Report : ../../bcbiotx/tmp1n5rgkh7/multiqc_report.html
[INFO ] multiqc : Data : ../../bcbiotx/tmp1n5rgkh7/multiqc_data
[INFO ] multiqc : MultiQC complete
' returned non-zero exit status 1.
|
KeyError
|
def __init__(self):
    """Set up the QUAST module: read config, parse reports, build all sections."""
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="QUAST",
        anchor="quast",
        href="http://quast.bioinf.spbau.ru/",
        info="is a quality assessment tool for genome assemblies, written by "
        "the Center for Algorithmic Biotechnology.",
    )
    # Unit modifiers, each overridable via the MultiQC config file.
    qconfig = getattr(config, "quast_config", {})
    for attr, default in (
        ("contig_length_multiplier", 0.001),
        ("contig_length_suffix", "Kbp"),
        ("total_length_multiplier", 0.000001),
        ("total_length_suffix", "Mbp"),
        ("total_number_contigs_multiplier", 0.001),
        ("total_number_contigs_suffix", "K"),
    ):
        setattr(self, attr, qconfig.get(attr, default))
    # Find and load any QUAST reports
    self.quast_data = dict()
    for logfile in self.find_log_files("quast"):
        self.parse_quast_log(logfile)
    # Filter to strip out ignored sample names
    self.quast_data = self.ignore_samples(self.quast_data)
    if len(self.quast_data) == 0:
        raise UserWarning
    log.info("Found {} reports".format(len(self.quast_data)))
    # Write parsed report data to a file
    self.write_data_file(self.quast_data, "multiqc_quast")
    # Basic Stats Table
    self.quast_general_stats_table()
    # Quast Stats Table
    self.add_section(
        name="Assembly Statistics", anchor="quast-stats", plot=self.quast_table()
    )
    # Number of contigs plot
    self.add_section(
        name="Number of Contigs",
        anchor="quast-contigs",
        description="""This plot shows the number of contigs found for each assembly, broken
        down by length.""",
        plot=self.quast_contigs_barplot(),
    )
    # Number of genes plot
    genes_plot = self.quast_predicted_genes_barplot()
    if genes_plot:
        self.add_section(
            name="Number of Predicted Genes",
            anchor="quast-genes",
            description="""This plot shows the number of predicted genes found for each
            assembly, broken down by length.""",
            plot=genes_plot,
        )
    # Number of partial genes plot
    genes_plot = self.quast_predicted_genes_barplot(partial=True)
    if genes_plot:
        self.add_section(
            name="Number of Partially Predicted Genes",
            anchor="quast-partial-genes",
            description="""This plot shows the number of partially predicted genes found for each
            assembly, broken down by length.""",
            plot=genes_plot,
        )
|
def __init__(self):
    """Set up the QUAST module: read config, parse reports, build all sections."""
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="QUAST",
        anchor="quast",
        href="http://quast.bioinf.spbau.ru/",
        info="is a quality assessment tool for genome assemblies, written by "
        "the Center for Algorithmic Biotechnology.",
    )
    # Unit modifiers, each overridable via the MultiQC config file.
    qconfig = getattr(config, "quast_config", {})
    for attr, default in (
        ("contig_length_multiplier", 0.001),
        ("contig_length_suffix", "Kbp"),
        ("total_length_multiplier", 0.000001),
        ("total_length_suffix", "Mbp"),
        ("total_number_contigs_multiplier", 0.001),
        ("total_number_contigs_suffix", "K"),
    ):
        setattr(self, attr, qconfig.get(attr, default))
    # Find and load any QUAST reports
    self.quast_data = dict()
    for logfile in self.find_log_files("quast"):
        self.parse_quast_log(logfile)
    # Filter to strip out ignored sample names
    self.quast_data = self.ignore_samples(self.quast_data)
    if len(self.quast_data) == 0:
        raise UserWarning
    log.info("Found {} reports".format(len(self.quast_data)))
    # Write parsed report data to a file
    self.write_data_file(self.quast_data, "multiqc_quast")
    # Basic Stats Table
    self.quast_general_stats_table()
    # Quast Stats Table
    self.add_section(
        name="Assembly Statistics", anchor="quast-stats", plot=self.quast_table()
    )
    # Number of contigs plot
    self.add_section(
        name="Number of Contigs",
        anchor="quast-contigs",
        description="""This plot shows the number of contigs found for each assembly, broken
        down by length.""",
        plot=self.quast_contigs_barplot(),
    )
    # Number of genes plot
    genes_plot = self.quast_predicted_genes_barplot()
    if genes_plot:
        self.add_section(
            name="Number of Predicted Genes",
            anchor="quast-genes",
            description="""This plot shows the number of predicted genes found for each
            assembly, broken down by length.""",
            plot=genes_plot,
        )
|
https://github.com/ewels/MultiQC/issues/954
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Prepending directory to sample names
[INFO ] multiqc : Searching 'outputs'
[INFO ] multiqc : Only using modules quast
[INFO ] quast : Found 16 reports
[ERROR ] multiqc : Oops! The 'quast' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
outputs/NRS6065/quast/report.tsv
============================================================
Module quast raised an exception: Traceback (most recent call last):
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/bin/multiqc", line 440, in multiqc
output = mod()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 75, in __init__
ng_pdata = self.quast_predicted_genes_barplot()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 305, in quast_predicted_genes_barplot
for _, d in self.quast_data.items()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 307, in <listcomp>
if key.startswith(prefix)
ValueError: invalid literal for int() with base 10: '0 bp)_par'
============================================================
|
ValueError
|
def parse_quast_log(self, f):
    """Parse one QUAST report.tsv and store per-sample stats in self.quast_data.

    The first row holds the sample names; every following row is one metric.
    Values of the form "12 + 3 part" are split into a whole count and a
    `<metric>_partial` count.
    """
    rows = f["f"].splitlines()
    # First row: metric-name column followed by one column per sample
    header = rows[0].split("\t")
    # Apply configured sample-name cleanup (e.g. directory prefixing)
    header = [self.clean_s_name(name, f["root"]) for name in header]
    for name in header[1:]:
        if name in self.quast_data:
            log.debug("Duplicate sample name found! Overwriting: {}".format(name))
        self.add_data_source(f, name)
        self.quast_data[name] = dict()
    # Remaining rows: one metric per line, one value per sample
    for row in rows[1:]:
        cols = row.split("\t")
        metric = cols[0]
        for idx, raw in enumerate(cols[1:]):
            name = header[idx + 1]
            m = re.search(r"(\d+) \+ (\d+) part", raw)
            if m is None:
                # Plain value: store as float where possible, else the raw string
                try:
                    self.quast_data[name][metric] = float(raw)
                except ValueError:
                    self.quast_data[name][metric] = raw
            else:
                # "N + M part" value: split into whole and partial counts
                whole = m.group(1)
                partial = m.group(2)
                try:
                    self.quast_data[name][metric] = float(whole)
                    self.quast_data[name]["{}_partial".format(metric)] = float(partial)
                except ValueError:
                    self.quast_data[name][metric] = whole
                    self.quast_data[name]["{}_partial".format(metric)] = partial
|
def parse_quast_log(self, f):
    """Parse one QUAST report.tsv and store per-sample stats in self.quast_data.

    The first row holds the sample names; each following row is one metric.
    Values like "12 + 3 part" are split into a whole and a `_partial` count.
    """
    lines = f["f"].splitlines()
    # Pull out the sample names from the first row
    s_names = lines[0].split("\t")
    # Prepend directory name(s) to sample names as configured
    s_names = [self.clean_s_name(s_name, f["root"]) for s_name in s_names]
    for s_name in s_names[1:]:
        if s_name in self.quast_data:
            log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
        self.add_data_source(f, s_name)
        self.quast_data[s_name] = dict()
    # Parse remaining stats for each sample
    for l in lines[1:]:
        s = l.split("\t")
        k = s[0]
        for i, v in enumerate(s[1:]):
            s_name = s_names[i + 1]
            # Raw string literal: "\d" / "\+" are invalid escape sequences
            # in a plain string (DeprecationWarning / SyntaxWarning on
            # modern Python)
            partials = re.search(r"(\d+) \+ (\d+) part", v)
            if partials:
                whole = partials.group(1)
                partial = partials.group(2)
                try:
                    self.quast_data[s_name][k] = float(whole)
                    self.quast_data[s_name]["{}_partial".format(k)] = float(partial)
                except ValueError:
                    self.quast_data[s_name][k] = whole
                    self.quast_data[s_name]["{}_partial".format(k)] = partial
            else:
                try:
                    self.quast_data[s_name][k] = float(v)
                except ValueError:
                    self.quast_data[s_name][k] = v
|
https://github.com/ewels/MultiQC/issues/954
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Prepending directory to sample names
[INFO ] multiqc : Searching 'outputs'
[INFO ] multiqc : Only using modules quast
[INFO ] quast : Found 16 reports
[ERROR ] multiqc : Oops! The 'quast' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
outputs/NRS6065/quast/report.tsv
============================================================
Module quast raised an exception: Traceback (most recent call last):
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/bin/multiqc", line 440, in multiqc
output = mod()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 75, in __init__
ng_pdata = self.quast_predicted_genes_barplot()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 305, in quast_predicted_genes_barplot
for _, d in self.quast_data.items()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 307, in <listcomp>
if key.startswith(prefix)
ValueError: invalid literal for int() with base 10: '0 bp)_par'
============================================================
|
ValueError
|
def quast_contigs_barplot(self):
    """Make a bar plot showing the number and length of contigs for each assembly"""
    plot_data = dict()
    categories = []
    for sample, stats in self.quast_data.items():
        # Collect the "# contigs (>= N bp)" counts, keyed by threshold N
        counts = dict()
        for key, val in stats.items():
            match = re.match(r"# contigs \(>= (\d+) bp\)", key)
            if match and val != "-":
                counts[int(match.groups()[0])] = int(val)
        # Largest threshold first, so each range is a simple subtraction
        cutoffs = sorted(counts, reverse=True)
        series = dict()
        labels = []
        for pos, cutoff in enumerate(cutoffs):
            if pos == 0:
                label = ">= {} bp".format(cutoff)
                series[label] = counts[cutoff]
            else:
                label = "{}-{} bp".format(cutoff, cutoffs[pos - 1])
                series[label] = counts[cutoff] - counts[cutoffs[pos - 1]]
            labels.append(label)
        # First sample that yields labels defines the plot categories
        if not categories:
            categories = labels
        plot_data[sample] = series
    pconfig = {
        "id": "quast_num_contigs",
        "title": "QUAST: Number of Contigs",
        "ylab": "# Contigs",
        "yDecimals": False,
    }
    return bargraph.plot(plot_data, categories, pconfig)
|
def quast_contigs_barplot(self):
    """Make a bar plot showing the number and length of contigs for each assembly"""
    # Prep the data
    data = dict()
    categories = []
    for s_name, d in self.quast_data.items():
        nums_by_t = dict()
        for k, v in d.items():
            # Raw string literal: "\(" / "\d" are invalid escape sequences
            # in a plain string (DeprecationWarning / SyntaxWarning on
            # modern Python)
            m = re.match(r"# contigs \(>= (\d+) bp\)", k)
            if m and v != "-":
                nums_by_t[int(m.groups()[0])] = int(v)
        # Largest threshold first, so each range is a simple subtraction
        tresholds = sorted(nums_by_t.keys(), reverse=True)
        p = dict()
        cats = []
        for i, t in enumerate(tresholds):
            if i == 0:
                c = ">= " + str(t) + " bp"
                cats.append(c)
                p[c] = nums_by_t[t]
            else:
                c = str(t) + "-" + str(tresholds[i - 1]) + " bp"
                cats.append(c)
                p[c] = nums_by_t[t] - nums_by_t[tresholds[i - 1]]
        # First sample that yields labels defines the plot categories
        if not categories:
            categories = cats
        data[s_name] = p
    pconfig = {
        "id": "quast_num_contigs",
        "title": "QUAST: Number of Contigs",
        "ylab": "# Contigs",
        "yDecimals": False,
    }
    return bargraph.plot(data, categories, pconfig)
|
https://github.com/ewels/MultiQC/issues/954
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Prepending directory to sample names
[INFO ] multiqc : Searching 'outputs'
[INFO ] multiqc : Only using modules quast
[INFO ] quast : Found 16 reports
[ERROR ] multiqc : Oops! The 'quast' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
outputs/NRS6065/quast/report.tsv
============================================================
Module quast raised an exception: Traceback (most recent call last):
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/bin/multiqc", line 440, in multiqc
output = mod()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 75, in __init__
ng_pdata = self.quast_predicted_genes_barplot()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 305, in quast_predicted_genes_barplot
for _, d in self.quast_data.items()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 307, in <listcomp>
if key.startswith(prefix)
ValueError: invalid literal for int() with base 10: '0 bp)_par'
============================================================
|
ValueError
|
def quast_predicted_genes_barplot(self, partial=False):
    """
    Make a bar plot showing the number and length of predicted genes
    for each assembly

    :param partial: when True, plot the `..._partial` gene counts
        instead of the whole-gene counts.
    :returns: a bargraph plot object, or None when no sample has at
        least two gene-length thresholds.
    :raises UserWarning: when the per-range counts do not sum to the
        ">= 0 bp" total for a sample.
    """
    # Prep the data
    # extract the ranges given to quast with "--gene-thresholds"
    # keys look like:
    # `# predicted genes (>= 300 bp)`
    # `# predicted genes (>= 300 bp)_partial`
    # NOTE(review): the non-partial pattern is not end-anchored, so it also
    # matches the `_partial` keys; harmless here because thresholds are
    # de-duplicated below, but a `$` anchor would be tighter.
    pattern = re.compile(
        r"# predicted genes \(>= (\d+) bp\)" + ("_partial" if partial else "")
    )
    data = {}
    all_categories = []
    # Template for looking the counts back up once a threshold is known
    data_key = "# predicted genes (>= {} bp)" + ("_partial" if partial else "")
    for s_name, d in self.quast_data.items():
        # Per-sample thresholds: samples may have been run with different
        # --gene-thresholds settings
        thresholds = []
        for k in d.keys():
            m = re.match(pattern, k)
            if m:
                thresholds.append(int(m.groups()[0]))
        thresholds = sorted(list(set(thresholds)))
        # Need at least two thresholds to form a range
        if len(thresholds) < 2:
            continue
        highest_threshold = thresholds[-1]
        highest_cat = (
            highest_threshold,
            ">= {} bp".format(highest_threshold),
        )  # tuple (key-for-sorting, label)
        all_categories.append(highest_cat)
        plot_data = {highest_cat[1]: d[data_key.format(highest_threshold)]}
        # converting >=T1, >=T2,.. into 0-T1, T1-T2,..
        for low, high in zip(thresholds[:-1], thresholds[1:]):
            cat = (low, "{}-{} bp".format(low, high))
            all_categories.append(cat)
            plot_data[cat[1]] = d[data_key.format(low)] - d[data_key.format(high)]
        try:
            # Sanity check: the per-range counts must sum to the ">= 0 bp" total
            assert sum(plot_data.values()) == d[data_key.format(0)]
        except AssertionError:
            raise UserWarning(
                'Predicted gene counts didn\'t add up properly for "{}"'.format(s_name)
            )
        data[s_name] = plot_data
    # De-duplicate categories and sort by their numeric threshold key
    all_categories = [label for k, label in sorted(list(set(all_categories)))]
    if len(all_categories) > 0:
        return bargraph.plot(
            data,
            all_categories,
            {
                "id": "quast_" + ("partially_" if partial else "") + "predicted_genes",
                "title": "QUAST: Number of "
                + ("partially " if partial else "")
                + "predicted genes",
                "ylab": "Number of "
                + ("partially " if partial else "")
                + "predicted genes",
            },
        )
    else:
        return None
|
def quast_predicted_genes_barplot(self):
    """
    Make a bar plot showing the number and length of predicted genes
    for each assembly

    Keys of interest look like `# predicted genes (>= 300 bp)`. QUAST can
    also emit `..._partial` variants, which must be excluded here.

    :returns: a bargraph plot object, or None when no thresholds are found.
    """
    # End-anchored pattern so `..._partial` keys are skipped. The previous
    # naive key slicing produced strings like '0 bp)_par' and raised
    # `ValueError: invalid literal for int()` (issue #954).
    key_re = re.compile(r"# predicted genes \(>= (\d+) bp\)$")

    def _thresholds(d):
        # Sorted, de-duplicated gene-length thresholds found in one dataset
        found = set()
        for key in d.keys():
            m = key_re.match(key)
            if m:
                found.add(int(m.group(1)))
        return sorted(found)

    all_thresholds = sorted(
        set(t for _, d in self.quast_data.items() for t in _thresholds(d))
    )
    data = {}
    ourpat = ">= {}{} bp"
    theirpat = "# predicted genes (>= {} bp)"
    for s_name, d in self.quast_data.items():
        # Per-sample thresholds: samples may have been run with different
        # --gene-thresholds settings
        thresholds = _thresholds(d)
        if len(thresholds) < 2:
            continue
        p = dict()
        try:
            p = {ourpat.format(thresholds[-1], ""): d[theirpat.format(thresholds[-1])]}
            # Convert >=T1, >=T2,.. into disjoint ranges T1-T2, T2-T3,..
            for low, high in zip(thresholds[:-1], thresholds[1:]):
                p[ourpat.format(low, -high)] = (
                    d[theirpat.format(low)] - d[theirpat.format(high)]
                )
            # Sanity check: range counts must sum to the ">= 0 bp" total
            assert sum(p.values()) == d[theirpat.format(0)]
        except AssertionError:
            log.warning(
                'Predicted gene counts didn\'t add up properly for "{}"'.format(s_name)
            )
        except KeyError:
            log.warning(
                'Not all predicted gene thresholds available for "{}"'.format(s_name)
            )
        data[s_name] = p
    cats = [
        ourpat.format(low, -high if high else "")
        for low, high in zip(all_thresholds, all_thresholds[1:] + [None])
    ]
    if len(cats) > 0:
        return bargraph.plot(
            data,
            cats,
            {
                "id": "quast_predicted_genes",
                "title": "QUAST: Number of predicted genes",
                "ylab": "Number of predicted genes",
            },
        )
    else:
        return None
|
https://github.com/ewels/MultiQC/issues/954
|
[INFO ] multiqc : This is MultiQC v1.7
[INFO ] multiqc : Template : default
[INFO ] multiqc : Prepending directory to sample names
[INFO ] multiqc : Searching 'outputs'
[INFO ] multiqc : Only using modules quast
[INFO ] quast : Found 16 reports
[ERROR ] multiqc : Oops! The 'quast' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
outputs/NRS6065/quast/report.tsv
============================================================
Module quast raised an exception: Traceback (most recent call last):
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/bin/multiqc", line 440, in multiqc
output = mod()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 75, in __init__
ng_pdata = self.quast_predicted_genes_barplot()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 305, in quast_predicted_genes_barplot
for _, d in self.quast_data.items()
File "/homes/jabbott/NSW_assembly/.snakemake/conda/a820f7a0/lib/python3.6/site-packages/multiqc/modules/quast/quast.py", line 307, in <listcomp>
if key.startswith(prefix)
ValueError: invalid literal for int() with base 10: '0 bp)_par'
============================================================
|
ValueError
|
def parse_file_as_json(self, myfile):
    """Parse one bcl2fastq Stats.json file.

    Merges per-run / per-lane / per-sample demultiplexing metrics into
    self.bcl2fastq_data[runId], then computes percentage and mean-quality
    summaries for every lane and sample.
    """
    try:
        content = json.loads(myfile["f"])
    except ValueError:
        log.warn("Could not parse file as json: {}".format(myfile["fn"]))
        return
    runId = content["RunId"]
    if runId not in self.bcl2fastq_data:
        self.bcl2fastq_data[runId] = dict()
    run_data = self.bcl2fastq_data[runId]
    for conversionResult in content.get("ConversionResults", []):
        l = conversionResult["LaneNumber"]
        lane = "L{}".format(conversionResult["LaneNumber"])
        if lane in run_data:
            log.debug(
                "Duplicate runId/lane combination found! Overwriting: {}".format(
                    self.prepend_runid(runId, lane)
                )
            )
        run_data[lane] = {
            "total": 0,
            "total_yield": 0,
            "perfectIndex": 0,
            "samples": dict(),
            "yieldQ30": 0,
            "qscore_sum": 0,
        }
        # simplify the population of dictionaries
        rlane = run_data[lane]
        # Add undetermined barcodes
        try:
            unknown_barcode = content["UnknownBarcodes"][l - 1]["Barcodes"]
        except IndexError:
            # NOTE(review): this fallback searches for Lane == 8 specifically;
            # presumably it should match the current lane number `l` —
            # confirm against the Stats.json schema before changing.
            unknown_barcode = next(
                (
                    item["Barcodes"]
                    for item in content["UnknownBarcodes"]
                    if item["Lane"] == 8
                ),
                None,
            )
        run_data[lane]["unknown_barcodes"] = unknown_barcode
        for demuxResult in conversionResult.get("DemuxResults", []):
            # Use the plain sample name when it equals the sample ID,
            # otherwise combine both to keep names unique.
            # (Fixed: was `SampleName == SampleName`, a tautology that made
            # the combined branch unreachable.)
            if demuxResult["SampleId"] == demuxResult["SampleName"]:
                sample = demuxResult["SampleName"]
            else:
                sample = "{}-{}".format(
                    demuxResult["SampleId"], demuxResult["SampleName"]
                )
            if sample in run_data[lane]["samples"]:
                log.debug(
                    "Duplicate runId/lane/sample combination found! Overwriting: {}, {}".format(
                        self.prepend_runid(runId, lane), sample
                    )
                )
            run_data[lane]["samples"][sample] = {
                "total": 0,
                "total_yield": 0,
                "perfectIndex": 0,
                "filename": os.path.join(myfile["root"], myfile["fn"]),
                "yieldQ30": 0,
                "qscore_sum": 0,
            }
            # simplify the population of dictionnaries
            lsample = run_data[lane]["samples"][sample]
            # Pre-initialise counters for up to four reads; unused ones are
            # pruned again below
            for r in range(1, 5):
                lsample["R{}_yield".format(r)] = 0
                lsample["R{}_Q30".format(r)] = 0
                lsample["R{}_trimmed_bases".format(r)] = 0
            rlane["total"] += demuxResult["NumberReads"]
            rlane["total_yield"] += demuxResult["Yield"]
            lsample["total"] += demuxResult["NumberReads"]
            lsample["total_yield"] += demuxResult["Yield"]
            for indexMetric in demuxResult.get("IndexMetrics", []):
                rlane["perfectIndex"] += indexMetric["MismatchCounts"]["0"]
                lsample["perfectIndex"] += indexMetric["MismatchCounts"]["0"]
            for readMetric in demuxResult.get("ReadMetrics", []):
                r = readMetric["ReadNumber"]
                rlane["yieldQ30"] += readMetric["YieldQ30"]
                rlane["qscore_sum"] += readMetric["QualityScoreSum"]
                lsample["yieldQ30"] += readMetric["YieldQ30"]
                lsample["qscore_sum"] += readMetric["QualityScoreSum"]
                lsample["R{}_yield".format(r)] += readMetric["Yield"]
                lsample["R{}_Q30".format(r)] += readMetric["YieldQ30"]
                lsample["R{}_trimmed_bases".format(r)] += readMetric["TrimmedBases"]
            # Drop read slots that saw no data at all
            for r in range(1, 5):
                if (
                    not lsample["R{}_yield".format(r)]
                    and not lsample["R{}_Q30".format(r)]
                    and not lsample["R{}_trimmed_bases".format(r)]
                ):
                    lsample.pop("R{}_yield".format(r))
                    lsample.pop("R{}_Q30".format(r))
                    lsample.pop("R{}_trimmed_bases".format(r))
        undeterminedYieldQ30 = 0
        undeterminedQscoreSum = 0
        undeterminedTrimmedBases = 0
        if "Undetermined" in conversionResult:
            for readMetric in conversionResult["Undetermined"]["ReadMetrics"]:
                undeterminedYieldQ30 += readMetric["YieldQ30"]
                undeterminedQscoreSum += readMetric["QualityScoreSum"]
                undeterminedTrimmedBases += readMetric["TrimmedBases"]
            run_data[lane]["samples"]["undetermined"] = {
                "total": conversionResult["Undetermined"]["NumberReads"],
                "total_yield": conversionResult["Undetermined"]["Yield"],
                "perfectIndex": 0,
                "yieldQ30": undeterminedYieldQ30,
                "qscore_sum": undeterminedQscoreSum,
                "trimmed_bases": undeterminedTrimmedBases,
            }
    # Calculate Percents and averages; "NA" marks lanes/samples with no data
    for lane_id, lane in run_data.items():
        try:
            lane["percent_Q30"] = (
                float(lane["yieldQ30"]) / float(lane["total_yield"])
            ) * 100.0
        except ZeroDivisionError:
            lane["percent_Q30"] = "NA"
        try:
            lane["percent_perfectIndex"] = (
                float(lane["perfectIndex"]) / float(lane["total"])
            ) * 100.0
        except ZeroDivisionError:
            lane["percent_perfectIndex"] = "NA"
        try:
            lane["mean_qscore"] = float(lane["qscore_sum"]) / float(lane["total_yield"])
        except ZeroDivisionError:
            lane["mean_qscore"] = "NA"
        for sample_id, sample in lane["samples"].items():
            try:
                sample["percent_Q30"] = (
                    float(sample["yieldQ30"]) / float(sample["total_yield"])
                ) * 100.0
            except ZeroDivisionError:
                sample["percent_Q30"] = "NA"
            try:
                sample["percent_perfectIndex"] = (
                    float(sample["perfectIndex"]) / float(sample["total"])
                ) * 100.0
            except ZeroDivisionError:
                sample["percent_perfectIndex"] = "NA"
            try:
                sample["mean_qscore"] = float(sample["qscore_sum"]) / float(
                    sample["total_yield"]
                )
            except ZeroDivisionError:
                sample["mean_qscore"] = "NA"
|
def parse_file_as_json(self, myfile):
    """Parse one bcl2fastq Stats.json file.

    Merges per-run / per-lane / per-sample demultiplexing metrics into
    self.bcl2fastq_data[runId], then computes percentage and mean-quality
    summaries for every lane and sample.
    """
    try:
        content = json.loads(myfile["f"])
    except ValueError:
        log.warn("Could not parse file as json: {}".format(myfile["fn"]))
        return
    runId = content["RunId"]
    if runId not in self.bcl2fastq_data:
        self.bcl2fastq_data[runId] = dict()
    run_data = self.bcl2fastq_data[runId]
    for conversionResult in content.get("ConversionResults", []):
        l = conversionResult["LaneNumber"]
        lane = "L{}".format(conversionResult["LaneNumber"])
        if lane in run_data:
            log.debug(
                "Duplicate runId/lane combination found! Overwriting: {}".format(
                    self.prepend_runid(runId, lane)
                )
            )
        run_data[lane] = {
            "total": 0,
            "total_yield": 0,
            "perfectIndex": 0,
            "samples": dict(),
            "yieldQ30": 0,
            "qscore_sum": 0,
        }
        # simplify the population of dictionaries
        rlane = run_data[lane]
        # Add undetermined barcodes
        try:
            unknown_barcode = content["UnknownBarcodes"][l - 1]["Barcodes"]
        except IndexError:
            # NOTE(review): this fallback searches for Lane == 8 specifically;
            # presumably it should match the current lane number `l` —
            # confirm against the Stats.json schema before changing.
            unknown_barcode = next(
                (
                    item["Barcodes"]
                    for item in content["UnknownBarcodes"]
                    if item["Lane"] == 8
                ),
                None,
            )
        run_data[lane]["unknown_barcodes"] = unknown_barcode
        for demuxResult in conversionResult.get("DemuxResults", []):
            # Use the plain sample name when it equals the sample ID,
            # otherwise combine both to keep names unique.
            # (Fixed: was `SampleName == SampleName`, a tautology that made
            # the combined branch unreachable.)
            if demuxResult["SampleId"] == demuxResult["SampleName"]:
                sample = demuxResult["SampleName"]
            else:
                sample = "{}-{}".format(
                    demuxResult["SampleId"], demuxResult["SampleName"]
                )
            if sample in run_data[lane]["samples"]:
                log.debug(
                    "Duplicate runId/lane/sample combination found! Overwriting: {}, {}".format(
                        self.prepend_runid(runId, lane), sample
                    )
                )
            run_data[lane]["samples"][sample] = {
                "total": 0,
                "total_yield": 0,
                "perfectIndex": 0,
                "filename": os.path.join(myfile["root"], myfile["fn"]),
                "yieldQ30": 0,
                "qscore_sum": 0,
                "R1_yield": 0,
                "R2_yield": 0,
                "R1_Q30": 0,
                "R2_Q30": 0,
                "R1_trimmed_bases": 0,
                "R2_trimmed_bases": 0,
            }
            # simplify the population of dictionnaries
            lsample = run_data[lane]["samples"][sample]
            rlane["total"] += demuxResult["NumberReads"]
            rlane["total_yield"] += demuxResult["Yield"]
            lsample["total"] += demuxResult["NumberReads"]
            lsample["total_yield"] += demuxResult["Yield"]
            for indexMetric in demuxResult.get("IndexMetrics", []):
                rlane["perfectIndex"] += indexMetric["MismatchCounts"]["0"]
                lsample["perfectIndex"] += indexMetric["MismatchCounts"]["0"]
            for readMetric in demuxResult.get("ReadMetrics", []):
                r = readMetric["ReadNumber"]
                rlane["yieldQ30"] += readMetric["YieldQ30"]
                rlane["qscore_sum"] += readMetric["QualityScoreSum"]
                lsample["yieldQ30"] += readMetric["YieldQ30"]
                lsample["qscore_sum"] += readMetric["QualityScoreSum"]
                # Reads 3/4 are not pre-initialised above, so accumulate via
                # .get() to avoid KeyError (e.g. 'R3_yield') on runs with
                # more than two reads (issue #907).
                lsample["R{}_yield".format(r)] = (
                    lsample.get("R{}_yield".format(r), 0) + readMetric["Yield"]
                )
                lsample["R{}_Q30".format(r)] = (
                    lsample.get("R{}_Q30".format(r), 0) + readMetric["YieldQ30"]
                )
                lsample["R{}_trimmed_bases".format(r)] = (
                    lsample.get("R{}_trimmed_bases".format(r), 0)
                    + readMetric["TrimmedBases"]
                )
        undeterminedYieldQ30 = 0
        undeterminedQscoreSum = 0
        undeterminedTrimmedBases = 0
        if "Undetermined" in conversionResult:
            for readMetric in conversionResult["Undetermined"]["ReadMetrics"]:
                undeterminedYieldQ30 += readMetric["YieldQ30"]
                undeterminedQscoreSum += readMetric["QualityScoreSum"]
                undeterminedTrimmedBases += readMetric["TrimmedBases"]
            run_data[lane]["samples"]["undetermined"] = {
                "total": conversionResult["Undetermined"]["NumberReads"],
                "total_yield": conversionResult["Undetermined"]["Yield"],
                "perfectIndex": 0,
                "yieldQ30": undeterminedYieldQ30,
                "qscore_sum": undeterminedQscoreSum,
                "trimmed_bases": undeterminedTrimmedBases,
            }
    # Calculate Percents and averages; "NA" marks lanes/samples with no data
    for lane_id, lane in run_data.items():
        try:
            lane["percent_Q30"] = (
                float(lane["yieldQ30"]) / float(lane["total_yield"])
            ) * 100.0
        except ZeroDivisionError:
            lane["percent_Q30"] = "NA"
        try:
            lane["percent_perfectIndex"] = (
                float(lane["perfectIndex"]) / float(lane["total"])
            ) * 100.0
        except ZeroDivisionError:
            lane["percent_perfectIndex"] = "NA"
        try:
            lane["mean_qscore"] = float(lane["qscore_sum"]) / float(lane["total_yield"])
        except ZeroDivisionError:
            lane["mean_qscore"] = "NA"
        for sample_id, sample in lane["samples"].items():
            try:
                sample["percent_Q30"] = (
                    float(sample["yieldQ30"]) / float(sample["total_yield"])
                ) * 100.0
            except ZeroDivisionError:
                sample["percent_Q30"] = "NA"
            try:
                sample["percent_perfectIndex"] = (
                    float(sample["perfectIndex"]) / float(sample["total"])
                ) * 100.0
            except ZeroDivisionError:
                sample["percent_perfectIndex"] = "NA"
            try:
                sample["mean_qscore"] = float(sample["qscore_sum"]) / float(
                    sample["total_yield"]
                )
            except ZeroDivisionError:
                sample["mean_qscore"] = "NA"
|
https://github.com/ewels/MultiQC/issues/907
|
[ERROR ] multiqc : Oops! The 'bcl2fastq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
NB500904_0187/fastq/stats/Stats.json
============================================================
Module bcl2fastq raised an exception: Traceback (most recent call last):
File "/software/UHTS/Analysis/MultiQC/1.7/bin/multiqc", line 440, in multiqc
output = mod()
File "/software/UHTS/Analysis/MultiQC/1.7/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 29, in __init__
self.parse_file_as_json(myfile)
File "/software/UHTS/Analysis/MultiQC/1.7/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 215, in parse_file_as_json
lsample["R{}_yield".format(r)] += readMetric["Yield"]
KeyError: 'R3_yield'
|
KeyError
|
def parse_fastp_log(self, f):
    """Parse the JSON output from fastp and save the summary statistics.

    Populates self.fastp_data and the various per-plot data dicts keyed by
    sample name; empty per-sample dicts are removed again at the end.
    """
    try:
        parsed_json = json.load(f["f"])
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt / SystemExit
        log.warning("Could not parse fastp JSON: '{}'".format(f["fn"]))
        return None
    # Fetch a sample name from the command
    s_name = f["s_name"]
    cmd = parsed_json["command"].split()
    for i, v in enumerate(cmd):
        if v == "-i":
            s_name = self.clean_s_name(cmd[i + 1], f["root"])
    if s_name == "fastp":
        log.warning("Could not parse sample name from fastp command: {}".format(f["fn"]))
    self.add_data_source(f, s_name)
    self.fastp_data[s_name] = {}
    self.fastp_duplication_plotdata[s_name] = {}
    self.fastp_insert_size_data[s_name] = {}
    self.fastp_all_data[s_name] = parsed_json
    for k in [
        "read1_before_filtering",
        "read2_before_filtering",
        "read1_after_filtering",
        "read2_after_filtering",
    ]:
        self.fastp_qual_plotdata[k][s_name] = {}
        self.fastp_gc_content_data[k][s_name] = {}
        self.fastp_n_content_data[k][s_name] = {}
    # Parse filtering_result
    try:
        for k in parsed_json["filtering_result"]:
            self.fastp_data[s_name]["filtering_result_{}".format(k)] = float(
                parsed_json["filtering_result"][k]
            )
    except KeyError:
        log.debug(
            "fastp JSON did not have 'filtering_result' key: '{}'".format(f["fn"])
        )
    # Parse duplication
    try:
        self.fastp_data[s_name]["pct_duplication"] = float(
            parsed_json["duplication"]["rate"] * 100.0
        )
    except KeyError:
        log.debug("fastp JSON did not have a 'duplication' key: '{}'".format(f["fn"]))
    # Parse after_filtering
    try:
        for k in parsed_json["summary"]["after_filtering"]:
            self.fastp_data[s_name]["after_filtering_{}".format(k)] = float(
                parsed_json["summary"]["after_filtering"][k]
            )
    except KeyError:
        log.debug(
            "fastp JSON did not have a 'summary'-'after_filtering' keys: '{}'".format(
                f["fn"]
            )
        )
    # Parse data required to calculate Pct reads surviving
    try:
        self.fastp_data[s_name]["before_filtering_total_reads"] = float(
            parsed_json["summary"]["before_filtering"]["total_reads"]
        )
    except KeyError:
        log.debug("Could not find pre-filtering # reads: '{}'".format(f["fn"]))
    try:
        self.fastp_data[s_name]["pct_surviving"] = (
            self.fastp_data[s_name]["after_filtering_total_reads"]
            / self.fastp_data[s_name]["before_filtering_total_reads"]
        ) * 100.0
    except KeyError:
        log.debug("Could not calculate 'pct_surviving': {}".format(f["fn"]))
    # Parse adapter_cutting
    try:
        for k in parsed_json["adapter_cutting"]:
            try:
                self.fastp_data[s_name]["adapter_cutting_{}".format(k)] = float(
                    parsed_json["adapter_cutting"][k]
                )
            except (ValueError, TypeError):
                pass
    except KeyError:
        log.debug(
            "fastp JSON did not have a 'adapter_cutting' key, skipping: '{}'".format(
                f["fn"]
            )
        )
    try:
        self.fastp_data[s_name]["pct_adapter"] = (
            self.fastp_data[s_name]["adapter_cutting_adapter_trimmed_reads"]
            / self.fastp_data[s_name]["before_filtering_total_reads"]
        ) * 100.0
    except KeyError:
        log.debug("Could not calculate 'pct_adapter': {}".format(f["fn"]))
    # Duplication rate plot data
    try:
        # First count the total read count in the dup analysis
        total_reads = 0
        for v in parsed_json["duplication"]["histogram"]:
            total_reads += v
        # Guard against ZeroDivisionError; treated as "no plot data" below
        if total_reads == 0:
            raise KeyError
        # Calculate percentages
        for i, v in enumerate(parsed_json["duplication"]["histogram"]):
            self.fastp_duplication_plotdata[s_name][i + 1] = (
                float(v) / float(total_reads)
            ) * 100.0
    except KeyError:
        log.debug("No duplication rate plot data: {}".format(f["fn"]))
    # Insert size plot data
    try:
        # First count the total read count in the insert size analysis
        total_reads = 0
        max_i = 0
        for i, v in enumerate(parsed_json["insert_size"]["histogram"]):
            total_reads += v
            if float(v) > 0:
                max_i = i
        # Guard against ZeroDivisionError; treated as "no plot data" below
        if total_reads == 0:
            raise KeyError
        # Calculate percentages
        for i, v in enumerate(parsed_json["insert_size"]["histogram"]):
            if i <= max_i:
                self.fastp_insert_size_data[s_name][i + 1] = (
                    float(v) / float(total_reads)
                ) * 100.0
    except KeyError:
        log.debug("No insert size plot data: {}".format(f["fn"]))
    for k in [
        "read1_before_filtering",
        "read2_before_filtering",
        "read1_after_filtering",
        "read2_after_filtering",
    ]:
        # Read quality data
        try:
            for i, v in enumerate(parsed_json[k]["quality_curves"]["mean"]):
                self.fastp_qual_plotdata[k][s_name][i + 1] = float(v)
        except KeyError:
            log.debug("Read quality {} not found: {}".format(k, f["fn"]))
        # GC and N content plots
        try:
            for i, v in enumerate(parsed_json[k]["content_curves"]["GC"]):
                self.fastp_gc_content_data[k][s_name][i + 1] = float(v) * 100.0
            for i, v in enumerate(parsed_json[k]["content_curves"]["N"]):
                self.fastp_n_content_data[k][s_name][i + 1] = float(v) * 100.0
        except KeyError:
            log.debug("Content curve data {} not found: {}".format(k, f["fn"]))
    # Remove empty dicts
    if len(self.fastp_data[s_name]) == 0:
        del self.fastp_data[s_name]
    if len(self.fastp_duplication_plotdata[s_name]) == 0:
        del self.fastp_duplication_plotdata[s_name]
    if len(self.fastp_insert_size_data[s_name]) == 0:
        del self.fastp_insert_size_data[s_name]
    if len(self.fastp_all_data[s_name]) == 0:
        del self.fastp_all_data[s_name]
|
def parse_fastp_log(self, f):
    """Parse the JSON output from fastp and save the summary statistics.

    Populates self.fastp_data and the various per-plot data dicts keyed by
    sample name; empty per-sample dicts are removed again at the end.
    """
    try:
        parsed_json = json.load(f["f"])
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt / SystemExit
        log.warning("Could not parse fastp JSON: '{}'".format(f["fn"]))
        return None
    # Fetch a sample name from the command
    s_name = f["s_name"]
    cmd = parsed_json["command"].split()
    for i, v in enumerate(cmd):
        if v == "-i":
            s_name = self.clean_s_name(cmd[i + 1], f["root"])
    if s_name == "fastp":
        log.warning("Could not parse sample name from fastp command: {}".format(f["fn"]))
    self.add_data_source(f, s_name)
    self.fastp_data[s_name] = {}
    self.fastp_duplication_plotdata[s_name] = {}
    self.fastp_insert_size_data[s_name] = {}
    self.fastp_all_data[s_name] = parsed_json
    for k in [
        "read1_before_filtering",
        "read2_before_filtering",
        "read1_after_filtering",
        "read2_after_filtering",
    ]:
        self.fastp_qual_plotdata[k][s_name] = {}
        self.fastp_gc_content_data[k][s_name] = {}
        self.fastp_n_content_data[k][s_name] = {}
    # Parse filtering_result
    try:
        for k in parsed_json["filtering_result"]:
            self.fastp_data[s_name]["filtering_result_{}".format(k)] = float(
                parsed_json["filtering_result"][k]
            )
    except KeyError:
        log.debug(
            "fastp JSON did not have 'filtering_result' key: '{}'".format(f["fn"])
        )
    # Parse duplication
    try:
        self.fastp_data[s_name]["pct_duplication"] = float(
            parsed_json["duplication"]["rate"] * 100.0
        )
    except KeyError:
        log.debug("fastp JSON did not have a 'duplication' key: '{}'".format(f["fn"]))
    # Parse after_filtering
    try:
        for k in parsed_json["summary"]["after_filtering"]:
            self.fastp_data[s_name]["after_filtering_{}".format(k)] = float(
                parsed_json["summary"]["after_filtering"][k]
            )
    except KeyError:
        log.debug(
            "fastp JSON did not have a 'summary'-'after_filtering' keys: '{}'".format(
                f["fn"]
            )
        )
    # Parse data required to calculate Pct reads surviving
    try:
        self.fastp_data[s_name]["before_filtering_total_reads"] = float(
            parsed_json["summary"]["before_filtering"]["total_reads"]
        )
    except KeyError:
        log.debug("Could not find pre-filtering # reads: '{}'".format(f["fn"]))
    try:
        self.fastp_data[s_name]["pct_surviving"] = (
            self.fastp_data[s_name]["after_filtering_total_reads"]
            / self.fastp_data[s_name]["before_filtering_total_reads"]
        ) * 100.0
    except KeyError:
        log.debug("Could not calculate 'pct_surviving': {}".format(f["fn"]))
    # Parse adapter_cutting
    try:
        for k in parsed_json["adapter_cutting"]:
            try:
                self.fastp_data[s_name]["adapter_cutting_{}".format(k)] = float(
                    parsed_json["adapter_cutting"][k]
                )
            except (ValueError, TypeError):
                pass
    except KeyError:
        log.debug(
            "fastp JSON did not have a 'adapter_cutting' key, skipping: '{}'".format(
                f["fn"]
            )
        )
    try:
        self.fastp_data[s_name]["pct_adapter"] = (
            self.fastp_data[s_name]["adapter_cutting_adapter_trimmed_reads"]
            / self.fastp_data[s_name]["before_filtering_total_reads"]
        ) * 100.0
    except KeyError:
        log.debug("Could not calculate 'pct_adapter': {}".format(f["fn"]))
    # Duplication rate plot data
    try:
        # First count the total read count in the dup analysis
        total_reads = 0
        for v in parsed_json["duplication"]["histogram"]:
            total_reads += v
        # Guard against an all-zero histogram: the division below raised
        # ZeroDivisionError (issue #845). KeyError is handled as "no data".
        if total_reads == 0:
            raise KeyError
        # Calculate percentages
        for i, v in enumerate(parsed_json["duplication"]["histogram"]):
            self.fastp_duplication_plotdata[s_name][i + 1] = (
                float(v) / float(total_reads)
            ) * 100.0
    except KeyError:
        log.debug("No duplication rate plot data: {}".format(f["fn"]))
    # Insert size plot data
    try:
        # First count the total read count in the insert size analysis
        total_reads = 0
        max_i = 0
        for i, v in enumerate(parsed_json["insert_size"]["histogram"]):
            total_reads += v
            if float(v) > 0:
                max_i = i
        # Guard against an all-zero histogram: the division below raised
        # ZeroDivisionError (issue #845). KeyError is handled as "no data".
        if total_reads == 0:
            raise KeyError
        # Calculate percentages
        for i, v in enumerate(parsed_json["insert_size"]["histogram"]):
            if i <= max_i:
                self.fastp_insert_size_data[s_name][i + 1] = (
                    float(v) / float(total_reads)
                ) * 100.0
    except KeyError:
        log.debug("No insert size plot data: {}".format(f["fn"]))
    for k in [
        "read1_before_filtering",
        "read2_before_filtering",
        "read1_after_filtering",
        "read2_after_filtering",
    ]:
        # Read quality data
        try:
            for i, v in enumerate(parsed_json[k]["quality_curves"]["mean"]):
                self.fastp_qual_plotdata[k][s_name][i + 1] = float(v)
        except KeyError:
            log.debug("Read quality {} not found: {}".format(k, f["fn"]))
        # GC and N content plots
        try:
            for i, v in enumerate(parsed_json[k]["content_curves"]["GC"]):
                self.fastp_gc_content_data[k][s_name][i + 1] = float(v) * 100.0
            for i, v in enumerate(parsed_json[k]["content_curves"]["N"]):
                self.fastp_n_content_data[k][s_name][i + 1] = float(v) * 100.0
        except KeyError:
            log.debug("Content curve data {} not found: {}".format(k, f["fn"]))
    # Remove empty dicts
    if len(self.fastp_data[s_name]) == 0:
        del self.fastp_data[s_name]
    if len(self.fastp_duplication_plotdata[s_name]) == 0:
        del self.fastp_duplication_plotdata[s_name]
    if len(self.fastp_insert_size_data[s_name]) == 0:
        del self.fastp_insert_size_data[s_name]
    if len(self.fastp_all_data[s_name]) == 0:
        del self.fastp_all_data[s_name]
|
https://github.com/ewels/MultiQC/issues/845
|
lucas-maciel@lucasmaciel-Inspiron-7472:~/QC_fastq/fastp$ multiqc .
[INFO ] multiqc : This is MultiQC v1.7.dev0
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
[ERROR ] multiqc : Oops! The 'fastp' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
./ERR1702979_fastp.json
============================================================
Module fastp raised an exception: Traceback (most recent call last):
File "/home/lucas-maciel/.local/bin/multiqc", line 440, in multiqc
output = mod()
File "/home/lucas-maciel/.local/lib/python2.7/site-packages/multiqc/modules/fastp/fastp.py", line 43, in __init__
self.parse_fastp_log(f)
File "/home/lucas-maciel/.local/lib/python2.7/site-packages/multiqc/modules/fastp/fastp.py", line 237, in parse_fastp_log
self.fastp_insert_size_data[s_name][i+1] = (float(v) / float(total_reads)) * 100.0
ZeroDivisionError: float division by zero
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
ZeroDivisionError
|
def parse_samtools_idxstats(self):
"""Find Samtools idxstats logs and parse their data"""
self.samtools_idxstats = dict()
for f in self.find_log_files("samtools/idxstats"):
parsed_data = parse_single_report(f["f"])
if len(parsed_data) > 0:
if f["s_name"] in self.samtools_idxstats:
log.debug(
"Duplicate sample name found! Overwriting: {}".format(f["s_name"])
)
self.add_data_source(f, section="idxstats")
self.samtools_idxstats[f["s_name"]] = parsed_data
# Filter to strip out ignored sample names
self.samtools_idxstats = self.ignore_samples(self.samtools_idxstats)
if len(self.samtools_idxstats) > 0:
# Write parsed report data to a file (restructure first)
self.write_data_file(self.samtools_idxstats, "multiqc_samtools_idxstats")
# Prep the data for the plots
keys = list()
pdata = dict()
pdata_norm = dict()
xy_counts = dict()
# Count the total mapped reads for every chromosome
chrs_mapped = defaultdict(lambda: 0)
sample_mapped = defaultdict(lambda: 0)
total_mapped = 0
# Cutoff, can be customised in config
cutoff = float(getattr(config, "samtools_idxstats_fraction_cutoff", 0.001))
if cutoff != 0.001:
log.info("Setting idxstats cutoff to: {}%".format(cutoff * 100.0))
for s_name in self.samtools_idxstats:
for chrom in self.samtools_idxstats[s_name]:
chrs_mapped[chrom] += self.samtools_idxstats[s_name][chrom]
sample_mapped[s_name] += self.samtools_idxstats[s_name][chrom]
total_mapped += self.samtools_idxstats[s_name][chrom]
req_reads = float(total_mapped) * cutoff
chr_always = getattr(config, "samtools_idxstats_always", [])
if len(chr_always) > 0:
log.info(
"Trying to include these chromosomes in idxstats: {}".format(
", ".join(chr_always)
)
)
chr_ignore = getattr(config, "samtools_idxstats_ignore", [])
if len(chr_ignore) > 0:
log.info(
"Excluding these chromosomes from idxstats: {}".format(
", ".join(chr_ignore)
)
)
xchr = getattr(config, "samtools_idxstats_xchr", False)
if xchr:
log.info('Using "{}" as X chromosome name'.format(xchr))
ychr = getattr(config, "samtools_idxstats_ychr", False)
if ychr:
log.info('Using "{}" as Y chromosome name'.format(ychr))
# Go through again and collect all of the keys that have enough counts
# Also get the X/Y counts if we find them
for s_name in self.samtools_idxstats:
x_count = False
y_count = False
for chrom in self.samtools_idxstats[s_name]:
if float(chrs_mapped[chrom]) > req_reads or chrom in chr_always:
if chrom not in chr_ignore and chrom not in keys:
keys.append(chrom)
# Collect X and Y counts if we have them
mapped = self.samtools_idxstats[s_name][chrom]
if xchr is not False:
if str(xchr) == str(chrom):
x_count = mapped
else:
if chrom.lower() == "x" or chrom.lower() == "chrx":
x_count = mapped
if ychr is not False:
if str(ychr) == str(chrom):
y_count = mapped
else:
if chrom.lower() == "y" or chrom.lower() == "chry":
y_count = mapped
# Only save these counts if we have both x and y
if x_count and y_count:
xy_counts[s_name] = {"x": x_count, "y": y_count}
# Ok, one last time. We have the chromosomes that we want to plot,
# now collect the counts
for s_name in self.samtools_idxstats:
pdata[s_name] = OrderedDict()
pdata_norm[s_name] = OrderedDict()
for k in keys:
try:
pdata[s_name][k] = self.samtools_idxstats[s_name][k]
pdata_norm[s_name][k] = (
float(self.samtools_idxstats[s_name][k]) / sample_mapped[s_name]
)
except (KeyError, ZeroDivisionError):
pdata[s_name][k] = 0
pdata_norm[s_name][k] = 0
# X/Y ratio plot
if len(xy_counts) > 0:
xy_keys = OrderedDict()
xy_keys["x"] = {"name": xchr if xchr else "Chromosome X"}
xy_keys["y"] = {"name": ychr if ychr else "Chromosome Y"}
pconfig = {
"id": "samtools-idxstats-xy-plot",
"title": "Samtools idxstats: chrXY mapped reads",
"ylab": "Percent of X+Y Reads",
"cpswitch_counts_label": "Number of Reads",
"cpswitch_percent_label": "Percent of X+Y Reads",
"cpswitch_c_active": False,
}
self.add_section(
name="XY counts",
anchor="samtools-idxstats-xy-counts",
plot=bargraph.plot(xy_counts, xy_keys, pconfig),
)
# Mapped reads per chr line plot
pconfig = {
"id": "samtools-idxstats-mapped-reads-plot",
"title": "Samtools idxstats: Mapped reads per contig",
"ylab": "# mapped reads",
"xlab": "Chromosome Name",
"categories": True,
"tt_label": "<strong>{point.category}:</strong> {point.y:.2f}",
"data_labels": [
{"name": "Normalised Counts", "ylab": "Fraction of total count"},
{"name": "Counts", "ylab": "# mapped reads"},
],
}
self.add_section(
name="Mapped reads per contig",
anchor="samtools-idxstats",
description="The <code>samtools idxstats</code> tool counts the number of mapped reads per chromosome / contig. "
+ "Chromosomes with < {}% of the total aligned reads are omitted from this plot.".format(
cutoff * 100
),
plot=linegraph.plot([pdata_norm, pdata], pconfig),
)
# Return the number of logs that were found
return len(self.samtools_idxstats)
|
def parse_samtools_idxstats(self):
"""Find Samtools idxstats logs and parse their data"""
self.samtools_idxstats = dict()
for f in self.find_log_files("samtools/idxstats"):
parsed_data = parse_single_report(f["f"])
if len(parsed_data) > 0:
if f["s_name"] in self.samtools_idxstats:
log.debug(
"Duplicate sample name found! Overwriting: {}".format(f["s_name"])
)
self.add_data_source(f, section="idxstats")
self.samtools_idxstats[f["s_name"]] = parsed_data
# Filter to strip out ignored sample names
self.samtools_idxstats = self.ignore_samples(self.samtools_idxstats)
if len(self.samtools_idxstats) > 0:
# Write parsed report data to a file (restructure first)
self.write_data_file(self.samtools_idxstats, "multiqc_samtools_idxstats")
# Prep the data for the plots
keys = list()
pdata = dict()
pdata_norm = dict()
xy_counts = dict()
# Count the total mapped reads for every chromosome
chrs_mapped = defaultdict(lambda: 0)
sample_mapped = defaultdict(lambda: 0)
total_mapped = 0
# Cutoff, can be customised in config
cutoff = float(getattr(config, "samtools_idxstats_fraction_cutoff", 0.001))
if cutoff != 0.001:
log.info("Setting idxstats cutoff to: {}%".format(cutoff * 100.0))
for s_name in self.samtools_idxstats:
for chrom in self.samtools_idxstats[s_name]:
chrs_mapped[chrom] += self.samtools_idxstats[s_name][chrom]
sample_mapped[s_name] += self.samtools_idxstats[s_name][chrom]
total_mapped += self.samtools_idxstats[s_name][chrom]
req_reads = float(total_mapped) * cutoff
chr_always = getattr(config, "samtools_idxstats_always", [])
if len(chr_always) > 0:
log.info(
"Trying to include these chromosomes in idxstats: {}".format(
", ".join(chr_always)
)
)
chr_ignore = getattr(config, "samtools_idxstats_ignore", [])
if len(chr_ignore) > 0:
log.info(
"Excluding these chromosomes from idxstats: {}".format(
", ".join(chr_ignore)
)
)
xchr = getattr(config, "samtools_idxstats_xchr", False)
if xchr:
log.info('Using "{}" as X chromosome name'.format(xchr))
ychr = getattr(config, "samtools_idxstats_ychr", False)
if ychr:
log.info('Using "{}" as Y chromosome name'.format(ychr))
# Go through again and collect all of the keys that have enough counts
# Also get the X/Y counts if we find them
for s_name in self.samtools_idxstats:
x_count = False
y_count = False
for chrom in self.samtools_idxstats[s_name]:
if float(chrs_mapped[chrom]) > req_reads or chrom in chr_always:
if chrom not in chr_ignore and chrom not in keys:
keys.append(chrom)
# Collect X and Y counts if we have them
mapped = self.samtools_idxstats[s_name][chrom]
if xchr is not False:
if str(xchr) == str(chrom):
x_count = mapped
else:
if chrom.lower() == "x" or chrom.lower() == "chrx":
x_count = mapped
if ychr is not False:
if str(ychr) == str(chrom):
y_count = mapped
else:
if chrom.lower() == "y" or chrom.lower() == "chry":
y_count = mapped
# Only save these counts if we have both x and y
if x_count and y_count:
xy_counts[s_name] = {"x": x_count, "y": y_count}
# Ok, one last time. We have the chromosomes that we want to plot,
# now collect the counts
for s_name in self.samtools_idxstats:
pdata[s_name] = OrderedDict()
pdata_norm[s_name] = OrderedDict()
for k in keys:
try:
pdata[s_name][k] = self.samtools_idxstats[s_name][k]
pdata_norm[s_name][k] = (
float(self.samtools_idxstats[s_name][k]) / sample_mapped[s_name]
)
except KeyError:
pdata[s_name][k] = 0
pdata_norm[s_name][k] = 0
# X/Y ratio plot
if len(xy_counts) > 0:
xy_keys = OrderedDict()
xy_keys["x"] = {"name": xchr if xchr else "Chromosome X"}
xy_keys["y"] = {"name": ychr if ychr else "Chromosome Y"}
pconfig = {
"id": "samtools-idxstats-xy-plot",
"title": "Samtools idxstats: chrXY mapped reads",
"ylab": "Percent of X+Y Reads",
"cpswitch_counts_label": "Number of Reads",
"cpswitch_percent_label": "Percent of X+Y Reads",
"cpswitch_c_active": False,
}
self.add_section(
name="XY counts",
anchor="samtools-idxstats-xy-counts",
plot=bargraph.plot(xy_counts, xy_keys, pconfig),
)
# Mapped reads per chr line plot
pconfig = {
"id": "samtools-idxstats-mapped-reads-plot",
"title": "Samtools idxstats: Mapped reads per contig",
"ylab": "# mapped reads",
"xlab": "Chromosome Name",
"categories": True,
"tt_label": "<strong>{point.category}:</strong> {point.y:.2f}",
"data_labels": [
{"name": "Normalised Counts", "ylab": "Fraction of total count"},
{"name": "Counts", "ylab": "# mapped reads"},
],
}
self.add_section(
name="Mapped reads per contig",
anchor="samtools-idxstats",
description="The <code>samtools idxstats</code> tool counts the number of mapped reads per chromosome / contig. "
+ "Chromosomes with < {}% of the total aligned reads are omitted from this plot.".format(
cutoff * 100
),
plot=linegraph.plot([pdata_norm, pdata], pconfig),
)
# Return the number of logs that were found
return len(self.samtools_idxstats)
|
https://github.com/ewels/MultiQC/issues/680
|
#original
multiqc --force ./
[INFO ] multiqc : This is MultiQC v1.0
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching './'
[INFO ] snpeff : Found 28 reports
[WARNING] bargraph : Tried to make bar plot, but had no data
[INFO ] picard : Found 13 BaseDistributionByCycleMetrics reports
[INFO ] picard : Found 13 GcBiasMetrics reports
[INFO ] picard : Found 14 HsMetrics reports
[INFO ] picard : Found 14 AlignmentSummaryMetrics reports
[INFO ] samtools : Found 28 stats reports
[INFO ] samtools : Found 14 flagstat reports
[ERROR ] multiqc : Oops! The 'samtools' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module samtools raised an exception: Traceback (most recent call last):
File "/apps/software/multiqc/1.0-foss-2015b-Python-2.7.11/lib/python2.7/site-packages/multiqc-1.0-py2.7.egg/EGG-INFO/scripts/multiqc", line 373, in multiqc
output = mod()
File "/apps/software/multiqc/1.0-foss-2015b-Python-2.7.11/lib/python2.7/site-packages/multiqc-1.0-py2.7.egg/multiqc/modules/samtools/samtools.py", line 50, in __init__
n['idxstats'] = self.parse_samtools_idxstats()
File "/apps/software/multiqc/1.0-foss-2015b-Python-2.7.11/lib/python2.7/site-packages/multiqc-1.0-py2.7.egg/multiqc/modules/samtools/idxstats.py", line 101, in parse_samtools_idxstats
pdata_norm[s_name][k] = float(self.samtools_idxstats[s_name][k]) / sample_mapped[s_name]
ZeroDivisionError: float division by zero
============================================================
[WARNING] fastqc : Couldn't read 'some_fastqc.zip' - Bad zip file
[INFO ] fastqc : Found 27 reports
[INFO ] multiqc : Compressing plot data
[INFO ] multiqc : Report : multiqc_report.html
[INFO ] multiqc : Data : multiqc_data
[INFO ] multiqc : MultiQC complete
|
ZeroDivisionError
|
def split_data_by_lane_and_sample(self):
for runId in self.bcl2fastq_data.keys():
for lane in self.bcl2fastq_data[runId].keys():
uniqLaneName = self.prepend_runid(runId, lane)
self.bcl2fastq_bylane[uniqLaneName] = {
"total": self.bcl2fastq_data[runId][lane]["total"],
"total_yield": self.bcl2fastq_data[runId][lane]["total_yield"],
"perfectIndex": self.bcl2fastq_data[runId][lane]["perfectIndex"],
"undetermined": self.bcl2fastq_data[runId][lane]["samples"]
.get("undetermined", {})
.get("total", "NA"),
"yieldQ30": self.bcl2fastq_data[runId][lane]["yieldQ30"],
"qscore_sum": self.bcl2fastq_data[runId][lane]["qscore_sum"],
"percent_Q30": self.bcl2fastq_data[runId][lane]["percent_Q30"],
"percent_perfectIndex": self.bcl2fastq_data[runId][lane][
"percent_perfectIndex"
],
"mean_qscore": self.bcl2fastq_data[runId][lane]["mean_qscore"],
}
for sample in self.bcl2fastq_data[runId][lane]["samples"].keys():
if not sample in self.bcl2fastq_bysample:
self.bcl2fastq_bysample[sample] = {
"total": 0,
"total_yield": 0,
"perfectIndex": 0,
"yieldQ30": 0,
"qscore_sum": 0,
"trimmed_bases": 0,
}
if not sample in self.bcl2fastq_bysample_lane:
self.bcl2fastq_bysample_lane[sample] = dict()
self.bcl2fastq_bysample_lane[sample][lane] = self.bcl2fastq_data[runId][
lane
]["samples"][sample]["total"]
self.bcl2fastq_bysample[sample]["total"] += self.bcl2fastq_data[runId][
lane
]["samples"][sample]["total"]
self.bcl2fastq_bysample[sample]["total_yield"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["total_yield"]
self.bcl2fastq_bysample[sample]["perfectIndex"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["perfectIndex"]
self.bcl2fastq_bysample[sample]["yieldQ30"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["yieldQ30"]
self.bcl2fastq_bysample[sample]["qscore_sum"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["qscore_sum"]
self.bcl2fastq_bysample[sample]["trimmed_bases"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["trimmed_bases"]
try:
self.bcl2fastq_bysample[sample]["percent_Q30"] = (
float(self.bcl2fastq_bysample[sample]["yieldQ30"])
/ float(self.bcl2fastq_bysample[sample]["total_yield"])
) * 100.0
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["percent_Q30"] = "NA"
try:
self.bcl2fastq_bysample[sample]["percent_perfectIndex"] = (
float(self.bcl2fastq_bysample[sample]["perfectIndex"])
/ float(self.bcl2fastq_bysample[sample]["total"])
) * 100.0
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["percent_perfectIndex"] = "NA"
try:
self.bcl2fastq_bysample[sample]["mean_qscore"] = float(
self.bcl2fastq_bysample[sample]["qscore_sum"]
) / float(self.bcl2fastq_bysample[sample]["total_yield"])
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["mean_qscore"] = "NA"
try:
self.bcl2fastq_bysample[sample]["percent_trimmed"] = (
float(self.bcl2fastq_bysample[sample]["trimmed_bases"])
/ float(self.bcl2fastq_bysample[sample]["total_yield"])
* 100.0
)
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["percent_trimmed"] = "NA"
if sample != "undetermined":
if not sample in self.source_files:
self.source_files[sample] = []
self.source_files[sample].append(
self.bcl2fastq_data[runId][lane]["samples"][sample]["filename"]
)
|
def split_data_by_lane_and_sample(self):
for runId in self.bcl2fastq_data.keys():
for lane in self.bcl2fastq_data[runId].keys():
uniqLaneName = self.prepend_runid(runId, lane)
self.bcl2fastq_bylane[uniqLaneName] = {
"total": self.bcl2fastq_data[runId][lane]["total"],
"total_yield": self.bcl2fastq_data[runId][lane]["total_yield"],
"perfectIndex": self.bcl2fastq_data[runId][lane]["perfectIndex"],
"undetermined": self.bcl2fastq_data[runId][lane]["samples"]
.get("undetermined", {})
.get("total", "NA"),
"yieldQ30": self.bcl2fastq_data[runId][lane]["yieldQ30"],
"qscore_sum": self.bcl2fastq_data[runId][lane]["qscore_sum"],
"percent_Q30": self.bcl2fastq_data[runId][lane]["percent_Q30"],
"percent_perfectIndex": self.bcl2fastq_data[runId][lane][
"percent_perfectIndex"
],
"mean_qscore": self.bcl2fastq_data[runId][lane]["mean_qscore"],
}
for sample in self.bcl2fastq_data[runId][lane]["samples"].keys():
if not sample in self.bcl2fastq_bysample:
self.bcl2fastq_bysample[sample] = {
"total": 0,
"total_yield": 0,
"perfectIndex": 0,
"yieldQ30": 0,
"qscore_sum": 0,
"trimmed_bases": 0,
}
if not sample in self.bcl2fastq_bysample_lane:
self.bcl2fastq_bysample_lane[sample] = dict()
self.bcl2fastq_bysample_lane[sample][lane] = self.bcl2fastq_data[runId][
lane
]["samples"][sample]["total"]
self.bcl2fastq_bysample[sample]["total"] += self.bcl2fastq_data[runId][
lane
]["samples"][sample]["total"]
self.bcl2fastq_bysample[sample]["total_yield"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["total_yield"]
self.bcl2fastq_bysample[sample]["perfectIndex"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["perfectIndex"]
self.bcl2fastq_bysample[sample]["yieldQ30"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["yieldQ30"]
self.bcl2fastq_bysample[sample]["qscore_sum"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["qscore_sum"]
self.bcl2fastq_bysample[sample]["trimmed_bases"] += self.bcl2fastq_data[
runId
][lane]["samples"][sample]["trimmed_bases"]
try:
self.bcl2fastq_bysample[sample]["percent_Q30"] = (
float(self.bcl2fastq_bysample[sample]["yieldQ30"])
/ float(self.bcl2fastq_bysample[sample]["total_yield"])
) * 100.0
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["percent_Q30"] = "NA"
try:
self.bcl2fastq_bysample[sample]["percent_perfectIndex"] = (
float(self.bcl2fastq_bysample[sample]["perfectIndex"])
/ float(self.bcl2fastq_bysample[sample]["total"])
) * 100.0
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["percent_perfectIndex"] = "NA"
try:
self.bcl2fastq_bysample[sample]["percent_perfectIndex"] = (
float(self.bcl2fastq_bysample[sample]["perfectIndex"])
/ float(self.bcl2fastq_bysample[sample]["total"])
) * 100.0
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["percent_perfectIndex"] = "NA"
try:
self.bcl2fastq_bysample[sample]["mean_qscore"] = float(
self.bcl2fastq_bysample[sample]["qscore_sum"]
) / float(self.bcl2fastq_bysample[sample]["total_yield"])
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["mean_qscore"] = "NA"
try:
self.bcl2fastq_bysample[sample]["percent_trimmed"] = (
float(self.bcl2fastq_bysample[sample]["trimmed_bases"])
/ float(self.bcl2fastq_bysample[sample]["total_yield"])
* 100.0
)
except ZeroDivisionError:
self.bcl2fastq_bysample[sample]["mean_qscore"] = "NA"
if sample != "undetermined":
if not sample in self.source_files:
self.source_files[sample] = []
self.source_files[sample].append(
self.bcl2fastq_data[runId][lane]["samples"][sample]["filename"]
)
|
https://github.com/ewels/MultiQC/issues/731
|
multiqc --outdir x .
[INFO ] multiqc : This is MultiQC v1.5
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
Searching 770 files.. [####################################] 100%
[ERROR ] multiqc : Oops! The 'bcl2fastq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
./Stats/Stats.json
============================================================
Module bcl2fastq raised an exception: Traceback (most recent call last):
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/bin/multiqc", line 442, in multiqc
output = mod()
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 44, in __init__
self.add_general_stats()
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 275, in add_general_stats
} for key in self.bcl2fastq_bysample.keys()
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 275, in <dictcomp>
} for key in self.bcl2fastq_bysample.keys()
ZeroDivisionError: float division by zero
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
ZeroDivisionError
|
def add_general_stats(self):
data = {}
for key in self.bcl2fastq_bysample.keys():
try:
perfectPercent = float(
100.0
* self.bcl2fastq_bysample[key]["perfectIndex"]
/ self.bcl2fastq_bysample[key]["total"]
)
except ZeroDivisionError:
perfectPercent = 0
data[key] = {
"yieldQ30": self.bcl2fastq_bysample[key]["yieldQ30"],
"total": self.bcl2fastq_bysample[key]["total"],
"perfectPercent": "{0:.1f}".format(perfectPercent),
"trimmedPercent": self.bcl2fastq_bysample[key]["percent_trimmed"],
}
headers = OrderedDict()
headers["total"] = {
"title": "{} Clusters".format(config.read_count_prefix),
"description": "Total number of reads for this sample as determined by bcl2fastq demultiplexing ({})".format(
config.read_count_desc
),
"scale": "Blues",
"shared_key": "read_count",
}
headers["yieldQ30"] = {
"title": "{} Yield ≥ Q30".format(config.base_count_prefix),
"description": "Number of bases with a Phred score of 30 or higher ({})".format(
config.base_count_desc
),
"scale": "Greens",
"shared_key": "base_count",
}
headers["perfectPercent"] = {
"title": "% Perfect Index",
"description": "Percent of reads with perfect index (0 mismatches)",
"max": 100,
"min": 0,
"scale": "RdYlGn",
"suffix": "%",
}
headers["trimmedPercent"] = {
"title": "% Bases trimmed",
"description": "Percent of bases trimmed",
"max": 100,
"min": 0,
"scale": "Reds",
"suffix": "%",
"hidden": True if all(data[s]["trimmedPercent"] == 0 for s in data) else False,
}
self.general_stats_addcols(data, headers)
|
def add_general_stats(self):
data = {
key: {
"yieldQ30": self.bcl2fastq_bysample[key]["yieldQ30"],
"total": self.bcl2fastq_bysample[key]["total"],
"perfectPercent": "{0:.1f}".format(
float(
100.0
* self.bcl2fastq_bysample[key]["perfectIndex"]
/ self.bcl2fastq_bysample[key]["total"]
)
),
"trimmedPercent": self.bcl2fastq_bysample[key]["percent_trimmed"],
}
for key in self.bcl2fastq_bysample.keys()
}
headers = OrderedDict()
headers["total"] = {
"title": "{} Clusters".format(config.read_count_prefix),
"description": "Total number of reads for this sample as determined by bcl2fastq demultiplexing ({})".format(
config.read_count_desc
),
"scale": "Blues",
"shared_key": "read_count",
}
headers["yieldQ30"] = {
"title": "{} Yield ≥ Q30".format(config.base_count_prefix),
"description": "Number of bases with a Phred score of 30 or higher ({})".format(
config.base_count_desc
),
"scale": "Greens",
"shared_key": "base_count",
}
headers["perfectPercent"] = {
"title": "% Perfect Index",
"description": "Percent of reads with perfect index (0 mismatches)",
"max": 100,
"min": 0,
"scale": "RdYlGn",
"suffix": "%",
}
headers["trimmedPercent"] = {
"title": "% Bases trimmed",
"description": "Percent of bases trimmed",
"max": 100,
"min": 0,
"scale": "Reds",
"suffix": "%",
"hidden": True if all(data[s]["trimmedPercent"] == 0 for s in data) else False,
}
self.general_stats_addcols(data, headers)
|
https://github.com/ewels/MultiQC/issues/731
|
multiqc --outdir x .
[INFO ] multiqc : This is MultiQC v1.5
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
Searching 770 files.. [####################################] 100%
[ERROR ] multiqc : Oops! The 'bcl2fastq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
If possible, please include a log file that triggers the error - the last file found was:
./Stats/Stats.json
============================================================
Module bcl2fastq raised an exception: Traceback (most recent call last):
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/bin/multiqc", line 442, in multiqc
output = mod()
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 44, in __init__
self.add_general_stats()
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 275, in add_general_stats
} for key in self.bcl2fastq_bysample.keys()
File "/bioseqfs/home/sequencing/opt/miniconda2/envs/sequencing/lib/python2.7/site-packages/multiqc/modules/bcl2fastq/bcl2fastq.py", line 275, in <dictcomp>
} for key in self.bcl2fastq_bysample.keys()
ZeroDivisionError: float division by zero
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
ZeroDivisionError
|
def quast_contigs_barplot(self):
"""Make a bar plot showing the number and length of contigs for each assembly"""
# Prep the data
data = dict()
categories = []
for s_name, d in self.quast_data.items():
nums_by_t = dict()
for k, v in d.items():
m = re.match("# contigs \(>= (\d+) bp\)", k)
if m and v != "-":
nums_by_t[int(m.groups()[0])] = int(v)
tresholds = sorted(nums_by_t.keys(), reverse=True)
p = dict()
cats = []
for i, t in enumerate(tresholds):
if i == 0:
c = ">= " + str(t) + " bp"
cats.append(c)
p[c] = nums_by_t[t]
else:
c = str(t) + "-" + str(tresholds[i - 1]) + " bp"
cats.append(c)
p[c] = nums_by_t[t] - nums_by_t[tresholds[i - 1]]
if not categories:
categories = cats
data[s_name] = p
pconfig = {
"id": "quast_num_contigs",
"title": "QUAST: Number of Contigs",
"ylab": "# Contigs",
"yDecimals": False,
}
return bargraph.plot(data, categories, pconfig)
|
def quast_contigs_barplot(self):
"""Make a bar plot showing the number and length of contigs for each assembly"""
# Prep the data
data = dict()
categories = []
for s_name, d in self.quast_data.items():
nums_by_t = dict()
for k, v in d.items():
m = re.match("# contigs \(>= (\d+) bp\)", k)
if m:
nums_by_t[int(m.groups()[0])] = v
tresholds = sorted(nums_by_t.keys(), reverse=True)
p = dict()
cats = []
for i, t in enumerate(tresholds):
if i == 0:
c = ">= " + str(t) + " bp"
cats.append(c)
p[c] = nums_by_t[t]
else:
c = str(t) + "-" + str(tresholds[i - 1]) + " bp"
cats.append(c)
p[c] = nums_by_t[t] - nums_by_t[tresholds[i - 1]]
if not categories:
categories = cats
elif set(cats) != set(categories):
log.warning(
"Different contig threshold categories for samples, skip plotting barplot".format(
s_name
)
)
continue
data[s_name] = p
pconfig = {
"id": "quast_num_contigs",
"title": "QUAST: Number of Contigs",
"ylab": "# Contigs",
"yDecimals": False,
}
return bargraph.plot(data, categories, pconfig)
|
https://github.com/ewels/MultiQC/issues/719
|
[INFO ] multiqc : This is MultiQC v1.5
============================================================
Module quast raised an exception: Traceback (most recent call last):
File "/home/kevin/anaconda3/envs/assembly-env/bin/multiqc", line 442, in multiqc
output = mod()
File "/home/kevin/anaconda3/envs/assembly-env/lib/python2.7/site-packages/multiqc/modules/quast/quast.py", line 72, in __init__
plot = self.quast_contigs_barplot()
File "/home/kevin/anaconda3/envs/assembly-env/lib/python2.7/site-packages/multiqc/modules/quast/quast.py", line 279, in quast_contigs_barplot
p[c] = nums_by_t[t] - nums_by_t[tresholds[i - 1]]
TypeError: unsupported operand type(s) for -: 'unicode' and 'float'
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
TypeError
|
def parse_tag_info(self, f):
"""Parse HOMER tagdirectory taginfo.txt file to extract statistics in the first 11 lines."""
# General Stats Table
tag_info = dict()
for l in f["f"]:
s = l.split("=")
if len(s) > 1:
if s[0].strip() == "genome":
ss = s[1].split("\t")
if len(ss) > 2:
tag_info["genome"] = ss[0].strip()
try:
tag_info["UniqPositions"] = float(ss[1].strip())
tag_info["TotalPositions"] = float(ss[2].strip())
except:
tag_info["UniqPositions"] = ss[1].strip()
tag_info["TotalPositions"] = ss[2].strip()
try:
tag_info[s[0].strip()] = float(s[1].strip())
except ValueError:
tag_info[s[0].strip()] = s[1].strip()
return tag_info
|
def parse_tag_info(self, f):
"""Parse HOMER tagdirectory taginfo.txt file to extract statistics in the first 11 lines."""
# General Stats Table
tag_info = dict()
counter = 0
for l in f["f"]:
if counter == 1:
s = l.split("\t")
tag_info["UniqPositions"] = float(s[1].strip())
tag_info["TotalPositions"] = float(s[2].strip())
if counter == 4:
s = l.split("\t")
tag_info["fragmentLengthEstimate"] = float(s[0].strip().split("=")[1])
if counter == 5:
s = l.split("\t")
tag_info["peakSizeEstimate"] = float(s[0].strip().split("=")[1])
if counter == 6:
s = l.split("\t")
tag_info["tagsPerBP"] = float(s[0].strip().split("=")[1])
if counter == 7:
s = l.split("\t")
tag_info["averageTagsPerPosition"] = float(s[0].strip().split("=")[1])
if counter == 8:
s = l.split("\t")
tag_info["averageTagLength"] = float(s[0].strip().split("=")[1])
if counter == 9:
s = l.split("\t")
tag_info["gsizeEstimate"] = float(s[0].strip().split("=")[1])
if counter == 10:
s = l.split("\t")
tag_info["averageFragmentGCcontent"] = float(s[0].strip().split("=")[1])
if counter == 11:
break
counter = counter + 1
return tag_info
|
https://github.com/ewels/MultiQC/issues/666
|
szha0069@biomate1:~/test_seq_files/analysis/tags$ multiqc .
[WARNING] multiqc : MultiQC Version v1.4 now available!
[INFO ] multiqc : This is MultiQC v1.3
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
Searching 33 files.. [####################################] 100%
[ERROR ] multiqc : Oops! The 'homer' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module homer raised an exception: Traceback (most recent call last):
File "/mnt/software/apps/multiqc-1.3/bin/multiqc", line 412, in multiqc
output = mod()
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/homer.py", line 60, in __init__
n['Homer_tagDir'] = self.homer_tagdirectory()
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/tagdirectory.py", line 24, in homer_tagdirectory
self.parse_tagInfo_data()
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/tagdirectory.py", line 142, in parse_tagInfo_data
parsed_data = self.parse_tag_info(f)
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/tagdirectory.py", line 349, in parse_tag_info
tag_info['gsizeEstimate'] = float(s[0].strip().split("=")[1])
IndexError: list index out of range
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
IndexError
|
def parse_tag_info_chrs(self, f, convChr=True):
"""Parse HOMER tagdirectory taginfo.txt file to extract chromosome coverage."""
parsed_data_total = OrderedDict()
parsed_data_uniq = OrderedDict()
remove = ["hap", "random", "chrUn", "cmd", "EBV", "GL", "NT_"]
for l in f["f"]:
s = l.split("\t")
key = s[0].strip()
# skip header
if "=" in l or len(s) != 3:
continue
if convChr:
if any(x in key for x in remove):
continue
try:
vT = float(s[1].strip())
vU = float(s[2].strip())
except ValueError:
continue
parsed_data_total[key] = vT
parsed_data_uniq[key] = vU
return [parsed_data_total, parsed_data_uniq]
|
def parse_tag_info_chrs(self, f, convChr=True):
"""Parse HOMER tagdirectory taginfo.txt file to extract chromosome coverage."""
parsed_data_total = dict()
parsed_data_uniq = dict()
remove = ["hap", "random", "chrUn", "cmd", "EBV", "GL", "NT_"]
## skip first 11 lines
counter = 0
for l in f["f"]:
## skip first 11 lines
if counter < 11:
counter = counter + 1
continue
s = l.split("\t")
key = s[0].strip()
if len(s) > 1:
if convChr:
if any(x in key for x in remove):
continue
vT = float(s[1].strip())
vU = float(s[2].strip())
parsed_data_total[key] = vT
parsed_data_uniq[key] = vU
return [parsed_data_total, parsed_data_uniq]
|
https://github.com/ewels/MultiQC/issues/666
|
szha0069@biomate1:~/test_seq_files/analysis/tags$ multiqc .
[WARNING] multiqc : MultiQC Version v1.4 now available!
[INFO ] multiqc : This is MultiQC v1.3
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
Searching 33 files.. [####################################] 100%
[ERROR ] multiqc : Oops! The 'homer' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module homer raised an exception: Traceback (most recent call last):
File "/mnt/software/apps/multiqc-1.3/bin/multiqc", line 412, in multiqc
output = mod()
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/homer.py", line 60, in __init__
n['Homer_tagDir'] = self.homer_tagdirectory()
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/tagdirectory.py", line 24, in homer_tagdirectory
self.parse_tagInfo_data()
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/tagdirectory.py", line 142, in parse_tagInfo_data
parsed_data = self.parse_tag_info(f)
File "/mnt/software/apps/multiqc-1.3/lib/python3.5/site-packages/multiqc/modules/homer/tagdirectory.py", line 349, in parse_tag_info
tag_info['gsizeEstimate'] = float(s[0].strip().split("=")[1])
IndexError: list index out of range
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
IndexError
|
def parse_qorts(self, f):
    """Parse a QoRTs summary table and store per-sample metrics in self.qorts_data.

    The first line is a header whose columns (after the first) name the
    samples; every following line is a metric name plus one value per sample.
    Single-sample tables use a "COUNT" header, in which case the sample name
    is taken from the file search (or the directory for QC.summary.txt).
    """
    sample_names = None
    for line in f["f"]:
        fields = line.split("\t")
        if sample_names is None:
            # Header row: everything after the first column is a sample name
            sample_names = [self.clean_s_name(n, f["root"]) for n in fields[1:]]
            if len(sample_names) <= 2 and sample_names[0] == "COUNT":
                # Single-sample layout - derive a proper sample name instead
                if f["fn"] == "QC.summary.txt":
                    dirname = os.path.basename(os.path.normpath(f["root"]))
                    sample_names = [self.clean_s_name(dirname, f["root"])]
                else:
                    sample_names = [f["s_name"]]
            for name in sample_names:
                if name in self.qorts_data:
                    log.debug(
                        "Duplicate sample name found! Overwriting: {}".format(name)
                    )
                self.qorts_data[name] = dict()
            continue
        # Data row: one value per sample, keyed by the metric in column 0
        for idx, name in enumerate(sample_names):
            self.qorts_data[name][fields[0]] = float(fields[idx + 1])
    # Derived metric: percentage of genes with nonzero counts
    for idx, name in enumerate(sample_names):
        sample_data = self.qorts_data[name]
        if "Genes_Total" in sample_data and "Genes_WithNonzeroCounts" in sample_data:
            sample_data["Genes_PercentWithNonzeroCounts"] = (
                sample_data["Genes_WithNonzeroCounts"] / sample_data["Genes_Total"]
            ) * 100.0
|
def parse_qorts(self, f):
    """Parse a QoRTs summary table and store per-sample metrics in self.qorts_data.

    Handles both multi-sample tables (one column per sample) and the
    single-sample "COUNT" layout, which may carry a trailing free-text notes
    column. Previously the notes column was mistaken for a second sample and
    its text was passed to float(), raising ValueError.
    """
    s_names = None
    for l in f["f"]:
        s = l.split("\t")
        if s_names is None:
            # Header row: everything after the first column is a sample name
            s_names = [self.clean_s_name(s_name, f["root"]) for s_name in s[1:]]
            # Single-sample layout: header is "COUNT", optionally followed by a
            # notes column. Collapse to one sample so the free-text notes are
            # never float()-converted below.
            if 0 < len(s_names) <= 2 and s_names[0] == "COUNT":
                s_names = [f["s_name"]]
            for s_name in s_names:
                if s_name in self.qorts_data:
                    log.debug(
                        "Duplicate sample name found! Overwriting: {}".format(s_name)
                    )
                self.qorts_data[s_name] = dict()
        else:
            # Data row: one value per sample, keyed by the metric in column 0
            for i, s_name in enumerate(s_names):
                self.qorts_data[s_name][s[0]] = float(s[i + 1])
    # Add some extra fields
    for i, s_name in enumerate(s_names):
        if (
            "Genes_Total" in self.qorts_data[s_name]
            and "Genes_WithNonzeroCounts" in self.qorts_data[s_name]
        ):
            self.qorts_data[s_name]["Genes_PercentWithNonzeroCounts"] = (
                self.qorts_data[s_name]["Genes_WithNonzeroCounts"]
                / self.qorts_data[s_name]["Genes_Total"]
            ) * 100.0
|
https://github.com/ewels/MultiQC/issues/639
|
multiqc input/pipeline5a-exemplar6 -o multiqc-pipeline5a-exemplar6
[INFO ] multiqc : This is MultiQC v1.3
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching 'input/pipeline5a-exemplar6'
Searching 662 files.. [####################################] 100%
[ERROR ] multiqc : Oops! The 'qorts' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module qorts raised an exception: Traceback (most recent call last):
File "/sw/lsa/centos7/python-anaconda2/created-20170421b/bin/multiqc", line 412, in multiqc
output = mod()
File "/sw/lsa/centos7/python-anaconda2/created-20170421b/lib/python2.7/site-packages/multiqc/modules/qorts/qorts.py", line 29, in __init__
self.parse_qorts(f)
File "/sw/lsa/centos7/python-anaconda2/created-20170421b/lib/python2.7/site-packages/multiqc/modules/qorts/qorts.py", line 64, in parse_qorts
self.qorts_data[s_name][s[0]] = float(s[i+1])
ValueError: could not convert string to float: Code for the strandedness rule used. 0 if data is unstranded, 1 if data is fr_firstStrand, 2 if data is fr_secondStrand.
============================================================
[INFO ] star : Found 6 reports
[INFO ] fastqc : Found 24 reports
[INFO ] multiqc : Compressing plot data
[INFO ] multiqc : Report : multiqc-pipeline5a-exemplar6/multiqc_report.html
[INFO ] multiqc : Data : multiqc-pipeline5a-exemplar6/multiqc_data
[INFO ] multiqc : MultiQC complete
|
ValueError
|
def parse_reports(self):
    """Find Picard InsertSizeMetrics reports and parse their data.

    Populates three dicts: ``picard_insertSize_data`` (one metrics row per
    sample + pair orientation), ``picard_insertSize_histogram`` (insert size
    -> summed count per sample) and ``picard_insertSize_samplestats``
    (running totals used to compute the summed mean/median). Returns the
    number of parsed metrics rows for the parent module.
    """
    # Set up vars
    self.picard_insertSize_data = dict()
    self.picard_insertSize_histogram = dict()
    self.picard_insertSize_samplestats = dict()
    # Go through logs and find Metrics
    for f in self.find_log_files("picard/insertsize", filehandles=True):
        s_name = None
        in_hist = False
        for l in f["f"]:
            # Catch the histogram values
            if s_name is not None and in_hist is True:
                try:
                    sections = l.split("\t")
                    ins = int(sections[0])
                    # Sum counts across all orientation columns for this insert size
                    tot_count = sum([int(x) for x in sections[1:]])
                    self.picard_insertSize_histogram[s_name][ins] = tot_count
                    self.picard_insertSize_samplestats[s_name]["total_count"] += tot_count
                except ValueError:
                    # Reset in case we have more in this log file
                    s_name = None
                    in_hist = False
            # New log starting
            if "InsertSizeMetrics" in l and "INPUT" in l:
                s_name = None
                # Pull sample name from input
                fn_search = re.search(r"INPUT=(\[?[^\s]+\]?)", l)
                if fn_search:
                    s_name = os.path.basename(fn_search.group(1).strip("[]"))
                    s_name = self.clean_s_name(s_name, f["root"])
            if s_name is not None:
                if "InsertSizeMetrics" in l and "## METRICS CLASS" in l:
                    if s_name in self.picard_insertSize_data:
                        log.debug(
                            "Duplicate sample name found in {}! Overwriting: {}".format(
                                f["fn"], s_name
                            )
                        )
                    self.add_data_source(f, s_name, section="InsertSizeMetrics")
                    # Header row, then one data row per read-pair orientation
                    keys = f["f"].readline().strip("\n").split("\t")
                    vals = f["f"].readline().strip("\n").split("\t")
                    self.picard_insertSize_samplestats[s_name] = {
                        "total_count": 0,
                        "meansum": 0,
                        "total_pairs": 0,
                    }
                    orientation_idx = keys.index("PAIR_ORIENTATION")
                    while len(vals) == len(keys):
                        pair_orientation = vals[orientation_idx]
                        rowkey = "{}_{}".format(s_name, pair_orientation)
                        self.picard_insertSize_data[rowkey] = OrderedDict()
                        self.picard_insertSize_data[rowkey]["SAMPLE_NAME"] = s_name
                        for i, k in enumerate(keys):
                            try:
                                self.picard_insertSize_data[rowkey][k] = float(vals[i])
                            except ValueError:
                                try:
                                    # Locale fix: some Picard runs write decimal commas
                                    self.picard_insertSize_data[rowkey][k] = float(
                                        vals[i].replace(",", ".")
                                    )
                                    log.debug(
                                        "Switching commas for points in '{}': {} - {}".format(
                                            f["fn"], vals[i], vals[i].replace(",", ".")
                                        )
                                    )
                                except ValueError:
                                    # Genuinely non-numeric field - keep as string
                                    self.picard_insertSize_data[rowkey][k] = vals[i]
                            except IndexError:
                                pass  # missing data
                        # Add to mean sums
                        rp = self.picard_insertSize_data[rowkey]["READ_PAIRS"]
                        mis = self.picard_insertSize_data[rowkey]["MEAN_INSERT_SIZE"]
                        self.picard_insertSize_samplestats[s_name]["meansum"] += rp * mis
                        self.picard_insertSize_samplestats[s_name]["total_pairs"] += rp
                        vals = f["f"].readline().strip("\n").split("\t")
                    # Skip lines on to histogram
                    l = f["f"].readline().strip("\n")
                    l = f["f"].readline().strip("\n")
                    self.picard_insertSize_histogram[s_name] = OrderedDict()
                    in_hist = True
    # Drop metrics rows / histograms that ended up empty
    for key in list(self.picard_insertSize_data.keys()):
        if len(self.picard_insertSize_data[key]) == 0:
            self.picard_insertSize_data.pop(key, None)
    for s_name in list(self.picard_insertSize_histogram.keys()):
        if len(self.picard_insertSize_histogram[s_name]) == 0:
            self.picard_insertSize_histogram.pop(s_name, None)
            log.debug("Ignoring '{}' histogram as no data parsed".format(s_name))
    # Calculate summed mean values for all read orientations
    for s_name, v in self.picard_insertSize_samplestats.items():
        self.picard_insertSize_samplestats[s_name]["summed_mean"] = (
            v["meansum"] / v["total_pairs"]
        )
    # Calculate summed median values for all read orientations
    for s_name in self.picard_insertSize_histogram:
        j = 0
        for idx, c in self.picard_insertSize_histogram[s_name].items():
            j += c
            if j > (self.picard_insertSize_samplestats[s_name]["total_count"] / 2):
                self.picard_insertSize_samplestats[s_name]["summed_median"] = idx
                break
    # Filter to strip out ignored sample names
    self.picard_insertSize_data = self.ignore_samples(self.picard_insertSize_data)
    if len(self.picard_insertSize_data) > 0:
        # Write parsed data to a file
        self.write_data_file(self.picard_insertSize_data, "multiqc_picard_insertSize")
        # Do we have median insert sizes?
        missing_medians = False
        for v in self.picard_insertSize_samplestats.values():
            if "summed_median" not in v:
                missing_medians = True
        # Add to general stats table
        self.general_stats_headers["summed_median"] = {
            "title": "Insert Size",
            "description": "Median Insert Size, all read orientations (bp)",
            "min": 0,
            "suffix": " bp",
            "format": "{:,.0f}",
            "scale": "GnBu",
        }
        self.general_stats_headers["summed_mean"] = {
            "title": "Mean Insert Size",
            "description": "Mean Insert Size, all read orientations (bp)",
            "min": 0,
            "suffix": " bp",
            "format": "{:,.0f}",
            "scale": "GnBu",
            # Only show the mean by default when no medians could be computed
            "hidden": False if missing_medians else True,
        }
        for s_name in self.picard_insertSize_samplestats:
            if s_name not in self.general_stats_data:
                self.general_stats_data[s_name] = dict()
            self.general_stats_data[s_name].update(
                self.picard_insertSize_samplestats[s_name]
            )
        # Section with histogram plot
        if len(self.picard_insertSize_histogram) > 0:
            # Make a normalised percentage version of the data
            data_percent = {}
            for s_name, data in self.picard_insertSize_histogram.items():
                data_percent[s_name] = OrderedDict()
                total = float(sum(data.values()))
                for k, v in data.items():
                    data_percent[s_name][k] = (v / total) * 100
            # Plot the data and add section
            pconfig = {
                "smooth_points": 500,
                "smooth_points_sumcounts": [True, False],
                "id": "picard_insert_size",
                "title": "Picard: Insert Size",
                "ylab": "Count",
                "xlab": "Insert Size (bp)",
                "xDecimals": False,
                "tt_label": "<b>{point.x} bp</b>: {point.y:.0f}",
                "ymin": 0,
                "data_labels": [
                    {"name": "Counts", "ylab": "Coverage"},
                    {"name": "Percentages", "ylab": "Percentage of Counts"},
                ],
            }
            self.add_section(
                name="Insert Size",
                anchor="picard-insertsize",
                description="Plot shows the number of reads at a given insert size. Reads with different orientations are summed.",
                plot=linegraph.plot(
                    [self.picard_insertSize_histogram, data_percent], pconfig
                ),
            )
    # Return the number of detected samples to the parent module
    return len(self.picard_insertSize_data)
|
def parse_reports(self):
    """Find Picard InsertSizeMetrics reports and parse their data.

    Populates ``picard_insertSize_data``, ``picard_insertSize_histogram`` and
    ``picard_insertSize_samplestats``, and returns the number of parsed
    metrics rows.

    Fix: metric values written with a decimal comma (locale-dependent Picard
    output, e.g. "34,5") previously failed float() and were stored as strings,
    so the later ``rp * mis`` mean-sum raised TypeError. A comma->point
    fallback conversion is now attempted before keeping the raw string.
    """
    # Set up vars
    self.picard_insertSize_data = dict()
    self.picard_insertSize_histogram = dict()
    self.picard_insertSize_samplestats = dict()
    # Go through logs and find Metrics
    for f in self.find_log_files("picard/insertsize", filehandles=True):
        s_name = None
        in_hist = False
        for l in f["f"]:
            # Catch the histogram values
            if s_name is not None and in_hist is True:
                try:
                    sections = l.split("\t")
                    ins = int(sections[0])
                    # Sum counts across all orientation columns for this insert size
                    tot_count = sum([int(x) for x in sections[1:]])
                    self.picard_insertSize_histogram[s_name][ins] = tot_count
                    self.picard_insertSize_samplestats[s_name]["total_count"] += tot_count
                except ValueError:
                    # Reset in case we have more in this log file
                    s_name = None
                    in_hist = False
            # New log starting
            if "InsertSizeMetrics" in l and "INPUT" in l:
                s_name = None
                # Pull sample name from input
                fn_search = re.search(r"INPUT=(\[?[^\s]+\]?)", l)
                if fn_search:
                    s_name = os.path.basename(fn_search.group(1).strip("[]"))
                    s_name = self.clean_s_name(s_name, f["root"])
            if s_name is not None:
                if "InsertSizeMetrics" in l and "## METRICS CLASS" in l:
                    if s_name in self.picard_insertSize_data:
                        log.debug(
                            "Duplicate sample name found in {}! Overwriting: {}".format(
                                f["fn"], s_name
                            )
                        )
                    self.add_data_source(f, s_name, section="InsertSizeMetrics")
                    # Header row, then one data row per read-pair orientation
                    keys = f["f"].readline().strip("\n").split("\t")
                    vals = f["f"].readline().strip("\n").split("\t")
                    self.picard_insertSize_samplestats[s_name] = {
                        "total_count": 0,
                        "meansum": 0,
                        "total_pairs": 0,
                    }
                    orientation_idx = keys.index("PAIR_ORIENTATION")
                    while len(vals) == len(keys):
                        pair_orientation = vals[orientation_idx]
                        rowkey = "{}_{}".format(s_name, pair_orientation)
                        self.picard_insertSize_data[rowkey] = OrderedDict()
                        self.picard_insertSize_data[rowkey]["SAMPLE_NAME"] = s_name
                        for i, k in enumerate(keys):
                            try:
                                self.picard_insertSize_data[rowkey][k] = float(vals[i])
                            except ValueError:
                                try:
                                    # Locale fix: retry with decimal comma as point
                                    self.picard_insertSize_data[rowkey][k] = float(
                                        vals[i].replace(",", ".")
                                    )
                                    log.debug(
                                        "Switching commas for points in '{}': {} - {}".format(
                                            f["fn"], vals[i], vals[i].replace(",", ".")
                                        )
                                    )
                                except ValueError:
                                    # Genuinely non-numeric field - keep as string
                                    self.picard_insertSize_data[rowkey][k] = vals[i]
                            except IndexError:
                                pass  # missing data
                        # Add to mean sums
                        rp = self.picard_insertSize_data[rowkey]["READ_PAIRS"]
                        mis = self.picard_insertSize_data[rowkey]["MEAN_INSERT_SIZE"]
                        self.picard_insertSize_samplestats[s_name]["meansum"] += rp * mis
                        self.picard_insertSize_samplestats[s_name]["total_pairs"] += rp
                        vals = f["f"].readline().strip("\n").split("\t")
                    # Skip lines on to histogram
                    l = f["f"].readline().strip("\n")
                    l = f["f"].readline().strip("\n")
                    self.picard_insertSize_histogram[s_name] = OrderedDict()
                    in_hist = True
    # Drop metrics rows / histograms that ended up empty
    for key in list(self.picard_insertSize_data.keys()):
        if len(self.picard_insertSize_data[key]) == 0:
            self.picard_insertSize_data.pop(key, None)
    for s_name in list(self.picard_insertSize_histogram.keys()):
        if len(self.picard_insertSize_histogram[s_name]) == 0:
            self.picard_insertSize_histogram.pop(s_name, None)
            log.debug("Ignoring '{}' histogram as no data parsed".format(s_name))
    # Calculate summed mean values for all read orientations
    for s_name, v in self.picard_insertSize_samplestats.items():
        self.picard_insertSize_samplestats[s_name]["summed_mean"] = (
            v["meansum"] / v["total_pairs"]
        )
    # Calculate summed median values for all read orientations
    for s_name in self.picard_insertSize_histogram:
        j = 0
        for idx, c in self.picard_insertSize_histogram[s_name].items():
            j += c
            if j > (self.picard_insertSize_samplestats[s_name]["total_count"] / 2):
                self.picard_insertSize_samplestats[s_name]["summed_median"] = idx
                break
    # Filter to strip out ignored sample names
    self.picard_insertSize_data = self.ignore_samples(self.picard_insertSize_data)
    if len(self.picard_insertSize_data) > 0:
        # Write parsed data to a file
        self.write_data_file(self.picard_insertSize_data, "multiqc_picard_insertSize")
        # Do we have median insert sizes?
        missing_medians = False
        for v in self.picard_insertSize_samplestats.values():
            if "summed_median" not in v:
                missing_medians = True
        # Add to general stats table
        self.general_stats_headers["summed_median"] = {
            "title": "Insert Size",
            "description": "Median Insert Size, all read orientations (bp)",
            "min": 0,
            "suffix": " bp",
            "format": "{:,.0f}",
            "scale": "GnBu",
        }
        self.general_stats_headers["summed_mean"] = {
            "title": "Mean Insert Size",
            "description": "Mean Insert Size, all read orientations (bp)",
            "min": 0,
            "suffix": " bp",
            "format": "{:,.0f}",
            "scale": "GnBu",
            # Only show the mean by default when no medians could be computed
            "hidden": False if missing_medians else True,
        }
        for s_name in self.picard_insertSize_samplestats:
            if s_name not in self.general_stats_data:
                self.general_stats_data[s_name] = dict()
            self.general_stats_data[s_name].update(
                self.picard_insertSize_samplestats[s_name]
            )
        # Section with histogram plot
        if len(self.picard_insertSize_histogram) > 0:
            # Make a normalised percentage version of the data
            data_percent = {}
            for s_name, data in self.picard_insertSize_histogram.items():
                data_percent[s_name] = OrderedDict()
                total = float(sum(data.values()))
                for k, v in data.items():
                    data_percent[s_name][k] = (v / total) * 100
            # Plot the data and add section
            pconfig = {
                "smooth_points": 500,
                "smooth_points_sumcounts": [True, False],
                "id": "picard_insert_size",
                "title": "Picard: Insert Size",
                "ylab": "Count",
                "xlab": "Insert Size (bp)",
                "xDecimals": False,
                "tt_label": "<b>{point.x} bp</b>: {point.y:.0f}",
                "ymin": 0,
                "data_labels": [
                    {"name": "Counts", "ylab": "Coverage"},
                    {"name": "Percentages", "ylab": "Percentage of Counts"},
                ],
            }
            self.add_section(
                name="Insert Size",
                anchor="picard-insertsize",
                description="Plot shows the number of reads at a given insert size. Reads with different orientations are summed.",
                plot=linegraph.plot(
                    [self.picard_insertSize_histogram, data_percent], pconfig
                ),
            )
    # Return the number of detected samples to the parent module
    return len(self.picard_insertSize_data)
|
https://github.com/ewels/MultiQC/issues/630
|
[ERROR ] multiqc : Oops! The 'picard' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module picard raised an exception: Traceback (most recent call last):
File "/usr/local/bin/multiqc", line 412, in multiqc
output = mod()
File "/usr/local/lib/python3.5/dist-packages/multiqc/modules/picard/picard.py", line 64, in __init__
n['InsertSizeMetrics'] = InsertSizeMetrics.parse_reports(self)
File "/usr/local/lib/python3.5/dist-packages/multiqc/modules/picard/InsertSizeMetrics.py", line 76, in parse_reports
self.picard_insertSize_samplestats[s_name]['meansum'] += (rp * mis)
TypeError: can't multiply sequence by non-int of type 'float'
============================================================
|
TypeError
|
def __init__(self):
    """Set up the bcl2fastq module: find and parse Stats.json logs, aggregate
    read counts by lane and by sample, and build the report sections."""
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="bcl2fastq",
        anchor="bcl2fastq",
        href="https://support.illumina.com/sequencing/sequencing_software/bcl2fastq-conversion-software.html",
        info="can be used to both demultiplex data and convert BCL files to FASTQ file formats for downstream analysis.",
    )
    # Gather data from all json files
    self.bcl2fastq_data = dict()
    for myfile in self.find_log_files("bcl2fastq"):
        self.parse_file_as_json(myfile)
    # Collect counts by lane and sample (+source_files)
    self.bcl2fastq_bylane = dict()
    self.bcl2fastq_bysample = dict()
    self.source_files = dict()
    self.split_data_by_lane_and_sample()
    # Filter to strip out ignored sample names
    self.bcl2fastq_bylane = self.ignore_samples(self.bcl2fastq_bylane)
    self.bcl2fastq_bysample = self.ignore_samples(self.bcl2fastq_bysample)
    # Return with Warning if no files are found
    if len(self.bcl2fastq_bylane) == 0 and len(self.bcl2fastq_bysample) == 0:
        raise UserWarning
    # Print source files
    for s in self.source_files.keys():
        self.add_data_source(
            s_name=s,
            source=",".join(list(set(self.source_files[s]))),
            module="bcl2fastq",
            section="bcl2fastq-bysample",
        )
    # Add sample counts to general stats table
    self.add_general_stats()
    # Persist parsed data (lane keys stringified for serialisation)
    self.write_data_file(
        {str(k): self.bcl2fastq_bylane[k] for k in self.bcl2fastq_bylane.keys()},
        "multiqc_bcl2fastq_bylane",
    )
    self.write_data_file(self.bcl2fastq_bysample, "multiqc_bcl2fastq_bysample")
    # Add section for summary stats per flow cell
    self.add_section(
        name="Lane Statistics",
        anchor="bcl2fastq-lanestats",
        description="Statistics about each lane for each flowcell",
        plot=self.lane_stats_table(),
    )
    # Add section for counts by lane
    cats = OrderedDict()
    cats["perfect"] = {"name": "Perfect Index Reads"}
    cats["imperfect"] = {"name": "Mismatched Index Reads"}
    cats["undetermined"] = {"name": "Undetermined Reads"}
    self.add_section(
        name="Clusters by lane",
        anchor="bcl2fastq-bylane",
        description="Number of reads per lane (with number of perfect index reads).",
        helptext="""Perfect index reads are those that do not have a single mismatch.
            All samples of a lane are combined. Undetermined reads are treated as a third category.
            To avoid conflicts the run ID is prepended.""",
        plot=bargraph.plot(
            self.get_bar_data_from_counts(self.bcl2fastq_bylane),
            cats,
            {
                "id": "bcl2fastq_lane_counts",
                "title": "bcl2fastq: Clusters by lane",
                # Keep zero-valued categories visible in the plot
                "hide_zero_cats": False,
            },
        ),
    )
    # Add section for counts by sample
    self.add_section(
        name="Clusters by sample",
        anchor="bcl2fastq-bysample",
        description="Number of reads per sample (with number of perfect index reads)",
        helptext="""Perfect index reads are those that do not have a single mismatch.
            All samples are aggregated across lanes combinned. Undetermined reads are ignored.
            Undetermined reads are treated as a separate sample.
            To avoid conflicts the runId is prepended.""",
        plot=bargraph.plot(
            self.get_bar_data_from_counts(self.bcl2fastq_bysample),
            cats,
            {
                "id": "bcl2fastq_sample_counts",
                "title": "bcl2fastq: Clusters by sample",
                # Keep zero-valued categories visible in the plot
                "hide_zero_cats": False,
            },
        ),
    )
|
def __init__(self):
    """Set up the bcl2fastq module: find and parse Stats.json logs, aggregate
    read counts by lane and by sample, and build the report sections.

    Fixes: plot titles misspelled the tool name as "bcl2tfastq"; zero-valued
    categories (e.g. no undetermined reads) were hidden from the bar plots,
    so ``hide_zero_cats`` is now disabled for both plots.
    """
    # Initialise the parent object
    super(MultiqcModule, self).__init__(
        name="bcl2fastq",
        anchor="bcl2fastq",
        href="https://support.illumina.com/sequencing/sequencing_software/bcl2fastq-conversion-software.html",
        info="can be used to both demultiplex data and convert BCL files to FASTQ file formats for downstream analysis.",
    )
    # Gather data from all json files
    self.bcl2fastq_data = dict()
    for myfile in self.find_log_files("bcl2fastq"):
        self.parse_file_as_json(myfile)
    # Collect counts by lane and sample (+source_files)
    self.bcl2fastq_bylane = dict()
    self.bcl2fastq_bysample = dict()
    self.source_files = dict()
    self.split_data_by_lane_and_sample()
    # Filter to strip out ignored sample names
    self.bcl2fastq_bylane = self.ignore_samples(self.bcl2fastq_bylane)
    self.bcl2fastq_bysample = self.ignore_samples(self.bcl2fastq_bysample)
    # Return with Warning if no files are found
    if len(self.bcl2fastq_bylane) == 0 and len(self.bcl2fastq_bysample) == 0:
        raise UserWarning
    # Print source files
    for s in self.source_files.keys():
        self.add_data_source(
            s_name=s,
            source=",".join(list(set(self.source_files[s]))),
            module="bcl2fastq",
            section="bcl2fastq-bysample",
        )
    # Add sample counts to general stats table
    self.add_general_stats()
    # Persist parsed data (lane keys stringified for serialisation)
    self.write_data_file(
        {str(k): self.bcl2fastq_bylane[k] for k in self.bcl2fastq_bylane.keys()},
        "multiqc_bcl2fastq_bylane",
    )
    self.write_data_file(self.bcl2fastq_bysample, "multiqc_bcl2fastq_bysample")
    # Add section for summary stats per flow cell
    self.add_section(
        name="Lane Statistics",
        anchor="bcl2fastq-lanestats",
        description="Statistics about each lane for each flowcell",
        plot=self.lane_stats_table(),
    )
    # Add section for counts by lane
    cats = OrderedDict()
    cats["perfect"] = {"name": "Perfect Index Reads"}
    cats["imperfect"] = {"name": "Mismatched Index Reads"}
    cats["undetermined"] = {"name": "Undetermined Reads"}
    self.add_section(
        name="Clusters by lane",
        anchor="bcl2fastq-bylane",
        description="Number of reads per lane (with number of perfect index reads).",
        helptext="""Perfect index reads are those that do not have a single mismatch.
            All samples of a lane are combined. Undetermined reads are treated as a third category.
            To avoid conflicts the run ID is prepended.""",
        plot=bargraph.plot(
            self.get_bar_data_from_counts(self.bcl2fastq_bylane),
            cats,
            {
                "id": "bcl2fastq_lane_counts",
                "title": "bcl2fastq: Clusters by lane",
                # Keep zero-valued categories visible in the plot
                "hide_zero_cats": False,
            },
        ),
    )
    # Add section for counts by sample
    self.add_section(
        name="Clusters by sample",
        anchor="bcl2fastq-bysample",
        description="Number of reads per sample (with number of perfect index reads)",
        helptext="""Perfect index reads are those that do not have a single mismatch.
            All samples are aggregated across lanes combinned. Undetermined reads are ignored.
            Undetermined reads are treated as a separate sample.
            To avoid conflicts the runId is prepended.""",
        plot=bargraph.plot(
            self.get_bar_data_from_counts(self.bcl2fastq_bysample),
            cats,
            {
                "id": "bcl2fastq_sample_counts",
                "title": "bcl2fastq: Clusters by sample",
                # Keep zero-valued categories visible in the plot
                "hide_zero_cats": False,
            },
        ),
    )
|
https://github.com/ewels/MultiQC/issues/595
|
[ERROR ] multiqc : Oops! The 'bcl2fastq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module bcl2fastq raised an exception: Traceback (most recent call last):
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/EGG-INFO/scripts/multiqc", line 411, in multiqc
output = mod()
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/multiqc/modules/bcl2fastq/bcl2fastq.py", line 21, in __init__
self.parse_file_as_json(myfile)
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/multiqc/modules/bcl2fastq/bcl2fastq.py", line 120, in parse_file_as_json
for indexMetric in demuxResult["IndexMetrics"]:
KeyError: 'IndexMetrics'
============================================================
|
KeyError
|
def parse_file_as_json(self, myfile):
    """Parse one bcl2fastq Stats.json payload into self.bcl2fastq_data.

    Accumulates per-lane and per-sample read counts, yields, perfect-index
    counts and quality-score sums, then derives percentages and mean
    quality scores. Files that are not valid JSON are skipped with a warning.
    """
    try:
        content = json.loads(myfile["f"])
    except ValueError:
        log.warn("Could not parse file as json: {}".format(myfile["fn"]))
        return
    runId = content["RunId"]
    # One dict per run ID, shared across files for the same run
    run_data = self.bcl2fastq_data.setdefault(runId, dict())
    for conv in content.get("ConversionResults", []):
        lane = "L{}".format(conv["LaneNumber"])
        if lane in run_data:
            log.debug(
                "Duplicate runId/lane combination found! Overwriting: {}".format(
                    self.prepend_runid(runId, lane)
                )
            )
        lane_d = {
            "total": 0,
            "total_yield": 0,
            "perfectIndex": 0,
            "samples": dict(),
            "yieldQ30": 0,
            "qscore_sum": 0,
        }
        run_data[lane] = lane_d
        for demux in conv.get("DemuxResults", []):
            sample = demux["SampleName"]
            if sample in lane_d["samples"]:
                log.debug(
                    "Duplicate runId/lane/sample combination found! Overwriting: {}, {}".format(
                        self.prepend_runid(runId, lane), sample
                    )
                )
            sample_d = {
                "total": 0,
                "total_yield": 0,
                "perfectIndex": 0,
                "filename": os.path.join(myfile["root"], myfile["fn"]),
                "yieldQ30": 0,
                "qscore_sum": 0,
            }
            lane_d["samples"][sample] = sample_d
            lane_d["total"] += demux["NumberReads"]
            lane_d["total_yield"] += demux["Yield"]
            sample_d["total"] += demux["NumberReads"]
            sample_d["total_yield"] += demux["Yield"]
            # Index metrics may be absent (e.g. runs without index reads)
            if "IndexMetrics" in demux:
                for index_metric in demux["IndexMetrics"]:
                    lane_d["perfectIndex"] += index_metric["MismatchCounts"]["0"]
                    sample_d["perfectIndex"] += index_metric["MismatchCounts"]["0"]
            for read_metric in demux.get("ReadMetrics", []):
                lane_d["yieldQ30"] += read_metric["YieldQ30"]
                lane_d["qscore_sum"] += read_metric["QualityScoreSum"]
                sample_d["yieldQ30"] += read_metric["YieldQ30"]
                sample_d["qscore_sum"] += read_metric["QualityScoreSum"]
        # Undetermined reads are tracked as a pseudo-sample, when present
        undet_yield_q30 = 0
        undet_qscore_sum = 0
        if "Undetermined" in conv:
            for read_metric in conv["Undetermined"]["ReadMetrics"]:
                undet_yield_q30 += read_metric["YieldQ30"]
                undet_qscore_sum += read_metric["QualityScoreSum"]
            lane_d["samples"]["undetermined"] = {
                "total": conv["Undetermined"]["NumberReads"],
                "total_yield": conv["Undetermined"]["Yield"],
                "perfectIndex": 0,
                "yieldQ30": undet_yield_q30,
                "qscore_sum": undet_qscore_sum,
            }
    # Calculate Percents and averages
    for lane in run_data:
        lane_d = run_data[lane]
        lane_d["percent_Q30"] = (
            float(lane_d["yieldQ30"]) / float(lane_d["total_yield"])
        ) * 100.0
        lane_d["percent_perfectIndex"] = (
            float(lane_d["perfectIndex"]) / float(lane_d["total"])
        ) * 100.0
        lane_d["mean_qscore"] = float(lane_d["qscore_sum"]) / float(
            lane_d["total_yield"]
        )
        for sample, d in lane_d["samples"].items():
            d["percent_Q30"] = (float(d["yieldQ30"]) / float(d["total_yield"])) * 100.0
            d["percent_perfectIndex"] = (
                float(d["perfectIndex"]) / float(d["total"])
            ) * 100.0
            d["mean_qscore"] = float(d["qscore_sum"]) / float(d["total_yield"])
|
def parse_file_as_json(self, myfile):
    """Parse one bcl2fastq Stats.json payload into self.bcl2fastq_data.

    Accumulates per-lane and per-sample read counts, yields, perfect-index
    counts and quality-score sums, then derives percentages and mean quality
    scores.

    Fix: "IndexMetrics", "Undetermined", "ConversionResults", "DemuxResults"
    and "ReadMetrics" are not guaranteed to be present in every Stats.json
    (e.g. runs without index reads); unguarded lookups previously raised
    KeyError. All of them are now accessed defensively.
    """
    try:
        content = json.loads(myfile["f"])
    except ValueError:
        log.warn("Could not parse file as json: {}".format(myfile["fn"]))
        return
    runId = content["RunId"]
    if not runId in self.bcl2fastq_data:
        self.bcl2fastq_data[runId] = dict()
    run_data = self.bcl2fastq_data[runId]
    for conversionResult in content.get("ConversionResults", []):
        lane = "L{}".format(conversionResult["LaneNumber"])
        if lane in run_data:
            log.debug(
                "Duplicate runId/lane combination found! Overwriting: {}".format(
                    self.prepend_runid(runId, lane)
                )
            )
        run_data[lane] = {
            "total": 0,
            "total_yield": 0,
            "perfectIndex": 0,
            "samples": dict(),
            "yieldQ30": 0,
            "qscore_sum": 0,
        }
        for demuxResult in conversionResult.get("DemuxResults", []):
            sample = demuxResult["SampleName"]
            if sample in run_data[lane]["samples"]:
                log.debug(
                    "Duplicate runId/lane/sample combination found! Overwriting: {}, {}".format(
                        self.prepend_runid(runId, lane), sample
                    )
                )
            run_data[lane]["samples"][sample] = {
                "total": 0,
                "total_yield": 0,
                "perfectIndex": 0,
                "filename": os.path.join(myfile["root"], myfile["fn"]),
                "yieldQ30": 0,
                "qscore_sum": 0,
            }
            run_data[lane]["total"] += demuxResult["NumberReads"]
            run_data[lane]["total_yield"] += demuxResult["Yield"]
            run_data[lane]["samples"][sample]["total"] += demuxResult["NumberReads"]
            run_data[lane]["samples"][sample]["total_yield"] += demuxResult["Yield"]
            # Index metrics may be absent (e.g. runs without index reads)
            if "IndexMetrics" in demuxResult:
                for indexMetric in demuxResult["IndexMetrics"]:
                    run_data[lane]["perfectIndex"] += indexMetric["MismatchCounts"]["0"]
                    run_data[lane]["samples"][sample]["perfectIndex"] += indexMetric[
                        "MismatchCounts"
                    ]["0"]
            for readMetric in demuxResult.get("ReadMetrics", []):
                run_data[lane]["yieldQ30"] += readMetric["YieldQ30"]
                run_data[lane]["qscore_sum"] += readMetric["QualityScoreSum"]
                run_data[lane]["samples"][sample]["yieldQ30"] += readMetric["YieldQ30"]
                run_data[lane]["samples"][sample]["qscore_sum"] += readMetric[
                    "QualityScoreSum"
                ]
        # Undetermined reads are tracked as a pseudo-sample, when present
        undeterminedYieldQ30 = 0
        undeterminedQscoreSum = 0
        if "Undetermined" in conversionResult:
            for readMetric in conversionResult["Undetermined"]["ReadMetrics"]:
                undeterminedYieldQ30 += readMetric["YieldQ30"]
                undeterminedQscoreSum += readMetric["QualityScoreSum"]
            run_data[lane]["samples"]["undetermined"] = {
                "total": conversionResult["Undetermined"]["NumberReads"],
                "total_yield": conversionResult["Undetermined"]["Yield"],
                "perfectIndex": 0,
                "yieldQ30": undeterminedYieldQ30,
                "qscore_sum": undeterminedQscoreSum,
            }
    # Calculate Percents and averages
    for lane in run_data:
        run_data[lane]["percent_Q30"] = (
            float(run_data[lane]["yieldQ30"]) / float(run_data[lane]["total_yield"])
        ) * 100.0
        run_data[lane]["percent_perfectIndex"] = (
            float(run_data[lane]["perfectIndex"]) / float(run_data[lane]["total"])
        ) * 100.0
        run_data[lane]["mean_qscore"] = float(run_data[lane]["qscore_sum"]) / float(
            run_data[lane]["total_yield"]
        )
        for sample, d in run_data[lane]["samples"].items():
            run_data[lane]["samples"][sample]["percent_Q30"] = (
                float(d["yieldQ30"]) / float(d["total_yield"])
            ) * 100.0
            run_data[lane]["samples"][sample]["percent_perfectIndex"] = (
                float(d["perfectIndex"]) / float(d["total"])
            ) * 100.0
            run_data[lane]["samples"][sample]["mean_qscore"] = float(
                d["qscore_sum"]
            ) / float(d["total_yield"])
|
https://github.com/ewels/MultiQC/issues/595
|
[ERROR ] multiqc : Oops! The 'bcl2fastq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module bcl2fastq raised an exception: Traceback (most recent call last):
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/EGG-INFO/scripts/multiqc", line 411, in multiqc
output = mod()
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/multiqc/modules/bcl2fastq/bcl2fastq.py", line 21, in __init__
self.parse_file_as_json(myfile)
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/multiqc/modules/bcl2fastq/bcl2fastq.py", line 120, in parse_file_as_json
for indexMetric in demuxResult["IndexMetrics"]:
KeyError: 'IndexMetrics'
============================================================
|
KeyError
|
def split_data_by_lane_and_sample(self):
    """Re-key the parsed bcl2fastq results into per-lane and per-sample views.

    Populates (all read/written as attributes on self):
      - bcl2fastq_bylane: "<runId> - <lane>" -> lane-level totals; the
        "undetermined" count falls back to "NA" when a lane reports no
        undetermined reads.
      - bcl2fastq_bysample: sample -> counts summed across all runs/lanes,
        with derived percentages recomputed after each addition.
      - source_files: sample -> list of source FASTQ filenames
        ("undetermined" excluded).
    """
    for runId in self.bcl2fastq_data.keys():
        for lane in self.bcl2fastq_data[runId].keys():
            uniqLaneName = self.prepend_runid(runId, lane)
            self.bcl2fastq_bylane[uniqLaneName] = {
                "total": self.bcl2fastq_data[runId][lane]["total"],
                "total_yield": self.bcl2fastq_data[runId][lane]["total_yield"],
                "perfectIndex": self.bcl2fastq_data[runId][lane]["perfectIndex"],
                # Not every lane has an "undetermined" pseudo-sample, so use
                # .get() fallbacks rather than direct indexing
                "undetermined": self.bcl2fastq_data[runId][lane]["samples"]
                .get("undetermined", {})
                .get("total", "NA"),
                "yieldQ30": self.bcl2fastq_data[runId][lane]["yieldQ30"],
                "qscore_sum": self.bcl2fastq_data[runId][lane]["qscore_sum"],
                "percent_Q30": self.bcl2fastq_data[runId][lane]["percent_Q30"],
                "percent_perfectIndex": self.bcl2fastq_data[runId][lane][
                    "percent_perfectIndex"
                ],
                "mean_qscore": self.bcl2fastq_data[runId][lane]["mean_qscore"],
            }
            for sample in self.bcl2fastq_data[runId][lane]["samples"].keys():
                # First time we see this sample: initialise its counters
                if not sample in self.bcl2fastq_bysample:
                    self.bcl2fastq_bysample[sample] = {
                        "total": 0,
                        "total_yield": 0,
                        "perfectIndex": 0,
                        "yieldQ30": 0,
                        "qscore_sum": 0,
                    }
                self.bcl2fastq_bysample[sample]["total"] += self.bcl2fastq_data[runId][
                    lane
                ]["samples"][sample]["total"]
                self.bcl2fastq_bysample[sample]["total_yield"] += self.bcl2fastq_data[
                    runId
                ][lane]["samples"][sample]["total_yield"]
                self.bcl2fastq_bysample[sample]["perfectIndex"] += self.bcl2fastq_data[
                    runId
                ][lane]["samples"][sample]["perfectIndex"]
                self.bcl2fastq_bysample[sample]["yieldQ30"] += self.bcl2fastq_data[
                    runId
                ][lane]["samples"][sample]["yieldQ30"]
                self.bcl2fastq_bysample[sample]["qscore_sum"] += self.bcl2fastq_data[
                    runId
                ][lane]["samples"][sample]["qscore_sum"]
                # Derived stats are recomputed from the running totals so they
                # stay correct as more lanes are folded in
                self.bcl2fastq_bysample[sample]["percent_Q30"] = (
                    float(self.bcl2fastq_bysample[sample]["yieldQ30"])
                    / float(self.bcl2fastq_bysample[sample]["total_yield"])
                ) * 100.0
                self.bcl2fastq_bysample[sample]["percent_perfectIndex"] = (
                    float(self.bcl2fastq_bysample[sample]["perfectIndex"])
                    / float(self.bcl2fastq_bysample[sample]["total"])
                ) * 100.0
                self.bcl2fastq_bysample[sample]["mean_qscore"] = float(
                    self.bcl2fastq_bysample[sample]["qscore_sum"]
                ) / float(self.bcl2fastq_bysample[sample]["total_yield"])
                if sample != "undetermined":
                    if not sample in self.source_files:
                        self.source_files[sample] = []
                    self.source_files[sample].append(
                        self.bcl2fastq_data[runId][lane]["samples"][sample]["filename"]
                    )
|
def split_data_by_lane_and_sample(self):
    """Re-key parsed bcl2fastq results into per-lane and per-sample views.

    Populates (all read/written as attributes on self):
      - bcl2fastq_bylane: "<runId> - <lane>" -> lane-level totals.
      - bcl2fastq_bysample: sample -> counts summed across runs/lanes, with
        derived percentages recomputed after each addition.
      - source_files: sample -> list of source FASTQ filenames
        ("undetermined" excluded).
    """
    for runId in self.bcl2fastq_data.keys():
        for lane in self.bcl2fastq_data[runId].keys():
            lane_data = self.bcl2fastq_data[runId][lane]
            uniqLaneName = self.prepend_runid(runId, lane)
            self.bcl2fastq_bylane[uniqLaneName] = {
                "total": lane_data["total"],
                "total_yield": lane_data["total_yield"],
                "perfectIndex": lane_data["perfectIndex"],
                # BUGFIX: a lane may have no "undetermined" pseudo-sample,
                # so direct indexing raised KeyError - fall back to "NA"
                "undetermined": lane_data["samples"]
                .get("undetermined", {})
                .get("total", "NA"),
                "yieldQ30": lane_data["yieldQ30"],
                "qscore_sum": lane_data["qscore_sum"],
                "percent_Q30": lane_data["percent_Q30"],
                "percent_perfectIndex": lane_data["percent_perfectIndex"],
                "mean_qscore": lane_data["mean_qscore"],
            }
            for sample in lane_data["samples"].keys():
                sample_data = lane_data["samples"][sample]
                # First time we see this sample: initialise its counters
                if sample not in self.bcl2fastq_bysample:
                    self.bcl2fastq_bysample[sample] = {
                        "total": 0,
                        "total_yield": 0,
                        "perfectIndex": 0,
                        "yieldQ30": 0,
                        "qscore_sum": 0,
                    }
                agg = self.bcl2fastq_bysample[sample]
                for key in ("total", "total_yield", "perfectIndex", "yieldQ30", "qscore_sum"):
                    agg[key] += sample_data[key]
                # Derived stats are recomputed from the running totals so they
                # stay correct as more lanes are folded in
                agg["percent_Q30"] = (
                    float(agg["yieldQ30"]) / float(agg["total_yield"])
                ) * 100.0
                agg["percent_perfectIndex"] = (
                    float(agg["perfectIndex"]) / float(agg["total"])
                ) * 100.0
                agg["mean_qscore"] = float(agg["qscore_sum"]) / float(agg["total_yield"])
                # Track which file each (real) sample came from
                if sample != "undetermined":
                    if sample not in self.source_files:
                        self.source_files[sample] = []
                    self.source_files[sample].append(sample_data["filename"])
|
https://github.com/ewels/MultiQC/issues/595
|
[ERROR ] multiqc : Oops! The 'bcl2fastq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module bcl2fastq raised an exception: Traceback (most recent call last):
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/EGG-INFO/scripts/multiqc", line 411, in multiqc
output = mod()
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/multiqc/modules/bcl2fastq/bcl2fastq.py", line 21, in __init__
self.parse_file_as_json(myfile)
File "/camp/stp/babs/working/software/binaries/Python-2.7.12-foss-2016b_packages/multiqc-1.2-2016b-Python-2.7.12/lib/python2.7/site-packages/multiqc-1.2-py2.7.egg/multiqc/modules/bcl2fastq/bcl2fastq.py", line 120, in parse_file_as_json
for indexMetric in demuxResult["IndexMetrics"]:
KeyError: 'IndexMetrics'
============================================================
|
KeyError
|
def fqscreen_simple_plot(self):
        """Makes a simple bar plot with summed alignment counts for
        each species, stacked.

        Reads self.fq_screen_data (sample -> organism -> counts) and returns
        the HTML/JS for a stacked bar graph via bargraph.plot().
        """
        # First, sum the different types of alignment counts
        data = OrderedDict()
        cats = OrderedDict()
        for s_name in self.fq_screen_data:
            data[s_name] = OrderedDict()
            sum_alignments = 0
            for org in self.fq_screen_data[s_name]:
                if org == "total_reads":
                    continue
                # Malformed or very old FastQ Screen results may lack a
                # 'counts' section - log and skip rather than crash
                try:
                    data[s_name][org] = self.fq_screen_data[s_name][org]["counts"][
                        "one_hit_one_library"
                    ]
                except KeyError:
                    log.error(
                        "No counts found for '{}' ('{}'). Could be malformed or very old FastQ Screen results.".format(
                            org, s_name
                        )
                    )
                    continue
                # 'multiple_hits_one_library' is optional - best-effort add
                try:
                    data[s_name][org] += self.fq_screen_data[s_name][org]["counts"][
                        "multiple_hits_one_library"
                    ]
                except KeyError:
                    pass
                sum_alignments += data[s_name][org]
                if org not in cats and org != "No hits":
                    cats[org] = {"name": org}
            # Calculate hits in multiple genomes (only possible when the
            # total read count is known)
            if "total_reads" in self.fq_screen_data[s_name]:
                data[s_name]["Multiple Genomes"] = (
                    self.fq_screen_data[s_name]["total_reads"] - sum_alignments
                )
        # Strip empty dicts
        data = [data[s_name] for s_name in data if len(data[s_name]) > 0]
        pconfig = {
            "id": "fastq_screen",
            "title": "FastQ Screen",
            "cpswitch_c_active": False,
        }
        cats["Multiple Genomes"] = {"name": "Multiple Genomes", "color": "#820000"}
        cats["No hits"] = {"name": "No hits", "color": "#cccccc"}
        return bargraph.plot(data, cats, pconfig)
|
def fqscreen_simple_plot(self):
        """Makes a simple bar plot with summed alignment counts for
        each species, stacked.

        Reads self.fq_screen_data (sample -> organism -> counts) and returns
        the HTML/JS for a stacked bar graph via bargraph.plot().
        """
        # First, sum the different types of alignment counts
        data = OrderedDict()
        cats = OrderedDict()
        for s_name in self.fq_screen_data:
            data[s_name] = OrderedDict()
            sum_alignments = 0
            for org in self.fq_screen_data[s_name]:
                if org == "total_reads":
                    continue
                # BUGFIX: malformed or very old FastQ Screen results may lack
                # a 'counts' section, which used to raise KeyError and break
                # the whole module - log and skip the organism instead
                try:
                    data[s_name][org] = self.fq_screen_data[s_name][org]["counts"][
                        "one_hit_one_library"
                    ]
                except KeyError:
                    log.error(
                        "No counts found for '{}' ('{}'). Could be malformed or very old FastQ Screen results.".format(
                            org, s_name
                        )
                    )
                    continue
                # 'multiple_hits_one_library' is optional - best-effort add
                try:
                    data[s_name][org] += self.fq_screen_data[s_name][org]["counts"][
                        "multiple_hits_one_library"
                    ]
                except KeyError:
                    pass
                sum_alignments += data[s_name][org]
                if org not in cats and org != "No hits":
                    cats[org] = {"name": org}
            # Calculate hits in multiple genomes (only possible when the
            # total read count is known)
            if "total_reads" in self.fq_screen_data[s_name]:
                data[s_name]["Multiple Genomes"] = (
                    self.fq_screen_data[s_name]["total_reads"] - sum_alignments
                )
        # Strip samples for which no counts at all could be parsed
        data = [data[s_name] for s_name in data if len(data[s_name]) > 0]
        pconfig = {
            "id": "fastq_screen",
            "title": "FastQ Screen",
            "cpswitch_c_active": False,
        }
        cats["Multiple Genomes"] = {"name": "Multiple Genomes", "color": "#820000"}
        cats["No hits"] = {"name": "No hits", "color": "#cccccc"}
        return bargraph.plot(data, cats, pconfig)
|
https://github.com/ewels/MultiQC/issues/537
|
Module fastq_screen raised an exception: Traceback (most recent call last):
File "/home/es249628/bin/mypyenv-py3/bin/multiqc", line 364, in multiqc
output = mod()
File "/home/es249628/bin/mypyenv-py3/lib/python3.4/site-packages/multiqc/modules/fastq_screen/fastq_screen.py", line 55, in __init__
self.add_section( plot = self.fqscreen_simple_plot() )
File "/home/es249628/bin/mypyenv-py3/lib/python3.4/site-packages/multiqc/modules/fastq_screen/fastq_screen.py", line 213, in fqscreen_simple_plot
data[s_name][org] = self.fq_screen_data[s_name][org]['counts']['one_hit_one_library']
KeyError: 'counts'
|
KeyError
|
def __init__(self, data, headers=None, pconfig=None):
        """Prepare data for use in a table or plot.

        :param data: 2D dict (sample -> column key -> value), or a list of
            such dicts - one per table section.
        :param headers: column configuration dict(s), parallel to ``data``.
            Missing entries are generated from the data keys.
        :param pconfig: table-level configuration providing per-column
            defaults (scale, format, min/max, shared_key, ...).
        """
        if headers is None:
            headers = []
        if pconfig is None:
            pconfig = {}
        # Given one dataset - turn it into a list
        if type(data) is not list:
            data = [data]
        if type(headers) is not list:
            headers = [headers]
        # Default column colours, cycled per table section
        sectcols = [
            "55,126,184",
            "77,175,74",
            "152,78,163",
            "255,127,0",
            "228,26,28",
            "255,255,51",
            "166,86,40",
            "247,129,191",
            "153,153,153",
        ]
        shared_keys = defaultdict(lambda: dict())
        # Go through each table section
        for idx, d in enumerate(data):
            # Get the header keys
            try:
                keys = headers[idx].keys()
                assert len(keys) > 0
            except (IndexError, AttributeError, AssertionError):
                # No usable header config for this section - build one from
                # the union of the data keys
                keys = list()
                for samp in d.values():
                    for k in samp.keys():
                        if k not in keys:
                            keys.append(k)
                try:
                    headers[idx]
                except IndexError:
                    # NOTE(review): appends the `list` type as a placeholder;
                    # it is overwritten on the next line - confirm intent
                    headers.append(list)
                headers[idx] = OrderedDict()
                for k in keys:
                    headers[idx][k] = {}
            # Ensure that keys are strings, not numeric
            keys = [str(k) for k in keys]
            for k in list(headers[idx].keys()):
                headers[idx][str(k)] = headers[idx].pop(k)
            # Ensure that all sample names are strings as well
            data[idx] = {str(k): v for k, v in data[idx].items()}
            for s_name in data[idx].keys():
                for k in list(data[idx][s_name].keys()):
                    data[idx][s_name][str(k)] = data[idx][s_name].pop(k)
            # Check that we have some data in each column
            empties = list()
            for k in keys:
                n = 0
                for samp in d.values():
                    if k in samp:
                        n += 1
                if n == 0:
                    empties.append(k)
            for k in empties:
                keys = [j for j in keys if j != k]
                del headers[idx][k]
            for k in keys:
                # Unique id to avoid overwriting by other datasets
                headers[idx][k]["rid"] = "{}_{}".format(
                    id(headers[idx]), re.sub(r"\W+", "_", k)
                )
                # Use defaults / data keys if headers not given
                headers[idx][k]["namespace"] = headers[idx][k].get(
                    "namespace", pconfig.get("namespace", "")
                )
                headers[idx][k]["title"] = headers[idx][k].get("title", k)
                headers[idx][k]["description"] = headers[idx][k].get(
                    "description", headers[idx][k]["title"]
                )
                headers[idx][k]["scale"] = headers[idx][k].get(
                    "scale", pconfig.get("scale", "GnBu")
                )
                headers[idx][k]["format"] = headers[idx][k].get(
                    "format", pconfig.get("format", "{:,.1f}")
                )
                headers[idx][k]["colour"] = headers[idx][k].get(
                    "colour", pconfig.get("colour", None)
                )
                headers[idx][k]["hidden"] = headers[idx][k].get(
                    "hidden", pconfig.get("hidden", None)
                )
                headers[idx][k]["max"] = headers[idx][k].get(
                    "max", pconfig.get("max", None)
                )
                headers[idx][k]["min"] = headers[idx][k].get(
                    "min", pconfig.get("min", None)
                )
                headers[idx][k]["ceiling"] = headers[idx][k].get(
                    "ceiling", pconfig.get("ceiling", None)
                )
                headers[idx][k]["floor"] = headers[idx][k].get(
                    "floor", pconfig.get("floor", None)
                )
                headers[idx][k]["minRange"] = headers[idx][k].get(
                    "minRange", pconfig.get("minRange", None)
                )
                headers[idx][k]["shared_key"] = headers[idx][k].get(
                    "shared_key", pconfig.get("shared_key", None)
                )
                headers[idx][k]["modify"] = headers[idx][k].get(
                    "modify", pconfig.get("modify", None)
                )
                headers[idx][k]["placement"] = float(headers[idx][k].get("placement", 1000))
                # No explicit colour: pick one per section, cycling the list
                if headers[idx][k]["colour"] is None:
                    cidx = idx
                    while cidx >= len(sectcols):
                        cidx -= len(sectcols)
                    headers[idx][k]["colour"] = sectcols[cidx]
                # Overwrite hidden if set in user config
                try:
                    # Config has True = visibile, False = Hidden. Here we're setting "hidden" which is inverse
                    headers[idx][k]["hidden"] = not config.table_columns_visible[
                        headers[idx][k]["namespace"]
                    ][k]
                except KeyError:
                    pass
                # Also overwite placement if set in config
                try:
                    headers[idx][k]["placement"] = float(
                        config.table_columns_placement[headers[idx][k]["namespace"]][k]
                    )
                except (KeyError, ValueError):
                    pass
                # Work out max and min value if not given
                setdmax = False
                setdmin = False
                try:
                    headers[idx][k]["dmax"] = float(headers[idx][k]["max"])
                except TypeError:
                    headers[idx][k]["dmax"] = 0
                    setdmax = True
                try:
                    headers[idx][k]["dmin"] = float(headers[idx][k]["min"])
                except TypeError:
                    headers[idx][k]["dmin"] = 0
                    setdmin = True
                # Figure out the min / max if not supplied
                if setdmax or setdmin:
                    for s_name, samp in data[idx].items():
                        try:
                            val = float(samp[k])
                            if callable(headers[idx][k]["modify"]):
                                val = float(headers[idx][k]["modify"](val))
                            if setdmax:
                                headers[idx][k]["dmax"] = max(headers[idx][k]["dmax"], val)
                            if setdmin:
                                headers[idx][k]["dmin"] = min(headers[idx][k]["dmin"], val)
                        except ValueError:
                            val = samp[k]  # couldn't convert to float - keep as a string
                        except KeyError:
                            pass  # missing data - skip
                    # Limit auto-generated scales with floor, ceiling and minRange.
                    if (
                        headers[idx][k]["ceiling"] is not None
                        and headers[idx][k]["max"] is None
                    ):
                        headers[idx][k]["dmax"] = min(
                            headers[idx][k]["dmax"], float(headers[idx][k]["ceiling"])
                        )
                    if (
                        headers[idx][k]["floor"] is not None
                        and headers[idx][k]["min"] is None
                    ):
                        headers[idx][k]["dmin"] = max(
                            headers[idx][k]["dmin"], float(headers[idx][k]["floor"])
                        )
                    if headers[idx][k]["minRange"] is not None:
                        drange = headers[idx][k]["dmax"] - headers[idx][k]["dmin"]
                        if drange < float(headers[idx][k]["minRange"]):
                            headers[idx][k]["dmax"] = headers[idx][k]["dmin"] + float(
                                headers[idx][k]["minRange"]
                            )
        # Collect settings for shared keys
        shared_keys = defaultdict(lambda: dict())
        for idx, hs in enumerate(headers):
            for k in hs.keys():
                sk = headers[idx][k]["shared_key"]
                if sk is not None:
                    shared_keys[sk]["dmax"] = max(
                        headers[idx][k]["dmax"],
                        shared_keys[sk].get("dmax", headers[idx][k]["dmax"]),
                    )
                    # NOTE(review): dmin is folded with max() here - looks
                    # like it should be min(); confirm upstream intent
                    shared_keys[sk]["dmin"] = max(
                        headers[idx][k]["dmin"],
                        shared_keys[sk].get("dmin", headers[idx][k]["dmin"]),
                    )
        # Overwrite shared key settings and at the same time assign to buckets for sorting
        # Within each section of headers, sort explicitly by 'title' if the dict
        # is not already ordered, so the final ordering is by:
        # placement > section > explicit_ordering > title
        # Of course, the user can shuffle these manually.
        self.headers_in_order = defaultdict(list)
        for idx, hs in enumerate(headers):
            keys_in_section = hs.keys()
            if type(hs) is not OrderedDict:
                keys_in_section = sorted(
                    keys_in_section, key=lambda k: headers[idx][k]["title"]
                )
            for k in keys_in_section:
                sk = headers[idx][k]["shared_key"]
                if sk is not None:
                    headers[idx][k]["dmax"] = shared_keys[sk]["dmax"]
                    headers[idx][k]["dmin"] = shared_keys[sk]["dmin"]
                self.headers_in_order[headers[idx][k]["placement"]].append((idx, k))
        # Assign to class
        self.data = data
        self.headers = headers
        self.pconfig = pconfig
|
def __init__(self, data, headers=None, pconfig=None):
        """Prepare data for use in a table or plot.

        :param data: 2D dict (sample -> column key -> value), or a list of
            such dicts - one per table section.
        :param headers: column configuration dict(s), parallel to ``data``.
            Missing entries are generated from the data keys.
        :param pconfig: table-level configuration providing per-column
            defaults (scale, format, min/max, shared_key, ...).
        """
        if headers is None:
            headers = []
        if pconfig is None:
            pconfig = {}
        # Given one dataset - turn it into a list
        if type(data) is not list:
            data = [data]
        if type(headers) is not list:
            headers = [headers]
        # Default column colours, cycled per table section
        sectcols = [
            "55,126,184",
            "77,175,74",
            "152,78,163",
            "255,127,0",
            "228,26,28",
            "255,255,51",
            "166,86,40",
            "247,129,191",
            "153,153,153",
        ]
        shared_keys = defaultdict(lambda: dict())
        # Go through each table section
        for idx, d in enumerate(data):
            # Get the header keys
            try:
                keys = headers[idx].keys()
                assert len(keys) > 0
            except (IndexError, AttributeError, AssertionError):
                # No usable header config for this section - build one from
                # the union of the data keys
                keys = list()
                for samp in d.values():
                    for k in samp.keys():
                        if k not in keys:
                            keys.append(k)
                try:
                    headers[idx]
                except IndexError:
                    headers.append(list)
                headers[idx] = OrderedDict()
                for k in keys:
                    headers[idx][k] = {}
            # Ensure that keys are strings, not numeric
            keys = [str(k) for k in keys]
            for k in list(headers[idx].keys()):
                headers[idx][str(k)] = headers[idx].pop(k)
            # BUGFIX: ensure that all sample names are strings as well -
            # mixed int/str sample names made sorted() raise
            # "TypeError: unorderable types: int() < str()" in Python 3
            data[idx] = {str(k): v for k, v in data[idx].items()}
            for s_name in data[idx].keys():
                for k in list(data[idx][s_name].keys()):
                    data[idx][s_name][str(k)] = data[idx][s_name].pop(k)
            # Check that we have some data in each column
            empties = list()
            for k in keys:
                n = 0
                for samp in d.values():
                    if k in samp:
                        n += 1
                if n == 0:
                    empties.append(k)
            for k in empties:
                keys = [j for j in keys if j != k]
                del headers[idx][k]
            for k in keys:
                # Unique id to avoid overwriting by other datasets
                headers[idx][k]["rid"] = "{}_{}".format(
                    id(headers[idx]), re.sub(r"\W+", "_", k)
                )
                # Use defaults / data keys if headers not given
                headers[idx][k]["namespace"] = headers[idx][k].get(
                    "namespace", pconfig.get("namespace", "")
                )
                headers[idx][k]["title"] = headers[idx][k].get("title", k)
                headers[idx][k]["description"] = headers[idx][k].get(
                    "description", headers[idx][k]["title"]
                )
                headers[idx][k]["scale"] = headers[idx][k].get(
                    "scale", pconfig.get("scale", "GnBu")
                )
                headers[idx][k]["format"] = headers[idx][k].get(
                    "format", pconfig.get("format", "{:,.1f}")
                )
                headers[idx][k]["colour"] = headers[idx][k].get(
                    "colour", pconfig.get("colour", None)
                )
                headers[idx][k]["hidden"] = headers[idx][k].get(
                    "hidden", pconfig.get("hidden", None)
                )
                headers[idx][k]["max"] = headers[idx][k].get(
                    "max", pconfig.get("max", None)
                )
                headers[idx][k]["min"] = headers[idx][k].get(
                    "min", pconfig.get("min", None)
                )
                headers[idx][k]["ceiling"] = headers[idx][k].get(
                    "ceiling", pconfig.get("ceiling", None)
                )
                headers[idx][k]["floor"] = headers[idx][k].get(
                    "floor", pconfig.get("floor", None)
                )
                headers[idx][k]["minRange"] = headers[idx][k].get(
                    "minRange", pconfig.get("minRange", None)
                )
                headers[idx][k]["shared_key"] = headers[idx][k].get(
                    "shared_key", pconfig.get("shared_key", None)
                )
                headers[idx][k]["modify"] = headers[idx][k].get(
                    "modify", pconfig.get("modify", None)
                )
                headers[idx][k]["placement"] = float(headers[idx][k].get("placement", 1000))
                # No explicit colour: pick one per section, cycling the list
                if headers[idx][k]["colour"] is None:
                    cidx = idx
                    while cidx >= len(sectcols):
                        cidx -= len(sectcols)
                    headers[idx][k]["colour"] = sectcols[cidx]
                # Overwrite hidden if set in user config
                try:
                    # Config has True = visibile, False = Hidden. Here we're setting "hidden" which is inverse
                    headers[idx][k]["hidden"] = not config.table_columns_visible[
                        headers[idx][k]["namespace"]
                    ][k]
                except KeyError:
                    pass
                # Also overwite placement if set in config
                try:
                    headers[idx][k]["placement"] = float(
                        config.table_columns_placement[headers[idx][k]["namespace"]][k]
                    )
                except (KeyError, ValueError):
                    pass
                # Work out max and min value if not given
                setdmax = False
                setdmin = False
                try:
                    headers[idx][k]["dmax"] = float(headers[idx][k]["max"])
                except TypeError:
                    headers[idx][k]["dmax"] = 0
                    setdmax = True
                try:
                    headers[idx][k]["dmin"] = float(headers[idx][k]["min"])
                except TypeError:
                    headers[idx][k]["dmin"] = 0
                    setdmin = True
                # Figure out the min / max if not supplied
                if setdmax or setdmin:
                    for s_name, samp in data[idx].items():
                        try:
                            val = float(samp[k])
                            if callable(headers[idx][k]["modify"]):
                                val = float(headers[idx][k]["modify"](val))
                            if setdmax:
                                headers[idx][k]["dmax"] = max(headers[idx][k]["dmax"], val)
                            if setdmin:
                                headers[idx][k]["dmin"] = min(headers[idx][k]["dmin"], val)
                        except ValueError:
                            val = samp[k]  # couldn't convert to float - keep as a string
                        except KeyError:
                            pass  # missing data - skip
                    # Limit auto-generated scales with floor, ceiling and minRange.
                    if (
                        headers[idx][k]["ceiling"] is not None
                        and headers[idx][k]["max"] is None
                    ):
                        headers[idx][k]["dmax"] = min(
                            headers[idx][k]["dmax"], float(headers[idx][k]["ceiling"])
                        )
                    if (
                        headers[idx][k]["floor"] is not None
                        and headers[idx][k]["min"] is None
                    ):
                        headers[idx][k]["dmin"] = max(
                            headers[idx][k]["dmin"], float(headers[idx][k]["floor"])
                        )
                    if headers[idx][k]["minRange"] is not None:
                        drange = headers[idx][k]["dmax"] - headers[idx][k]["dmin"]
                        if drange < float(headers[idx][k]["minRange"]):
                            headers[idx][k]["dmax"] = headers[idx][k]["dmin"] + float(
                                headers[idx][k]["minRange"]
                            )
        # Collect settings for shared keys
        shared_keys = defaultdict(lambda: dict())
        for idx, hs in enumerate(headers):
            for k in hs.keys():
                sk = headers[idx][k]["shared_key"]
                if sk is not None:
                    shared_keys[sk]["dmax"] = max(
                        headers[idx][k]["dmax"],
                        shared_keys[sk].get("dmax", headers[idx][k]["dmax"]),
                    )
                    # NOTE(review): dmin is folded with max() here - looks
                    # like it should be min(); kept as-is to preserve the
                    # upstream behaviour, confirm intent separately
                    shared_keys[sk]["dmin"] = max(
                        headers[idx][k]["dmin"],
                        shared_keys[sk].get("dmin", headers[idx][k]["dmin"]),
                    )
        # Overwrite shared key settings and at the same time assign to buckets for sorting
        # Within each section of headers, sort explicitly by 'title' if the dict
        # is not already ordered, so the final ordering is by:
        # placement > section > explicit_ordering > title
        # Of course, the user can shuffle these manually.
        self.headers_in_order = defaultdict(list)
        for idx, hs in enumerate(headers):
            keys_in_section = hs.keys()
            if type(hs) is not OrderedDict:
                keys_in_section = sorted(
                    keys_in_section, key=lambda k: headers[idx][k]["title"]
                )
            for k in keys_in_section:
                sk = headers[idx][k]["shared_key"]
                if sk is not None:
                    headers[idx][k]["dmax"] = shared_keys[sk]["dmax"]
                    headers[idx][k]["dmin"] = shared_keys[sk]["dmin"]
                self.headers_in_order[headers[idx][k]["placement"]].append((idx, k))
        # Assign to class
        self.data = data
        self.headers = headers
        self.pconfig = pconfig
|
https://github.com/ewels/MultiQC/issues/521
|
[INFO ] multiqc : This is MultiQC v1.2.dev0 (e86ba77)
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
Searching 3 files.. [####################################] 100%
Traceback (most recent call last):
File "/tank/home/s216121/miniconda3/bin/multiqc", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/tank/home/s216121/software/MultiQC/scripts/multiqc", line 669, in <module>
multiqc()
File "/tank/home/s216121/miniconda3/lib/python3.5/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/tank/home/s216121/miniconda3/lib/python3.5/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/tank/home/s216121/miniconda3/lib/python3.5/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/tank/home/s216121/miniconda3/lib/python3.5/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/tank/home/s216121/software/MultiQC/scripts/multiqc", line 459, in multiqc
report.general_stats_html = table.plot(report.general_stats_data, report.general_stats_headers, pconfig)
File "/tank/home/s216121/software/MultiQC/multiqc/plots/table.py", line 44, in plot
return make_table ( dt )
File "/tank/home/s216121/software/MultiQC/multiqc/plots/table.py", line 249, in make_table
t_row_keys = sorted(t_row_keys)
TypeError: unorderable types: int() < str()
|
TypeError
|
def write_data_file(data, fn, sort_cols=False, data_format=None):
    """Write a data file to the report directory. Will not do anything
    if config.data_dir is not set.
    :param: data - a 2D dict, first key sample name (row header),
    second key field (column header).
    :param: fn - Desired filename. Directory will be prepended automatically.
    :param: sort_cols - Sort columns alphabetically
    :param: data_format - Output format. Defaults to config.data_format (usually tsv)
    :return: None"""
    if config.data_dir is not None:
        # Add relevant file extension to filename
        if data_format is None:
            data_format = config.data_format
        fn = "{}.{}".format(fn, config.data_format_extensions[data_format])
        # Save file
        with io.open(os.path.join(config.data_dir, fn), "w", encoding="utf-8") as f:
            if data_format == "json":
                jsonstr = json.dumps(data, indent=4, ensure_ascii=False)
                print(jsonstr.encode("utf-8", "ignore").decode("utf-8"), file=f)
            elif data_format == "yaml":
                yaml.dump(data, f, default_flow_style=False)
            else:
                # Default - tab separated output
                # Get all headers
                h = ["Sample"]
                for sn in sorted(data.keys()):
                    for k in data[sn].keys():
                        # Nested dicts can't be flattened to a TSV cell - skip
                        if type(data[sn][k]) is not dict and k not in h:
                            h.append(str(k))
                if sort_cols:
                    h = sorted(h)
                # Get the rows
                rows = ["\t".join(h)]
                for sn in sorted(data.keys()):
                    # Make a list starting with the sample name, then each field in order of the header cols
                    # (str(sn): sample names may be non-string, e.g. ints)
                    l = [str(sn)] + [str(data[sn].get(k, "")) for k in h[1:]]
                    rows.append("\t".join(l))
                body = "\n".join(rows)
                print(body.encode("utf-8", "ignore").decode("utf-8"), file=f)
|
def write_data_file(data, fn, sort_cols=False, data_format=None):
    """Write a data file to the report directory. Will not do anything
    if config.data_dir is not set.
    :param: data - a 2D dict, first key sample name (row header),
    second key field (column header).
    :param: fn - Desired filename. Directory will be prepended automatically.
    :param: sort_cols - Sort columns alphabetically
    :param: data_format - Output format. Defaults to config.data_format (usually tsv)
    :return: None"""
    if config.data_dir is not None:
        # Add relevant file extension to filename
        if data_format is None:
            data_format = config.data_format
        fn = "{}.{}".format(fn, config.data_format_extensions[data_format])
        # Save file
        with io.open(os.path.join(config.data_dir, fn), "w", encoding="utf-8") as f:
            if data_format == "json":
                jsonstr = json.dumps(data, indent=4, ensure_ascii=False)
                print(jsonstr.encode("utf-8", "ignore").decode("utf-8"), file=f)
            elif data_format == "yaml":
                yaml.dump(data, f, default_flow_style=False)
            else:
                # Default - tab separated output
                # Get all headers
                h = ["Sample"]
                for sn in sorted(data.keys()):
                    for k in data[sn].keys():
                        # Nested dicts can't be flattened to a TSV cell - skip
                        if type(data[sn][k]) is not dict and k not in h:
                            h.append(str(k))
                if sort_cols:
                    h = sorted(h)
                # Get the rows
                rows = ["\t".join(h)]
                for sn in sorted(data.keys()):
                    # Make a list starting with the sample name, then each field in order of the header cols
                    # BUGFIX: sample names may be non-string (e.g. ints), which
                    # made "\t".join() raise TypeError - coerce with str()
                    l = [str(sn)] + [str(data[sn].get(k, "")) for k in h[1:]]
                    rows.append("\t".join(l))
                body = "\n".join(rows)
                print(body.encode("utf-8", "ignore").decode("utf-8"), file=f)
|
https://github.com/ewels/MultiQC/issues/519
|
[INFO ] multiqc : This is MultiQC v1.2.dev0 (e86ba77)
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
Searching 3 files.. [####################################] 100%
[ERROR ] multiqc : Oops! The 'bcl2fastq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module bcl2fastq raised an exception: Traceback (most recent call last):
File "/tank/home/s216121/software/MultiQC/scripts/multiqc", line 365, in multiqc
output = mod()
File "/tank/home/s216121/software/MultiQC/multiqc/modules/bcl2fastq/bcl2fastq.py", line 88, in __init__
self.write_data_file(data, 'multiqc_mymod')
File "/tank/home/s216121/software/MultiQC/multiqc/modules/base_module.py", line 267, in write_data_file
util_functions.write_data_file(data, fn, sort_cols, data_format)
File "/tank/home/s216121/software/MultiQC/multiqc/utils/util_functions.py", line 80, in write_data_file
rows.append( "\t".join(l) )
TypeError: sequence item 0: expected str instance, int found
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
TypeError
|
def parse_htseq_report(self, f):
        """Parse an HTSeq Count log file into a stats dict.

        Returns a dict holding the special htseq counters (names without the
        leading double underscore) plus 'assigned', 'total_count' and
        'percent_assigned' - or None when no special counters were found.
        """
        special = [
            "__no_feature",
            "__ambiguous",
            "__too_low_aQual",
            "__not_aligned",
            "__alignment_not_unique",
        ]
        stats = dict()
        n_assigned = 0
        for line in f["f"]:
            fields = line.split("\t")
            if fields[0] in special:
                # Strip the leading "__" from the counter name
                stats[fields[0][2:]] = int(fields[1])
                continue
            # Any other line is a per-feature count; lines without a numeric
            # second column are silently skipped
            try:
                n_assigned += int(fields[1])
            except (ValueError, IndexError):
                pass
        if not stats:
            return None
        stats["assigned"] = n_assigned
        stats["total_count"] = sum(stats.values())
        stats["percent_assigned"] = (
            float(stats["assigned"]) / float(stats["total_count"])
        ) * 100.0
        return stats
|
def parse_htseq_report(self, f):
        """Parse the HTSeq Count log file.

        Returns a dict with the special htseq counters (names without the
        leading double underscore) plus 'assigned', 'total_count' and
        'percent_assigned' - or None if no special counters were found.
        """
        keys = [
            "__no_feature",
            "__ambiguous",
            "__too_low_aQual",
            "__not_aligned",
            "__alignment_not_unique",
        ]
        parsed_data = dict()
        assigned_counts = 0
        for l in f["f"]:
            s = l.split("\t")
            if s[0] in keys:
                parsed_data[s[0][2:]] = int(s[1])
            else:
                # BUGFIX: lines without a tab-separated numeric count column
                # (truncated or malformed output) raised IndexError/ValueError
                # and broke the whole module - skip them instead
                try:
                    assigned_counts += int(s[1])
                except (ValueError, IndexError):
                    pass
        if len(parsed_data) > 0:
            parsed_data["assigned"] = assigned_counts
            parsed_data["total_count"] = sum([v for v in parsed_data.values()])
            parsed_data["percent_assigned"] = (
                float(parsed_data["assigned"]) / float(parsed_data["total_count"])
            ) * 100.0
            return parsed_data
        return None
|
https://github.com/ewels/MultiQC/issues/391
|
[INFO ] multiqc : This is MultiQC v1.0.dev0
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '/home/mlotshwa/RNA_Seq_TB_project/Run2'
[ERROR ] multiqc : Oops! The 'htseq' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module htseq raised an exception: Traceback (most recent call last):
File "/home/mlotshwa/miniconda2/envs/snowflakes/lib/python2.7/site-packages/multiqc-1.0.dev0-py2.7.egg/EGG-INFO/scripts/multiqc", line 346, in multiqc
output = mod()
File "/home/mlotshwa/miniconda2/envs/snowflakes/lib/python2.7/site-packages/multiqc-1.0.dev0-py2.7.egg/multiqc/modules/htseq/htseq.py", line 31, in __init__
parsed_data = self.parse_htseq_report(f)
File "/home/mlotshwa/miniconda2/envs/snowflakes/lib/python2.7/site-packages/multiqc-1.0.dev0-py2.7.egg/multiqc/modules/htseq/htseq.py", line 62, in parse_htseq_report
assigned_counts += int(s[1])
IndexError: list index out of range
============================================================
[INFO ] samtools : Found 1 rmdup reports
[ERROR ] multiqc : Oops! The 'slamdunk' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module slamdunk raised an exception: Traceback (most recent call last):
File "/home/mlotshwa/miniconda2/envs/snowflakes/lib/python2.7/site-packages/multiqc-1.0.dev0-py2.7.egg/EGG-INFO/scripts/multiqc", line 346, in multiqc
output = mod()
File "/home/mlotshwa/miniconda2/envs/snowflakes/lib/python2.7/site-packages/multiqc-1.0.dev0-py2.7.egg/multiqc/modules/slamdunk/slamdunk.py", line 42, in __init__
self.parseSummary(f)
File "/home/mlotshwa/miniconda2/envs/snowflakes/lib/python2.7/site-packages/multiqc-1.0.dev0-py2.7.egg/multiqc/modules/slamdunk/slamdunk.py", line 295, in parseSummary
self.slamdunk_data[s_name]['sequenced'] = int(fields[4])
IndexError: list index out of range
============================================================
[INFO ] fastqc : Found 112 reports
[WARNING] fastqc : Multiple FastQC Theoretical GC Content files found, now using fastqc_theoretical_gc_hg38_genome.txt
[WARNING] fastqc : Multiple FastQC Theoretical GC Content files found, now using fastqc_theoretical_gc_hg38_txome.txt
[WARNING] fastqc : Multiple FastQC Theoretical GC Content files found, now using fastqc_theoretical_gc_mm10_txome.txt
[INFO ] multiqc : Report : multiqc_report.html
[INFO ] multiqc : Data : multiqc_data
[INFO ] multiqc : MultiQC complete
|
IndexError
|
def plot(data, pconfig=None):
    """Plot a line graph with X,Y data.
    :param data: 2D dict, first keys as sample names, then x:y data pairs
    :param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
    :return: HTML and JS, ready to be inserted into the page
    """
    # None default instead of {}: this function writes into pconfig
    # (e.g. pconfig['categories'] below), so a shared mutable default
    # dict would leak state between calls.
    if pconfig is None:
        pconfig = {}
    # Given one dataset - turn it into a list
    if type(data) is not list:
        data = [data]
    # Smooth dataset if requested in config
    if pconfig.get("smooth_points", None) is not None:
        sumcounts = pconfig.get("smooth_points_sumcounts", True)
        for i, d in enumerate(data):
            sumc = sumcounts
            # A list gives a per-dataset sumcounts setting
            if type(sumcounts) is list:
                sumc = sumcounts[i]
            data[i] = smooth_line_data(d, pconfig["smooth_points"], sumc)
    # Generate the data dict structure expected by HighCharts series
    plotdata = list()
    for d in data:
        thisplotdata = list()
        for s in sorted(d.keys()):
            pairs = list()
            maxval = 0
            if "categories" in pconfig:
                # Categorical x-axis: collect category labels and bare y values
                pconfig["categories"] = list()
                for k in d[s].keys():
                    pconfig["categories"].append(k)
                    pairs.append(d[s][k])
                    maxval = max(maxval, d[s][k])
            else:
                # Numeric x-axis: [x, y] pairs sorted by x
                for k in sorted(d[s].keys()):
                    pairs.append([k, d[s][k]])
                    try:
                        maxval = max(maxval, d[s][k])
                    except TypeError:
                        # Non-numeric y value - skip for the max calculation
                        pass
            if maxval > 0 or pconfig.get("hide_empty") is not True:
                this_series = {"name": s, "data": pairs}
                try:
                    this_series["color"] = pconfig["colors"][s]
                except (KeyError, TypeError):
                    # No colour configured for this sample - use the default
                    pass
                thisplotdata.append(this_series)
        plotdata.append(thisplotdata)
    # Add on annotation data series
    try:
        if pconfig.get("extra_series"):
            extra_series = pconfig["extra_series"]
            # Normalise to a list (per dataset) of lists (of series dicts)
            if type(pconfig["extra_series"]) == dict:
                extra_series = [[pconfig["extra_series"]]]
            elif (
                type(pconfig["extra_series"]) == list
                and type(pconfig["extra_series"][0]) == dict
            ):
                extra_series = [pconfig["extra_series"]]
            for i, es in enumerate(extra_series):
                for s in es:
                    plotdata[i].append(s)
    except (KeyError, IndexError):
        # extra_series empty or misshapen - skip the annotations
        pass
    # Make a plot - template custom, or interactive or flat
    try:
        return get_template_mod().linegraph(plotdata, pconfig)
    except (AttributeError, TypeError):
        if config.plots_force_flat or (
            not config.plots_force_interactive
            and len(plotdata[0]) > config.plots_flat_numseries
        ):
            try:
                return matplotlib_linegraph(plotdata, pconfig)
            except Exception:
                # Don't let a MatPlotLib failure kill the whole report
                logger.error(
                    "############### Error making MatPlotLib figure! Falling back to HighCharts."
                )
                return highcharts_linegraph(plotdata, pconfig)
        else:
            # Use MatPlotLib to generate static plots if requested
            if config.export_plots:
                matplotlib_linegraph(plotdata, pconfig)
            # Return HTML for HighCharts dynamic plot
            return highcharts_linegraph(plotdata, pconfig)
|
def plot(data, pconfig=None):
    """Plot a line graph with X,Y data.
    :param data: 2D dict, first keys as sample names, then x:y data pairs
    :param pconfig: optional dict with config key:value pairs. See CONTRIBUTING.md
    :return: HTML and JS, ready to be inserted into the page
    """
    # None default instead of {}: this function writes into pconfig
    # (e.g. pconfig['categories'] below), so a shared mutable default
    # dict would leak state between calls.
    if pconfig is None:
        pconfig = {}
    # Given one dataset - turn it into a list
    if type(data) is not list:
        data = [data]
    # Smooth dataset if requested in config
    if pconfig.get("smooth_points", None) is not None:
        sumcounts = pconfig.get("smooth_points_sumcounts", True)
        for i, d in enumerate(data):
            sumc = sumcounts
            # A list gives a per-dataset sumcounts setting
            if type(sumcounts) is list:
                sumc = sumcounts[i]
            data[i] = smooth_line_data(d, pconfig["smooth_points"], sumc)
    # Generate the data dict structure expected by HighCharts series
    plotdata = list()
    for d in data:
        thisplotdata = list()
        for s in sorted(d.keys()):
            pairs = list()
            maxval = 0
            if "categories" in pconfig:
                # Categorical x-axis: collect category labels and bare y values
                pconfig["categories"] = list()
                for k in d[s].keys():
                    pconfig["categories"].append(k)
                    pairs.append(d[s][k])
                    maxval = max(maxval, d[s][k])
            else:
                # Numeric x-axis: [x, y] pairs sorted by x
                for k in sorted(d[s].keys()):
                    pairs.append([k, d[s][k]])
                    try:
                        maxval = max(maxval, d[s][k])
                    except TypeError:
                        # Non-numeric y value - skip for the max calculation
                        pass
            if maxval > 0 or pconfig.get("hide_empty") is not True:
                this_series = {"name": s, "data": pairs}
                try:
                    this_series["color"] = pconfig["colors"][s]
                except:
                    pass
                thisplotdata.append(this_series)
        plotdata.append(thisplotdata)
    # Add on annotation data series
    try:
        if pconfig.get("extra_series"):
            extra_series = pconfig["extra_series"]
            # Normalise to a list (per dataset) of lists (of series dicts)
            if type(pconfig["extra_series"]) == dict:
                extra_series = [[pconfig["extra_series"]]]
            elif (
                type(pconfig["extra_series"]) == list
                and type(pconfig["extra_series"][0]) == dict
            ):
                extra_series = [pconfig["extra_series"]]
            for i, es in enumerate(extra_series):
                for s in es:
                    plotdata[i].append(s)
    except (KeyError, IndexError):
        # IndexError added: an empty extra_series list makes
        # pconfig['extra_series'][0] blow up (MultiQC issue #392);
        # skip the annotations instead of crashing the module.
        pass
    # Make a plot - template custom, or interactive or flat
    try:
        return get_template_mod().linegraph(plotdata, pconfig)
    except (AttributeError, TypeError):
        if config.plots_force_flat or (
            not config.plots_force_interactive
            and len(plotdata[0]) > config.plots_flat_numseries
        ):
            try:
                return matplotlib_linegraph(plotdata, pconfig)
            except:
                # Don't let a MatPlotLib failure kill the whole report
                logger.error(
                    "############### Error making MatPlotLib figure! Falling back to HighCharts."
                )
                return highcharts_linegraph(plotdata, pconfig)
        else:
            # Use MatPlotLib to generate static plots if requested
            if config.export_plots:
                matplotlib_linegraph(plotdata, pconfig)
            # Return HTML for HighCharts dynamic plot
            return highcharts_linegraph(plotdata, pconfig)
|
https://github.com/ewels/MultiQC/issues/392
|
[INFO ] multiqc : This is MultiQC v0.9
[INFO ] multiqc : Template : default
[INFO ] multiqc : Searching '.'
[ERROR ] multiqc : Oops! The 'qualimap' MultiQC module broke...
Please copy the following traceback and report it at https://github.com/ewels/MultiQC/issues
(if possible, include a log file that triggers the error)
============================================================
Module qualimap raised an exception: Traceback (most recent call last):
File "/Users/tthurman/miniconda3/lib/python3.5/site-packages/multiqc-0.9-py3.5.egg/EGG-INFO/scripts/multiqc", line 344, in multiqc
output = mod()
File "/Users/tthurman/miniconda3/lib/python3.5/site-packages/multiqc-0.9-py3.5.egg/multiqc/modules/qualimap/qualimap.py", line 40, in __init__
n['BamQC'] = QM_BamQC.parse_reports(self)
File "/Users/tthurman/miniconda3/lib/python3.5/site-packages/multiqc-0.9-py3.5.egg/multiqc/modules/qualimap/QM_BamQC.py", line 54, in parse_reports
report_sections(self)
File "/Users/tthurman/miniconda3/lib/python3.5/site-packages/multiqc-0.9-py3.5.egg/multiqc/modules/qualimap/QM_BamQC.py", line 324, in report_sections
'extra_series': extra_series,
File "/Users/tthurman/miniconda3/lib/python3.5/site-packages/multiqc-0.9-py3.5.egg/multiqc/plots/linegraph.py", line 89, in plot
elif type(pconfig['extra_series']) == list and type(pconfig['extra_series'][0]) == dict:
IndexError: list index out of range
============================================================
[WARNING] multiqc : No analysis results found. Cleaning up..
[INFO ] multiqc : MultiQC complete
|
IndexError
|
def get_stack(self, name_or_stack_id):
    """Return the stack matching the given stack id or stack name.

    Stack-id lookup includes deleted stacks; name lookup only
    considers live stacks. Raises ValidationError when nothing matches.
    """
    # Merge deleted and live stacks so id lookups can hit either.
    all_stacks = dict(self.deleted_stacks, **self.stacks)
    if name_or_stack_id in all_stacks:
        # Matched by stack id - deleted stacks included
        return all_stacks[name_or_stack_id]
    # Fall back to a name lookup over undeleted stacks only
    match = next(
        (s for s in self.stacks.values() if s.name == name_or_stack_id),
        None,
    )
    if match is None:
        raise ValidationError(name_or_stack_id)
    return match
|
def get_stack(self, name_or_stack_id):
    """Return the stack matching the given stack id or stack name.

    Stack-id lookup includes deleted stacks; name lookup only
    considers live stacks.

    :raises ValidationError: when no stack matches. Previously this
        fell through and returned None, which made later template
        rendering fail with ``'None' has no attribute ...``
        (moto issue #3558).
    """
    all_stacks = dict(self.deleted_stacks, **self.stacks)
    if name_or_stack_id in all_stacks:
        # Lookup by stack id - deleted stacks included
        return all_stacks[name_or_stack_id]
    else:
        # Lookup by stack name - undeleted stacks only
        for stack in self.stacks.values():
            if stack.name == name_or_stack_id:
                return stack
        # Nothing matched: fail loudly instead of returning None
        raise ValidationError(name_or_stack_id)
https://github.com/spulec/moto/issues/3558
|
Traceback (most recent call last):
File "/home/user/git/modamod/inspiring-murdock/.venv/bin/inv", line 33, in <module>
sys.exit(load_entry_point('invoke==1.4.1', 'console_scripts', 'inv')())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 384, in run
self.execute()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 566, in execute
executor.execute(*self.tasks)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/executor.py", line 129, in execute
result = call.task(*args, **call.kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/tasks.py", line 127, in __call__
result = self.body(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/tasks.py", line 69, in wait
wait4cf(session(), 'test', 1, 10)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 102, in wrapper
result = func(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 86, in wait4cf
events = get_cf_events(session, stack_name, logger=logger)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 76, in get_cf_events
events = itertools.chain(*map(lambda x: x.get("StackEvents"), pages)) if pages else iter(())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 662, in _make_api_call
http, parsed_response = self._make_request(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 682, in _make_request
return self._endpoint.make_request(operation_model, request_dict)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 102, in make_request
return self._send_request(request_dict, operation_model)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 136, in _send_request
while self._needs_retry(attempts, operation_model, request_dict,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 253, in _needs_retry
responses = self._event_emitter.emit(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 250, in __call__
should_retry = self._should_retry(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 269, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 316, in __call__
checker_response = checker(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 222, in __call__
return self._check_caught_exception(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 197, in _do_get_response
responses = self._event_emitter.emit(event_name, request=request)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 341, in __call__
status, headers, body = response_callback(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 202, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 312, in _dispatch
return self.call_action()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 397, in call_action
response = method()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/cloudformation/responses.py", line 254, in describe_stack_events
return template.render(stack=stack)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 1045, in render
self.environment.handle_exception()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 787, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 1, in top-level template code
jinja2.exceptions.UndefinedError: 'None' has no attribute 'events'
|
jinja2.exceptions.UndefinedError
|
def list_stack_resources(self, stack_name_or_id):
    """Return the resources belonging to the named/identified stack."""
    return self.get_stack(stack_name_or_id).stack_resources
|
def list_stack_resources(self, stack_name_or_id):
    """Return the stack's resources, or None when the stack is unknown."""
    stack = self.get_stack(stack_name_or_id)
    # Propagate the not-found case to the caller as None
    return None if stack is None else stack.stack_resources
|
https://github.com/spulec/moto/issues/3558
|
Traceback (most recent call last):
File "/home/user/git/modamod/inspiring-murdock/.venv/bin/inv", line 33, in <module>
sys.exit(load_entry_point('invoke==1.4.1', 'console_scripts', 'inv')())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 384, in run
self.execute()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 566, in execute
executor.execute(*self.tasks)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/executor.py", line 129, in execute
result = call.task(*args, **call.kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/tasks.py", line 127, in __call__
result = self.body(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/tasks.py", line 69, in wait
wait4cf(session(), 'test', 1, 10)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 102, in wrapper
result = func(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 86, in wait4cf
events = get_cf_events(session, stack_name, logger=logger)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 76, in get_cf_events
events = itertools.chain(*map(lambda x: x.get("StackEvents"), pages)) if pages else iter(())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 662, in _make_api_call
http, parsed_response = self._make_request(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 682, in _make_request
return self._endpoint.make_request(operation_model, request_dict)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 102, in make_request
return self._send_request(request_dict, operation_model)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 136, in _send_request
while self._needs_retry(attempts, operation_model, request_dict,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 253, in _needs_retry
responses = self._event_emitter.emit(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 250, in __call__
should_retry = self._should_retry(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 269, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 316, in __call__
checker_response = checker(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 222, in __call__
return self._check_caught_exception(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 197, in _do_get_response
responses = self._event_emitter.emit(event_name, request=request)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 341, in __call__
status, headers, body = response_callback(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 202, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 312, in _dispatch
return self.call_action()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 397, in call_action
response = method()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/cloudformation/responses.py", line 254, in describe_stack_events
return template.render(stack=stack)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 1045, in render
self.environment.handle_exception()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 787, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 1, in top-level template code
jinja2.exceptions.UndefinedError: 'None' has no attribute 'events'
|
jinja2.exceptions.UndefinedError
|
def describe_stack_resource(self):
    """Render the DescribeStackResource response for one logical resource.

    :raises ValidationError: when the stack has no resource with the
        requested LogicalResourceId. Previously ``resource`` was left
        unbound in that case, producing an UnboundLocalError instead
        of a meaningful API error.
    """
    stack_name = self._get_param("StackName")
    stack = self.cloudformation_backend.get_stack(stack_name)
    logical_resource_id = self._get_param("LogicalResourceId")
    for stack_resource in stack.stack_resources:
        if stack_resource.logical_resource_id == logical_resource_id:
            resource = stack_resource
            break
    else:
        # for/else: no resource matched the requested logical id
        raise ValidationError(logical_resource_id)
    template = self.response_template(DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE)
    return template.render(stack=stack, resource=resource)
|
def describe_stack_resource(self):
    """Render the DescribeStackResource response for one logical resource.

    Raises ValidationError when the stack contains no resource with
    the requested LogicalResourceId.
    """
    stack_name = self._get_param("StackName")
    stack = self.cloudformation_backend.get_stack(stack_name)
    logical_resource_id = self._get_param("LogicalResourceId")
    # Find the first resource whose logical id matches the request
    resource = next(
        (
            stack_resource
            for stack_resource in stack.stack_resources
            if stack_resource.logical_resource_id == logical_resource_id
        ),
        None,
    )
    if resource is None:
        raise ValidationError(logical_resource_id)
    template = self.response_template(DESCRIBE_STACK_RESOURCE_RESPONSE_TEMPLATE)
    return template.render(stack=stack, resource=resource)
|
https://github.com/spulec/moto/issues/3558
|
Traceback (most recent call last):
File "/home/user/git/modamod/inspiring-murdock/.venv/bin/inv", line 33, in <module>
sys.exit(load_entry_point('invoke==1.4.1', 'console_scripts', 'inv')())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 384, in run
self.execute()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 566, in execute
executor.execute(*self.tasks)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/executor.py", line 129, in execute
result = call.task(*args, **call.kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/tasks.py", line 127, in __call__
result = self.body(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/tasks.py", line 69, in wait
wait4cf(session(), 'test', 1, 10)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 102, in wrapper
result = func(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 86, in wait4cf
events = get_cf_events(session, stack_name, logger=logger)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 76, in get_cf_events
events = itertools.chain(*map(lambda x: x.get("StackEvents"), pages)) if pages else iter(())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 662, in _make_api_call
http, parsed_response = self._make_request(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 682, in _make_request
return self._endpoint.make_request(operation_model, request_dict)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 102, in make_request
return self._send_request(request_dict, operation_model)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 136, in _send_request
while self._needs_retry(attempts, operation_model, request_dict,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 253, in _needs_retry
responses = self._event_emitter.emit(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 250, in __call__
should_retry = self._should_retry(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 269, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 316, in __call__
checker_response = checker(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 222, in __call__
return self._check_caught_exception(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 197, in _do_get_response
responses = self._event_emitter.emit(event_name, request=request)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 341, in __call__
status, headers, body = response_callback(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 202, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 312, in _dispatch
return self.call_action()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 397, in call_action
response = method()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/cloudformation/responses.py", line 254, in describe_stack_events
return template.render(stack=stack)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 1045, in render
self.environment.handle_exception()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 787, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 1, in top-level template code
jinja2.exceptions.UndefinedError: 'None' has no attribute 'events'
|
jinja2.exceptions.UndefinedError
|
def list_stack_resources(self):
    """Render the ListStackResources response for the requested stack."""
    resources = self.cloudformation_backend.list_stack_resources(
        self._get_param("StackName")
    )
    return self.response_template(LIST_STACKS_RESOURCES_RESPONSE).render(
        resources=resources
    )
|
def list_stack_resources(self):
    """Render the ListStackResources response for the requested stack.

    Raises ValidationError when the backend does not know the stack.
    """
    stack_name_or_id = self._get_param("StackName")
    resources = self.cloudformation_backend.list_stack_resources(stack_name_or_id)
    # Backend signals an unknown stack with None - turn that into an API error
    if resources is None:
        raise ValidationError(stack_name_or_id)
    return self.response_template(LIST_STACKS_RESOURCES_RESPONSE).render(
        resources=resources
    )
|
https://github.com/spulec/moto/issues/3558
|
Traceback (most recent call last):
File "/home/user/git/modamod/inspiring-murdock/.venv/bin/inv", line 33, in <module>
sys.exit(load_entry_point('invoke==1.4.1', 'console_scripts', 'inv')())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 384, in run
self.execute()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/program.py", line 566, in execute
executor.execute(*self.tasks)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/executor.py", line 129, in execute
result = call.task(*args, **call.kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/invoke-1.4.1-py3.8.egg/invoke/tasks.py", line 127, in __call__
result = self.body(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/tasks.py", line 69, in wait
wait4cf(session(), 'test', 1, 10)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 102, in wrapper
result = func(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 86, in wait4cf
events = get_cf_events(session, stack_name, logger=logger)
File "/home/user/git/modamod/inspiring-murdock/inspiring_murdock/aws/cloudformation.py", line 76, in get_cf_events
events = itertools.chain(*map(lambda x: x.get("StackEvents"), pages)) if pages else iter(())
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 662, in _make_api_call
http, parsed_response = self._make_request(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/client.py", line 682, in _make_request
return self._endpoint.make_request(operation_model, request_dict)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 102, in make_request
return self._send_request(request_dict, operation_model)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 136, in _send_request
while self._needs_retry(attempts, operation_model, request_dict,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 253, in _needs_retry
responses = self._event_emitter.emit(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 183, in __call__
if self._checker(attempts, response, caught_exception):
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 250, in __call__
should_retry = self._should_retry(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 269, in _should_retry
return self._checker(attempt_number, response, caught_exception)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 316, in __call__
checker_response = checker(attempt_number, response,
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 222, in __call__
return self._check_caught_exception(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/retryhandler.py", line 359, in _check_caught_exception
raise caught_exception
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/endpoint.py", line 197, in _do_get_response
responses = self._event_emitter.emit(event_name, request=request)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/botocore-1.19.40-py3.8.egg/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/models.py", line 341, in __call__
status, headers, body = response_callback(
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 202, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 312, in _dispatch
return self.call_action()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/core/responses.py", line 397, in call_action
response = method()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/moto/cloudformation/responses.py", line 254, in describe_stack_events
return template.render(stack=stack)
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 1045, in render
self.environment.handle_exception()
File "/home/user/git/modamod/inspiring-murdock/.venv/lib/python3.8/site-packages/Jinja2-3.0.0a1-py3.8.egg/jinja2/environment.py", line 787, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 1, in top-level template code
jinja2.exceptions.UndefinedError: 'None' has no attribute 'events'
|
jinja2.exceptions.UndefinedError
|
def run(self):
"""
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
"""
try:
self.job_state = "PENDING"
image = self.job_definition.container_properties.get("image", "alpine:latest")
privileged = self.job_definition.container_properties.get("privileged", False)
cmd = self._get_container_property(
"command",
'/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"',
)
environment = {
e["name"]: e["value"]
for e in self._get_container_property("environment", [])
}
volumes = {
v["name"]: v["host"] for v in self._get_container_property("volumes", [])
}
mounts = [
docker.types.Mount(
m["containerPath"],
volumes[m["sourceVolume"]]["sourcePath"],
type="bind",
read_only=m["readOnly"],
)
for m in self._get_container_property("mountPoints", [])
]
name = "{0}-{1}".format(self.job_name, self.job_id)
self.job_state = "RUNNABLE"
# TODO setup ecs container instance
self.job_started_at = datetime.datetime.now()
self.job_state = "STARTING"
log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
container = self.docker_client.containers.run(
image,
cmd,
detach=True,
name=name,
log_config=log_config,
environment=environment,
mounts=mounts,
privileged=privileged,
)
self.job_state = "RUNNING"
try:
container.reload()
while container.status == "running" and not self.stop:
container.reload()
# Container should be stopped by this point... unless asked to stop
if container.status == "running":
container.kill()
# Log collection
logs_stdout = []
logs_stderr = []
logs_stderr.extend(
container.logs(
stdout=False,
stderr=True,
timestamps=True,
since=datetime2int(self.job_started_at),
)
.decode()
.split("\n")
)
logs_stdout.extend(
container.logs(
stdout=True,
stderr=False,
timestamps=True,
since=datetime2int(self.job_started_at),
)
.decode()
.split("\n")
)
# Process logs
logs_stdout = [x for x in logs_stdout if len(x) > 0]
logs_stderr = [x for x in logs_stderr if len(x) > 0]
logs = []
for line in logs_stdout + logs_stderr:
date, line = line.split(" ", 1)
date = dateutil.parser.parse(date)
# TODO: Replace with int(date.timestamp()) once we yeet Python2 out of the window
date = int(
(time.mktime(date.timetuple()) + date.microsecond / 1000000.0)
)
logs.append({"timestamp": date, "message": line.strip()})
# Send to cloudwatch
log_group = "/aws/batch/job"
stream_name = "{0}/default/{1}".format(
self.job_definition.name, self.job_id
)
self.log_stream_name = stream_name
self._log_backend.ensure_log_group(log_group, None)
self._log_backend.create_log_stream(log_group, stream_name)
self._log_backend.put_log_events(log_group, stream_name, logs, None)
self.job_state = "SUCCEEDED" if not self.stop else "FAILED"
except Exception as err:
logger.error(
"Failed to run AWS Batch container {0}. Error {1}".format(
self.name, err
)
)
self.job_state = "FAILED"
container.kill()
finally:
container.remove()
except Exception as err:
logger.error(
"Failed to run AWS Batch container {0}. Error {1}".format(self.name, err)
)
self.job_state = "FAILED"
self.job_stopped = True
self.job_stopped_at = datetime.datetime.now()
|
def run(self):
"""
Run the container.
Logic is as follows:
Generate container info (eventually from task definition)
Start container
Loop whilst not asked to stop and the container is running.
Get all logs from container between the last time I checked and now.
Convert logs into cloudwatch format
Put logs into cloudwatch
:return:
"""
try:
self.job_state = "PENDING"
time.sleep(1)
image = self.job_definition.container_properties.get("image", "alpine:latest")
privileged = self.job_definition.container_properties.get("privileged", False)
cmd = self._get_container_property(
"command",
'/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"',
)
environment = {
e["name"]: e["value"]
for e in self._get_container_property("environment", [])
}
volumes = {
v["name"]: v["host"] for v in self._get_container_property("volumes", [])
}
mounts = [
docker.types.Mount(
m["containerPath"],
volumes[m["sourceVolume"]]["sourcePath"],
type="bind",
read_only=m["readOnly"],
)
for m in self._get_container_property("mountPoints", [])
]
name = "{0}-{1}".format(self.job_name, self.job_id)
self.job_state = "RUNNABLE"
# TODO setup ecs container instance
time.sleep(1)
self.job_state = "STARTING"
log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
container = self.docker_client.containers.run(
image,
cmd,
detach=True,
name=name,
log_config=log_config,
environment=environment,
mounts=mounts,
privileged=privileged,
)
self.job_state = "RUNNING"
self.job_started_at = datetime.datetime.now()
try:
# Log collection
logs_stdout = []
logs_stderr = []
container.reload()
# Dodgy hack, we can only check docker logs once a second, but we want to loop more
# so we can stop if asked to in a quick manner, should all go away if we go async
# There also be some dodgyness when sending an integer to docker logs and some
# events seem to be duplicated.
now = datetime.datetime.now()
i = 1
while container.status == "running" and not self.stop:
time.sleep(0.2)
if i % 5 == 0:
logs_stderr.extend(
container.logs(
stdout=False,
stderr=True,
timestamps=True,
since=datetime2int(now),
)
.decode()
.split("\n")
)
logs_stdout.extend(
container.logs(
stdout=True,
stderr=False,
timestamps=True,
since=datetime2int(now),
)
.decode()
.split("\n")
)
now = datetime.datetime.now()
container.reload()
i += 1
# Container should be stopped by this point... unless asked to stop
if container.status == "running":
container.kill()
self.job_stopped_at = datetime.datetime.now()
# Get final logs
logs_stderr.extend(
container.logs(
stdout=False,
stderr=True,
timestamps=True,
since=datetime2int(now),
)
.decode()
.split("\n")
)
logs_stdout.extend(
container.logs(
stdout=True,
stderr=False,
timestamps=True,
since=datetime2int(now),
)
.decode()
.split("\n")
)
self.job_state = "SUCCEEDED" if not self.stop else "FAILED"
# Process logs
logs_stdout = [x for x in logs_stdout if len(x) > 0]
logs_stderr = [x for x in logs_stderr if len(x) > 0]
logs = []
for line in logs_stdout + logs_stderr:
date, line = line.split(" ", 1)
date = dateutil.parser.parse(date)
# TODO: Replace with int(date.timestamp()) once we yeet Python2 out of the window
date = int(
(time.mktime(date.timetuple()) + date.microsecond / 1000000.0)
)
logs.append({"timestamp": date, "message": line.strip()})
# Send to cloudwatch
log_group = "/aws/batch/job"
stream_name = "{0}/default/{1}".format(
self.job_definition.name, self.job_id
)
self.log_stream_name = stream_name
self._log_backend.ensure_log_group(log_group, None)
self._log_backend.create_log_stream(log_group, stream_name)
self._log_backend.put_log_events(log_group, stream_name, logs, None)
except Exception as err:
logger.error(
"Failed to run AWS Batch container {0}. Error {1}".format(
self.name, err
)
)
self.job_state = "FAILED"
container.kill()
finally:
container.remove()
except Exception as err:
logger.error(
"Failed to run AWS Batch container {0}. Error {1}".format(self.name, err)
)
self.job_state = "FAILED"
self.job_stopped = True
self.job_stopped_at = datetime.datetime.now()
|
https://github.com/spulec/moto/issues/3475
|
======================================================================
FAIL: test_batch.test_submit_job
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/virtualenv/python3.7.6/lib/python3.7/site-packages/nose/case.py", line 198, in runTest
self.test(*self.arg)
File "/home/travis/build/spulec/moto/moto/core/models.py", line 99, in wrapper
result = func(*args, **kwargs)
File "/home/travis/build/spulec/moto/moto/core/models.py", line 99, in wrapper
result = func(*args, **kwargs)
File "/home/travis/build/spulec/moto/moto/core/models.py", line 99, in wrapper
result = func(*args, **kwargs)
[Previous line repeated 2 more times]
File "/home/travis/build/spulec/moto/tests/test_batch/test_batch.py", line 750, in test_submit_job
[event["message"] for event in resp["events"]].should.equal(["hello"])
File "/home/travis/virtualenv/python3.7.6/lib/python3.7/site-packages/sure/__init__.py", line 387, in wrapper
raise AssertionError(e)
AssertionError: given
X = []
and
Y = ['hello']
Y has 1 items whereas X has only 0
raise AssertionError(e)
|
AssertionError
|
def describe_log_groups(self, limit, log_group_name_prefix, next_token):
if log_group_name_prefix is None:
log_group_name_prefix = ""
groups = [
group.to_describe_dict()
for name, group in self.groups.items()
if name.startswith(log_group_name_prefix)
]
groups = sorted(groups, key=lambda x: x["logGroupName"])
index_start = 0
if next_token:
try:
index_start = (
next(
index
for (index, d) in enumerate(groups)
if d["logGroupName"] == next_token
)
+ 1
)
except StopIteration:
index_start = 0
# AWS returns an empty list if it receives an invalid token.
groups = []
index_end = index_start + limit
if index_end > len(groups):
index_end = len(groups)
groups_page = groups[index_start:index_end]
next_token = None
if groups_page and index_end < len(groups):
next_token = groups_page[-1]["logGroupName"]
return groups_page, next_token
|
def describe_log_groups(self, limit, log_group_name_prefix, next_token):
if log_group_name_prefix is None:
log_group_name_prefix = ""
if next_token is None:
next_token = 0
groups = [
group.to_describe_dict()
for name, group in self.groups.items()
if name.startswith(log_group_name_prefix)
]
groups = sorted(groups, key=lambda x: x["creationTime"], reverse=True)
groups_page = groups[next_token : next_token + limit]
next_token += limit
if next_token >= len(groups):
next_token = None
return groups_page, next_token
|
https://github.com/spulec/moto/issues/3395
|
Traceback (most recent call last):
File "foo.py", line 13, in <module>
client.describe_log_groups(nextToken=result['nextToken'])
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/client.py", line 648, in _make_api_call
request_dict = self._convert_to_request_dict(
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/client.py", line 696, in _convert_to_request_dict
request_dict = self._serializer.serialize_to_request(
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Invalid type for parameter nextToken, value: 50, type: <class 'int'>, valid types: <class 'str'>
|
botocore.exceptions.ParamValidationError
|
def describe_log_groups(self):
log_group_name_prefix = self._get_param("logGroupNamePrefix")
next_token = self._get_param("nextToken")
limit = self._get_param("limit", 50)
assert limit <= 50
groups, next_token = self.logs_backend.describe_log_groups(
limit, log_group_name_prefix, next_token
)
result = {"logGroups": groups}
if next_token:
result["nextToken"] = next_token
return json.dumps(result)
|
def describe_log_groups(self):
log_group_name_prefix = self._get_param("logGroupNamePrefix")
next_token = self._get_param("nextToken")
limit = self._get_param("limit", 50)
assert limit <= 50
groups, next_token = self.logs_backend.describe_log_groups(
limit, log_group_name_prefix, next_token
)
return json.dumps({"logGroups": groups, "nextToken": next_token})
|
https://github.com/spulec/moto/issues/3395
|
Traceback (most recent call last):
File "foo.py", line 13, in <module>
client.describe_log_groups(nextToken=result['nextToken'])
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/client.py", line 648, in _make_api_call
request_dict = self._convert_to_request_dict(
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/client.py", line 696, in _convert_to_request_dict
request_dict = self._serializer.serialize_to_request(
File "/home/ubuntu/.venvs/moto-test-case/lib/python3.8/site-packages/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Invalid type for parameter nextToken, value: 50, type: <class 'int'>, valid types: <class 'str'>
|
botocore.exceptions.ParamValidationError
|
def create_subnet(
self,
vpc_id,
cidr_block,
availability_zone=None,
availability_zone_id=None,
context=None,
tags=[],
):
subnet_id = random_subnet_id()
vpc = self.get_vpc(
vpc_id
) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's
vpc_cidr_blocks = [
ipaddress.IPv4Network(
six.text_type(cidr_block_association["cidr_block"]), strict=False
)
for cidr_block_association in vpc.get_cidr_block_association_set()
]
try:
subnet_cidr_block = ipaddress.IPv4Network(
six.text_type(cidr_block), strict=False
)
except ValueError:
raise InvalidCIDRBlockParameterError(cidr_block)
subnet_in_vpc_cidr_range = False
for vpc_cidr_block in vpc_cidr_blocks:
if (
vpc_cidr_block.network_address <= subnet_cidr_block.network_address
and vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address
):
subnet_in_vpc_cidr_range = True
break
if not subnet_in_vpc_cidr_range:
raise InvalidSubnetRangeError(cidr_block)
for subnet in self.get_all_subnets(filters={"vpc-id": vpc_id}):
if subnet.cidr.overlaps(subnet_cidr_block):
raise InvalidSubnetConflictError(cidr_block)
# if this is the first subnet for an availability zone,
# consider it the default
default_for_az = str(availability_zone not in self.subnets).lower()
map_public_ip_on_launch = default_for_az
if availability_zone is None and not availability_zone_id:
availability_zone = "us-east-1a"
try:
if availability_zone:
availability_zone_data = next(
zone
for zones in RegionsAndZonesBackend.zones.values()
for zone in zones
if zone.name == availability_zone
)
elif availability_zone_id:
availability_zone_data = next(
zone
for zones in RegionsAndZonesBackend.zones.values()
for zone in zones
if zone.zone_id == availability_zone_id
)
except StopIteration:
raise InvalidAvailabilityZoneError(
availability_zone,
", ".join(
[
zone.name
for zones in RegionsAndZonesBackend.zones.values()
for zone in zones
]
),
)
subnet = Subnet(
self,
subnet_id,
vpc_id,
cidr_block,
availability_zone_data,
default_for_az,
map_public_ip_on_launch,
owner_id=context.get_current_user() if context else OWNER_ID,
assign_ipv6_address_on_creation=False,
)
for tag in tags:
tag_key = tag.get("Key")
tag_value = tag.get("Value")
subnet.add_tag(tag_key, tag_value)
# AWS associates a new subnet with the default Network ACL
self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
self.subnets[availability_zone][subnet_id] = subnet
return subnet
|
def create_subnet(
self,
vpc_id,
cidr_block,
availability_zone=None,
availability_zone_id=None,
context=None,
tags=[],
):
subnet_id = random_subnet_id()
vpc = self.get_vpc(
vpc_id
) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's
vpc_cidr_block = ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False)
try:
subnet_cidr_block = ipaddress.IPv4Network(
six.text_type(cidr_block), strict=False
)
except ValueError:
raise InvalidCIDRBlockParameterError(cidr_block)
if not (
vpc_cidr_block.network_address <= subnet_cidr_block.network_address
and vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address
):
raise InvalidSubnetRangeError(cidr_block)
for subnet in self.get_all_subnets(filters={"vpc-id": vpc_id}):
if subnet.cidr.overlaps(subnet_cidr_block):
raise InvalidSubnetConflictError(cidr_block)
# if this is the first subnet for an availability zone,
# consider it the default
default_for_az = str(availability_zone not in self.subnets).lower()
map_public_ip_on_launch = default_for_az
if availability_zone is None and not availability_zone_id:
availability_zone = "us-east-1a"
try:
if availability_zone:
availability_zone_data = next(
zone
for zones in RegionsAndZonesBackend.zones.values()
for zone in zones
if zone.name == availability_zone
)
elif availability_zone_id:
availability_zone_data = next(
zone
for zones in RegionsAndZonesBackend.zones.values()
for zone in zones
if zone.zone_id == availability_zone_id
)
except StopIteration:
raise InvalidAvailabilityZoneError(
availability_zone,
", ".join(
[
zone.name
for zones in RegionsAndZonesBackend.zones.values()
for zone in zones
]
),
)
subnet = Subnet(
self,
subnet_id,
vpc_id,
cidr_block,
availability_zone_data,
default_for_az,
map_public_ip_on_launch,
owner_id=context.get_current_user() if context else OWNER_ID,
assign_ipv6_address_on_creation=False,
)
for tag in tags:
tag_key = tag.get("Key")
tag_value = tag.get("Value")
subnet.add_tag(tag_key, tag_value)
# AWS associates a new subnet with the default Network ACL
self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
self.subnets[availability_zone][subnet_id] = subnet
return subnet
|
https://github.com/spulec/moto/issues/3385
|
{'Subnet': {'AvailabilityZone': 'us-west-2a', 'AvailabilityZoneId': 'usw2-az2', 'AvailableIpAddressCount': 251, 'CidrBlock': '10.0.0.0/24', 'DefaultForAz': False, 'MapPublicIpOnLaunch': False, 'State': 'pending', 'SubnetId': 'subnet-89f4d757', 'VpcId': 'vpc-5c9431df', 'OwnerId': '111122223333', 'AssignIpv6AddressOnCreation': False, 'Ipv6CidrBlockAssociationSet': [], 'SubnetArn': 'arn:aws:ec2:us-west-2:111122223333:subnet/subnet-89f4d757'}, 'ResponseMetadata': {'RequestId': '7a62c49f-347e-4fc4-9331-6e8eEXAMPLE', 'HTTPStatusCode': 200, 'HTTPHeaders': {'server': 'amazon.com'}, 'RetryAttempts': 0}}
Traceback (most recent call last):
File "test.py", line 35, in <module>
main()
File "/path/to/.venv/lib/python3.7/site-packages/moto/core/models.py", line 100, in wrapper
result = func(*args, **kwargs)
File "/path/to/.venv/lib/python3.7/site-packages/moto/core/models.py", line 100, in wrapper
result = func(*args, **kwargs)
File "test.py", line 29, in main
VpcId=vpc["Vpc"]["VpcId"],
File "/path/to/.venv/lib/python3.7/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/path/to/.venv/lib/python3.7/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidSubnet.Range) when calling the CreateSubnet operation: The CIDR '10.1.0.0/24' is invalid.
|
botocore.exceptions.ClientError
|
def __init__(
self,
name,
value,
storage="STANDARD",
etag=None,
is_versioned=False,
version_id=0,
max_buffer_size=DEFAULT_KEY_BUFFER_SIZE,
multipart=None,
):
self.name = name
self.last_modified = datetime.datetime.utcnow()
self.acl = get_canned_acl("private")
self.website_redirect_location = None
self._storage_class = storage if storage else "STANDARD"
self._metadata = {}
self._expiry = None
self._etag = etag
self._version_id = version_id
self._is_versioned = is_versioned
self._tagging = FakeTagging()
self.multipart = multipart
self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
self._max_buffer_size = max_buffer_size
self.value = value
self.lock = threading.Lock()
|
def __init__(
self,
name,
value,
storage="STANDARD",
etag=None,
is_versioned=False,
version_id=0,
max_buffer_size=DEFAULT_KEY_BUFFER_SIZE,
multipart=None,
):
self.name = name
self.last_modified = datetime.datetime.utcnow()
self.acl = get_canned_acl("private")
self.website_redirect_location = None
self._storage_class = storage if storage else "STANDARD"
self._metadata = {}
self._expiry = None
self._etag = etag
self._version_id = version_id
self._is_versioned = is_versioned
self._tagging = FakeTagging()
self.multipart = multipart
self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
self._max_buffer_size = max_buffer_size
self.value = value
|
https://github.com/spulec/moto/issues/2789
|
Traceback (most recent call last):
File "/Users/myuser/projects/project/web/pdf_file_downloader.py", line 16, in download
self.s3.download_file(self.bucket_name, self.file_name, self.local_path())
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 106, in result
return self._coordinator.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 265, in result
raise self._exception
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 126, in __call__
return self._execute_main(kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 150, in _execute_main
return_value = self._main(**kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/download.py", line 512, in _main
Bucket=bucket, Key=key, **extra_args)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 626, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
|
botocore.exceptions.ClientError
|
def value(self, new_value):
self._value_buffer.seek(0)
self._value_buffer.truncate()
# Hack for working around moto's own unit tests; this probably won't
# actually get hit in normal use.
if isinstance(new_value, six.text_type):
new_value = new_value.encode(DEFAULT_TEXT_ENCODING)
self._value_buffer.write(new_value)
self.contentsize = len(new_value)
|
def value(self, new_value):
self._value_buffer.seek(0)
self._value_buffer.truncate()
# Hack for working around moto's own unit tests; this probably won't
# actually get hit in normal use.
if isinstance(new_value, six.text_type):
new_value = new_value.encode(DEFAULT_TEXT_ENCODING)
self._value_buffer.write(new_value)
|
https://github.com/spulec/moto/issues/2789
|
Traceback (most recent call last):
File "/Users/myuser/projects/project/web/pdf_file_downloader.py", line 16, in download
self.s3.download_file(self.bucket_name, self.file_name, self.local_path())
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 106, in result
return self._coordinator.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 265, in result
raise self._exception
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 126, in __call__
return self._execute_main(kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 150, in _execute_main
return_value = self._main(**kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/download.py", line 512, in _main
Bucket=bucket, Key=key, **extra_args)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 626, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
|
botocore.exceptions.ClientError
|
def append_to_value(self, value):
    """Append *value* (bytes) to the end of the key's buffered content.

    Keeps the running ``contentsize`` counter in step with the buffer,
    bumps ``last_modified``, invalidates the cached etag, and assigns a
    fresh version id when the bucket is versioned.
    """
    self.contentsize += len(value)
    buf = self._value_buffer
    buf.seek(0, os.SEEK_END)
    buf.write(value)
    self.last_modified = datetime.datetime.utcnow()
    # The cached etag no longer matches the content; recompute lazily.
    self._etag = None
    self._version_id = str(uuid.uuid4()) if self._is_versioned else None
|
def append_to_value(self, value):
    """Append *value* (bytes) to the key's content.

    Maintains an explicit running byte counter (``contentsize``) so the
    object's size never has to be derived by seeking the spooled buffer
    — seeking has side effects and fails once the buffer has been
    pickled/closed (see moto issue #2789).
    """
    # getattr keeps this safe for instances created before
    # ``contentsize`` was initialised in __init__.
    self.contentsize = getattr(self, "contentsize", 0) + len(value)
    self._value_buffer.seek(0, os.SEEK_END)
    self._value_buffer.write(value)
    self.last_modified = datetime.datetime.utcnow()
    self._etag = None  # must recalculate etag
    if self._is_versioned:
        self._version_id = str(uuid.uuid4())
    else:
        self._version_id = None
|
https://github.com/spulec/moto/issues/2789
|
Traceback (most recent call last):
File "/Users/myuser/projects/project/web/pdf_file_downloader.py", line 16, in download
self.s3.download_file(self.bucket_name, self.file_name, self.local_path())
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 106, in result
return self._coordinator.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 265, in result
raise self._exception
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 126, in __call__
return self._execute_main(kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 150, in _execute_main
return_value = self._main(**kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/download.py", line 512, in _main
Bucket=bucket, Key=key, **extra_args)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 626, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
|
botocore.exceptions.ClientError
|
def size(self):
    """Return the content length in bytes.

    Reads the incrementally-maintained counter instead of deriving the
    size from the underlying buffer (seeking the spooled buffer has
    side effects and fails once it has been closed after pickling).
    """
    return self.contentsize
|
def size(self):
    """Return the content length in bytes.

    Derives the size by seeking to the end of the spooled buffer; the
    caller's file position is saved and restored so this query has no
    observable side effect on subsequent reads.

    NOTE(review): computing size from the live buffer still fails once
    the buffer has been pickled/closed (moto issue #2789 — the 416
    "Requested Range Not Satisfiable" symptom); tracking an explicit
    byte counter is the more robust fix.
    """
    previous_position = self._value_buffer.tell()
    self._value_buffer.seek(0, os.SEEK_END)
    end = self._value_buffer.tell()
    # Restore the position the caller left the buffer at.
    self._value_buffer.seek(previous_position)
    return end
|
https://github.com/spulec/moto/issues/2789
|
Traceback (most recent call last):
File "/Users/myuser/projects/project/web/pdf_file_downloader.py", line 16, in download
self.s3.download_file(self.bucket_name, self.file_name, self.local_path())
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 106, in result
return self._coordinator.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 265, in result
raise self._exception
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 126, in __call__
return self._execute_main(kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 150, in _execute_main
return_value = self._main(**kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/download.py", line 512, in _main
Bucket=bucket, Key=key, **extra_args)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 626, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
|
botocore.exceptions.ClientError
|
def __getstate__(self):
    """Build a picklable snapshot of this key's attributes."""
    snapshot = dict(self.__dict__)
    # Materialise the lazily-read ``value`` into plain bytes.
    snapshot["value"] = self.value
    # The spooled temp file and the lock cannot be pickled.
    del snapshot["_value_buffer"]
    del snapshot["lock"]
    return snapshot
|
def __getstate__(self):
    """Build a picklable snapshot of this key's attributes.

    Replaces the lazily-read ``value`` property with its materialised
    bytes and strips attributes that cannot be pickled: the spooled
    temporary file and — when the instance carries one — the
    ``threading.Lock`` (a lock in the state dict makes ``pickle`` fail;
    see moto issue #2789).
    """
    state = self.__dict__.copy()
    state["value"] = self.value
    del state["_value_buffer"]
    # pop with default: stays backward-compatible with instances that
    # were created before the lock attribute existed.
    state.pop("lock", None)
    return state
|
https://github.com/spulec/moto/issues/2789
|
Traceback (most recent call last):
File "/Users/myuser/projects/project/web/pdf_file_downloader.py", line 16, in download
self.s3.download_file(self.bucket_name, self.file_name, self.local_path())
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 106, in result
return self._coordinator.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 265, in result
raise self._exception
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 126, in __call__
return self._execute_main(kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 150, in _execute_main
return_value = self._main(**kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/download.py", line 512, in _main
Bucket=bucket, Key=key, **extra_args)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 626, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
|
botocore.exceptions.ClientError
|
def __setstate__(self, state):
self.__dict__.update({k: v for k, v in six.iteritems(state) if k != "value"})
self._value_buffer = tempfile.SpooledTemporaryFile(max_size=self._max_buffer_size)
self.value = state["value"]
self.lock = threading.Lock()
|
def __setstate__(self, state):
self.__dict__.update({k: v for k, v in six.iteritems(state) if k != "value"})
self._value_buffer = tempfile.SpooledTemporaryFile(max_size=self._max_buffer_size)
self.value = state["value"]
|
https://github.com/spulec/moto/issues/2789
|
Traceback (most recent call last):
File "/Users/myuser/projects/project/web/pdf_file_downloader.py", line 16, in download
self.s3.download_file(self.bucket_name, self.file_name, self.local_path())
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 106, in result
return self._coordinator.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 265, in result
raise self._exception
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 126, in __call__
return self._execute_main(kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 150, in _execute_main
return_value = self._main(**kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/download.py", line 512, in _main
Bucket=bucket, Key=key, **extra_args)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 626, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
|
botocore.exceptions.ClientError
|
def complete(self, body):
    """Assemble a completed multipart upload.

    ``body`` is an iterable of ``(part_number, etag)`` pairs as sent by
    the client.  Returns ``(payload_bytearray, combined_etag)`` where the
    combined etag is md5-of-concatenated-part-digests plus a part count
    suffix.  Raises ``InvalidPart`` on an unknown part or etag mismatch,
    ``EntityTooSmall`` when a non-final part is under the minimum size.
    """
    hex_decode = codecs.getdecoder("hex_codec")
    payload = bytearray()
    digest_concat = bytearray()
    previous = None
    part_count = 0
    for part_number, client_etag in body:
        part = self.parts.get(part_number)
        stored_etag = None
        if part is not None:
            stored_etag = part.etag.replace('"', "")
            client_etag = client_etag.replace('"', "")
        if part is None or stored_etag != client_etag:
            raise InvalidPart()
        # Every part except the last must meet the S3 minimum part size.
        if previous is not None and previous.contentsize < UPLOAD_PART_MIN_SIZE:
            raise EntityTooSmall()
        digest_concat.extend(hex_decode(stored_etag)[0])
        payload.extend(part.value)
        previous = part
        part_count += 1
    combined = hashlib.md5()
    combined.update(bytes(digest_concat))
    return payload, "{0}-{1}".format(combined.hexdigest(), part_count)
|
def complete(self, body):
    """Assemble a completed multipart upload.

    ``body`` is an iterable of ``(part_number, etag)`` pairs as sent by
    the client.  Returns ``(payload_bytearray, combined_etag)``.
    Raises ``InvalidPart`` on an unknown part or etag mismatch,
    ``EntityTooSmall`` when a non-final part is under the minimum size.

    Fix: the minimum-size check reads the tracked ``contentsize``
    counter instead of ``len(last.value)`` — the old form materialised
    the entire previous part's bytes just to measure them, and fails
    once the part's buffer is unavailable (moto issue #2789).
    """
    decode_hex = codecs.getdecoder("hex_codec")
    total = bytearray()
    md5s = bytearray()
    last = None
    count = 0
    for pn, etag in body:
        part = self.parts.get(pn)
        part_etag = None
        if part is not None:
            part_etag = part.etag.replace('"', "")
            etag = etag.replace('"', "")
        if part is None or part_etag != etag:
            raise InvalidPart()
        # Every part except the last must meet the S3 minimum part size.
        if last is not None and last.contentsize < UPLOAD_PART_MIN_SIZE:
            raise EntityTooSmall()
        md5s.extend(decode_hex(part_etag)[0])
        total.extend(part.value)
        last = part
        count += 1
    etag = hashlib.md5()
    etag.update(bytes(md5s))
    return total, "{0}-{1}".format(etag.hexdigest(), count)
|
https://github.com/spulec/moto/issues/2789
|
Traceback (most recent call last):
File "/Users/myuser/projects/project/web/pdf_file_downloader.py", line 16, in download
self.s3.download_file(self.bucket_name, self.file_name, self.local_path())
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 106, in result
return self._coordinator.result()
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/futures.py", line 265, in result
raise self._exception
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 126, in __call__
return self._execute_main(kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/tasks.py", line 150, in _execute_main
return_value = self._main(**kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/s3transfer/download.py", line 512, in _main
Bucket=bucket, Key=key, **extra_args)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/myuser/Library/Caches/pypoetry/virtualenvs/project-X5XCMv3p-py3.6/lib/python3.6/site-packages/botocore/client.py", line 626, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (416) when calling the GetObject operation: Requested Range Not Satisfiable
|
botocore.exceptions.ClientError
|
def __init__(self, name, job_def, job_queue, log_backend, container_overrides):
    """
    Docker Job: a thread that runs one AWS Batch job inside a real
    Docker container and ships its logs to the mocked CloudWatch Logs
    backend.

    :param name: Job Name
    :param job_def: Job definition
    :type: job_def: JobDefinition
    :param job_queue: Job Queue
    :param log_backend: Log backend
    :type log_backend: moto.logs.models.LogsBackend
    """
    threading.Thread.__init__(self)

    self.job_name = name
    self.job_id = str(uuid.uuid4())
    self.job_definition = job_def
    # Default to an empty dict so callers may pass None without later
    # attribute lookups on the overrides blowing up.
    self.container_overrides = container_overrides or {}
    self.job_queue = job_queue
    self.job_state = "SUBMITTED"  # One of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED
    self.job_queue.jobs.append(self)
    # Epoch placeholders until run() records real start/stop times.
    self.job_started_at = datetime.datetime(1970, 1, 1)
    self.job_stopped_at = datetime.datetime(1970, 1, 1)
    self.job_stopped = False
    self.job_stopped_reason = None

    self.stop = False

    self.daemon = True
    self.name = "MOTO-BATCH-" + self.job_id

    self.docker_client = docker.from_env()
    self._log_backend = log_backend
    # Initialised here so describe() never hits an AttributeError;
    # run() assigns the real stream name once logs are shipped.
    self.log_stream_name = None

    # Unfortunately mocking replaces this method w/o fallback enabled, so we
    # need to replace it if we detect it's been mocked
    if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
        _orig_get_adapter = self.docker_client.api.get_adapter

        def replace_adapter_send(*args, **kwargs):
            adapter = _orig_get_adapter(*args, **kwargs)

            if isinstance(adapter, requests.adapters.HTTPAdapter):
                # Restore the real (unmocked) send so docker API calls
                # bypass the responses/requests mocking.
                adapter.send = functools.partial(_orig_adapter_send, adapter)
            return adapter

        self.docker_client.api.get_adapter = replace_adapter_send
|
def __init__(self, name, job_def, job_queue, log_backend, container_overrides):
    """
    Docker Job: a thread that runs one AWS Batch job inside a real
    Docker container and ships its logs to the mocked CloudWatch Logs
    backend.

    :param name: Job Name
    :param job_def: Job definition
    :type: job_def: JobDefinition
    :param job_queue: Job Queue
    :param log_backend: Log backend
    :type log_backend: moto.logs.models.LogsBackend

    Fixes (moto #2777): ``container_overrides`` is defaulted to ``{}``
    so a None argument is safe, and ``log_stream_name`` is initialised
    so describe() on a not-yet-run job no longer raises
    ``AttributeError: 'Job' object has no attribute 'log_stream_name'``.
    """
    threading.Thread.__init__(self)

    self.job_name = name
    self.job_id = str(uuid.uuid4())
    self.job_definition = job_def
    # Default to an empty dict so callers may pass None.
    self.container_overrides = container_overrides or {}
    self.job_queue = job_queue
    self.job_state = "SUBMITTED"  # One of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED
    self.job_queue.jobs.append(self)
    # Epoch placeholders until run() records real start/stop times.
    self.job_started_at = datetime.datetime(1970, 1, 1)
    self.job_stopped_at = datetime.datetime(1970, 1, 1)
    self.job_stopped = False
    self.job_stopped_reason = None

    self.stop = False

    self.daemon = True
    self.name = "MOTO-BATCH-" + self.job_id

    self.docker_client = docker.from_env()
    self._log_backend = log_backend
    # Assigned by run() once logs have been shipped to CloudWatch;
    # must exist beforehand so describe() can read it.
    self.log_stream_name = None

    # Unfortunately mocking replaces this method w/o fallback enabled, so we
    # need to replace it if we detect it's been mocked
    if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
        _orig_get_adapter = self.docker_client.api.get_adapter

        def replace_adapter_send(*args, **kwargs):
            adapter = _orig_get_adapter(*args, **kwargs)

            if isinstance(adapter, requests.adapters.HTTPAdapter):
                adapter.send = functools.partial(_orig_adapter_send, adapter)
            return adapter

        self.docker_client.api.get_adapter = replace_adapter_send
|
https://github.com/spulec/moto/issues/2777
|
2020-02-28 23:16:27 - werkzeug - INFO - 127.0.0.1 - - [28/Feb/2020 23:16:27] "POST /v1/describejobs HTTP/1.1" 500 -
2020-02-28 23:16:27 - werkzeug - ERROR - Error on request:
Traceback (most recent call last):
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/werkzeug/serving.py", line 323, in run_wsgi
execute(self.server.app)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/werkzeug/serving.py", line 312, in execute
application_iter = app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/server.py", line 135, in __call__
return backend_app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2463, in __call__
return self.wsgi_app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2449, in wsgi_app
response = self.handle_exception(e)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/utils.py", line 144, in __call__
result = self.callback(request, request.url, {})
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 197, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 295, in _dispatch
return self.call_action()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 380, in call_action
response = method()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/responses.py", line 257, in describejobs
return json.dumps({"jobs": self.batch_backend.describe_jobs(jobs)})
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/models.py", line 1229, in describe_jobs
result.append(job.describe())
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/models.py", line 355, in describe
result["container"]["logStreamName"] = self.log_stream_name
AttributeError: 'Job' object has no attribute 'log_stream_name'
|
AttributeError
|
def describe(self):
    """Serialise this job in the AWS Batch DescribeJobs response shape."""
    desc = {
        "jobDefinition": self.job_definition.arn,
        "jobId": self.job_id,
        "jobName": self.job_name,
        "jobQueue": self.job_queue.arn,
        "status": self.job_state,
        "dependsOn": [],
    }
    # startedAt only appears once the job has actually left the queue.
    if desc["status"] not in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING"]:
        desc["startedAt"] = datetime2int(self.job_started_at)
    if self.job_stopped:
        desc["stoppedAt"] = datetime2int(self.job_stopped_at)
    # Static container description (command is the mock default).
    desc["container"] = {
        "command": [
            '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
        ],
        "privileged": False,
        "readonlyRootFilesystem": False,
        "ulimits": {},
        "vcpus": 1,
        "volumes": "",
        "logStreamName": self.log_stream_name,
    }
    if self.job_stopped_reason is not None:
        desc["statusReason"] = self.job_stopped_reason
    return desc
|
def describe(self):
    """Serialise this job in the AWS Batch DescribeJobs response shape.

    Fixes (moto #2777): ``startedAt`` is reported only once the job has
    actually started (previously the epoch placeholder was always
    emitted), and ``logStreamName`` is read defensively because a job
    that never ran may not have the attribute set — the traceback in
    the issue shows exactly that AttributeError.
    """
    result = {
        "jobDefinition": self.job_definition.arn,
        "jobId": self.job_id,
        "jobName": self.job_name,
        "jobQueue": self.job_queue.arn,
        "status": self.job_state,
        "dependsOn": [],
    }
    # A job that has not started has no meaningful start time.
    if result["status"] not in ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING"]:
        result["startedAt"] = datetime2int(self.job_started_at)
    if self.job_stopped:
        result["stoppedAt"] = datetime2int(self.job_stopped_at)
    result["container"] = {}
    result["container"]["command"] = [
        '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'
    ]
    result["container"]["privileged"] = False
    result["container"]["readonlyRootFilesystem"] = False
    result["container"]["ulimits"] = {}
    result["container"]["vcpus"] = 1
    result["container"]["volumes"] = ""
    # getattr: tolerate instances whose __init__ never set the attribute.
    result["container"]["logStreamName"] = getattr(self, "log_stream_name", None)
    if self.job_stopped_reason is not None:
        result["statusReason"] = self.job_stopped_reason
    return result
|
https://github.com/spulec/moto/issues/2777
|
2020-02-28 23:16:27 - werkzeug - INFO - 127.0.0.1 - - [28/Feb/2020 23:16:27] "POST /v1/describejobs HTTP/1.1" 500 -
2020-02-28 23:16:27 - werkzeug - ERROR - Error on request:
Traceback (most recent call last):
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/werkzeug/serving.py", line 323, in run_wsgi
execute(self.server.app)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/werkzeug/serving.py", line 312, in execute
application_iter = app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/server.py", line 135, in __call__
return backend_app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2463, in __call__
return self.wsgi_app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2449, in wsgi_app
response = self.handle_exception(e)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/utils.py", line 144, in __call__
result = self.callback(request, request.url, {})
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 197, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 295, in _dispatch
return self.call_action()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 380, in call_action
response = method()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/responses.py", line 257, in describejobs
return json.dumps({"jobs": self.batch_backend.describe_jobs(jobs)})
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/models.py", line 1229, in describe_jobs
result.append(job.describe())
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/models.py", line 355, in describe
result["container"]["logStreamName"] = self.log_stream_name
AttributeError: 'Job' object has no attribute 'log_stream_name'
|
AttributeError
|
def run(self):
    """
    Run the container.

    Logic is as follows:
    Generate container info (eventually from task definition)
    Start container
    Loop whilst not asked to stop and the container is running.
    Get all logs from container between the last time I checked and now.
    Convert logs into cloudwatch format
    Put logs into cloudwatch

    :return:
    """
    try:
        self.job_state = "PENDING"
        time.sleep(1)

        # Container settings come from the job definition, with per-job
        # overrides resolved via _get_container_property.
        image = self.job_definition.container_properties.get("image", "alpine:latest")
        privileged = self.job_definition.container_properties.get("privileged", False)
        cmd = self._get_container_property(
            "command",
            '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"',
        )
        environment = {
            e["name"]: e["value"]
            for e in self._get_container_property("environment", [])
        }
        volumes = {
            v["name"]: v["host"] for v in self._get_container_property("volumes", [])
        }
        # Translate Batch mountPoints + volumes into docker bind mounts.
        mounts = [
            docker.types.Mount(
                m["containerPath"],
                volumes[m["sourceVolume"]]["sourcePath"],
                type="bind",
                read_only=m["readOnly"],
            )
            for m in self._get_container_property("mountPoints", [])
        ]
        name = "{0}-{1}".format(self.job_name, self.job_id)

        self.job_state = "RUNNABLE"
        # TODO setup ecs container instance
        time.sleep(1)

        self.job_state = "STARTING"
        log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
        container = self.docker_client.containers.run(
            image,
            cmd,
            detach=True,
            name=name,
            log_config=log_config,
            environment=environment,
            mounts=mounts,
            privileged=privileged,
        )
        self.job_state = "RUNNING"
        self.job_started_at = datetime.datetime.now()
        try:
            # Log collection
            logs_stdout = []
            logs_stderr = []
            container.reload()

            # Dodgy hack, we can only check docker logs once a second, but we want to loop more
            # so we can stop if asked to in a quick manner, should all go away if we go async
            # There also be some dodgyness when sending an integer to docker logs and some
            # events seem to be duplicated.
            now = datetime.datetime.now()
            i = 1
            while container.status == "running" and not self.stop:
                time.sleep(0.15)
                # Only poll docker logs every 10th iteration (~1.5 s) —
                # see the comment above about docker log granularity.
                if i % 10 == 0:
                    logs_stderr.extend(
                        container.logs(
                            stdout=False,
                            stderr=True,
                            timestamps=True,
                            since=datetime2int(now),
                        )
                        .decode()
                        .split("\n")
                    )
                    logs_stdout.extend(
                        container.logs(
                            stdout=True,
                            stderr=False,
                            timestamps=True,
                            since=datetime2int(now),
                        )
                        .decode()
                        .split("\n")
                    )
                    now = datetime.datetime.now()
                    container.reload()
                i += 1

            # Container should be stopped by this point... unless asked to stop
            if container.status == "running":
                container.kill()

            self.job_stopped_at = datetime.datetime.now()
            # Get final logs
            logs_stderr.extend(
                container.logs(
                    stdout=False,
                    stderr=True,
                    timestamps=True,
                    since=datetime2int(now),
                )
                .decode()
                .split("\n")
            )
            logs_stdout.extend(
                container.logs(
                    stdout=True,
                    stderr=False,
                    timestamps=True,
                    since=datetime2int(now),
                )
                .decode()
                .split("\n")
            )

            self.job_state = "SUCCEEDED" if not self.stop else "FAILED"

            # Process logs: drop empty lines, split the docker timestamp
            # prefix off each line, convert it to epoch seconds.
            logs_stdout = [x for x in logs_stdout if len(x) > 0]
            logs_stderr = [x for x in logs_stderr if len(x) > 0]
            logs = []
            for line in logs_stdout + logs_stderr:
                date, line = line.split(" ", 1)
                date = dateutil.parser.parse(date)
                # TODO: Replace with int(date.timestamp()) once we yeet Python2 out of the window
                date = int(
                    (time.mktime(date.timetuple()) + date.microsecond / 1000000.0)
                )
                logs.append({"timestamp": date, "message": line.strip()})

            # Send to cloudwatch
            log_group = "/aws/batch/job"
            stream_name = "{0}/default/{1}".format(
                self.job_definition.name, self.job_id
            )
            # Record the stream name so describe() can report it.
            self.log_stream_name = stream_name
            self._log_backend.ensure_log_group(log_group, None)
            self._log_backend.create_log_stream(log_group, stream_name)
            self._log_backend.put_log_events(log_group, stream_name, logs, None)
        except Exception as err:
            logger.error(
                "Failed to run AWS Batch container {0}. Error {1}".format(
                    self.name, err
                )
            )
            self.job_state = "FAILED"
            container.kill()
        finally:
            # Always clean the container up, success or failure.
            container.remove()
    except Exception as err:
        logger.error(
            "Failed to run AWS Batch container {0}. Error {1}".format(self.name, err)
        )
        self.job_state = "FAILED"

    self.job_stopped = True
    self.job_stopped_at = datetime.datetime.now()
|
def run(self) -> None:
        """
        Run the container.
        Logic is as follows:
        Generate container info (eventually from task definition)
        Start container
        Loop whilst not asked to stop and the container is running.
        Get all logs from container between the last time I checked and now.
        Convert logs into cloudwatch format
        Put logs into cloudwatch
        :return:
        """
        try:
            self.job_state = "PENDING"
            # NOTE(review): the sleep(1) calls between state transitions appear to be
            # pacing so observers can see each AWS Batch job state — confirm intent.
            time.sleep(1)
            # Pull container settings from the registered job definition; fall back
            # to a tiny default image/command so the mock can run standalone.
            image = self.job_definition.container_properties.get("image", "alpine:latest")
            privileged = self.job_definition.container_properties.get("privileged", False)
            cmd = self._get_container_property(
                "command",
                '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"',
            )
            # environment: list of {"name": ..., "value": ...} dicts -> plain mapping.
            environment = {
                e["name"]: e["value"]
                for e in self._get_container_property("environment", [])
            }
            # volumes: keyed by volume name; "host" presumably holds {"sourcePath": ...}
            # (see mount construction below) — TODO confirm against the Batch API shape.
            volumes = {
                v["name"]: v["host"] for v in self._get_container_property("volumes", [])
            }
            # Resolve each mountPoint's sourceVolume against the volumes mapping and
            # bind-mount it into the container.
            mounts = [
                docker.types.Mount(
                    m["containerPath"],
                    volumes[m["sourceVolume"]]["sourcePath"],
                    type="bind",
                    read_only=m["readOnly"],
                )
                for m in self._get_container_property("mountPoints", [])
            ]
            # Container name is unique per job instance.
            name = "{0}-{1}".format(self.job_name, self.job_id)
            self.job_state = "RUNNABLE"
            # TODO setup ecs container instance
            time.sleep(1)
            self.job_state = "STARTING"
            log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
            # Detached run: returns immediately with a Container handle that is
            # polled below.
            container = self.docker_client.containers.run(
                image,
                cmd,
                detach=True,
                name=name,
                log_config=log_config,
                environment=environment,
                mounts=mounts,
                privileged=privileged,
            )
            self.job_state = "RUNNING"
            self.job_started_at = datetime.datetime.now()
            try:
                # Log collection
                logs_stdout = []
                logs_stderr = []
                container.reload()
                # Dodgy hack, we can only check docker logs once a second, but we want to loop more
                # so we can stop if asked to in a quick manner, should all go away if we go async
                # There also be some dodgyness when sending an integer to docker logs and some
                # events seem to be duplicated.
                # `now` marks the start of the current log window; each fetch asks
                # docker only for lines `since` the previous fetch.
                now = datetime.datetime.now()
                i = 1
                while container.status == "running" and not self.stop:
                    time.sleep(0.15)
                    # Only hit the docker logs API roughly every 10th iteration
                    # (~1.5s) per the hack note above.
                    if i % 10 == 0:
                        logs_stderr.extend(
                            container.logs(
                                stdout=False,
                                stderr=True,
                                timestamps=True,
                                since=datetime2int(now),
                            )
                            .decode()
                            .split("\n")
                        )
                        logs_stdout.extend(
                            container.logs(
                                stdout=True,
                                stderr=False,
                                timestamps=True,
                                since=datetime2int(now),
                            )
                            .decode()
                            .split("\n")
                        )
                        # Advance the log window and refresh container.status.
                        now = datetime.datetime.now()
                        container.reload()
                    i += 1
                # Container should be stopped by this point... unless asked to stop
                if container.status == "running":
                    container.kill()
                self.job_stopped_at = datetime.datetime.now()
                # Get final logs
                logs_stderr.extend(
                    container.logs(
                        stdout=False,
                        stderr=True,
                        timestamps=True,
                        since=datetime2int(now),
                    )
                    .decode()
                    .split("\n")
                )
                logs_stdout.extend(
                    container.logs(
                        stdout=True,
                        stderr=False,
                        timestamps=True,
                        since=datetime2int(now),
                    )
                    .decode()
                    .split("\n")
                )
                # A job that was externally stopped is reported FAILED.
                self.job_state = "SUCCEEDED" if not self.stop else "FAILED"
                # Process logs
                logs_stdout = [x for x in logs_stdout if len(x) > 0]
                logs_stderr = [x for x in logs_stderr if len(x) > 0]
                logs = []
                for line in logs_stdout + logs_stderr:
                    # With timestamps=True each line is "<iso-timestamp> <message>".
                    date, line = line.split(" ", 1)
                    date = dateutil.parser.parse(date)
                    # NOTE(review): datetime.timestamp() does not exist on Python 2 —
                    # this line is Python-3-only as written.
                    date = int(date.timestamp())
                    logs.append({"timestamp": date, "message": line.strip()})
                # Send to cloudwatch
                log_group = "/aws/batch/job"
                stream_name = "{0}/default/{1}".format(
                    self.job_definition.name, self.job_id
                )
                # NOTE(review): log_stream_name is only assigned here, deep inside
                # run(); callers of describe() before this point would find the
                # attribute missing (see the AttributeError traceback accompanying
                # this snippet).
                self.log_stream_name = stream_name
                self._log_backend.ensure_log_group(log_group, None)
                self._log_backend.create_log_stream(log_group, stream_name)
                self._log_backend.put_log_events(log_group, stream_name, logs, None)
            except Exception as err:
                logger.error(
                    "Failed to run AWS Batch container {0}. Error {1}".format(
                        self.name, err
                    )
                )
                self.job_state = "FAILED"
                container.kill()
            finally:
                # Always clean up the docker container, success or failure.
                container.remove()
        except Exception as err:
            logger.error(
                "Failed to run AWS Batch container {0}. Error {1}".format(self.name, err)
            )
            self.job_state = "FAILED"
        # Whatever happened above, the job thread is done now.
        self.job_stopped = True
        self.job_stopped_at = datetime.datetime.now()
|
https://github.com/spulec/moto/issues/2777
|
2020-02-28 23:16:27 - werkzeug - INFO - 127.0.0.1 - - [28/Feb/2020 23:16:27] "POST /v1/describejobs HTTP/1.1" 500 -
2020-02-28 23:16:27 - werkzeug - ERROR - Error on request:
Traceback (most recent call last):
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/werkzeug/serving.py", line 323, in run_wsgi
execute(self.server.app)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/werkzeug/serving.py", line 312, in execute
application_iter = app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/server.py", line 135, in __call__
return backend_app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2463, in __call__
return self.wsgi_app(environ, start_response)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2449, in wsgi_app
response = self.handle_exception(e)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/utils.py", line 144, in __call__
result = self.callback(request, request.url, {})
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 197, in dispatch
return cls()._dispatch(*args, **kwargs)
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 295, in _dispatch
return self.call_action()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/core/responses.py", line 380, in call_action
response = method()
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/responses.py", line 257, in describejobs
return json.dumps({"jobs": self.batch_backend.describe_jobs(jobs)})
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/models.py", line 1229, in describe_jobs
result.append(job.describe())
File "/builds/dazza-codes/python-notes/.venv/lib/python3.6/site-packages/moto/batch/models.py", line 355, in describe
result["container"]["logStreamName"] = self.log_stream_name
AttributeError: 'Job' object has no attribute 'log_stream_name'
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.