after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def lookupPduClass(self, function_code):
"""Use `function_code` to determine the class of the PDU.
:param function_code: The function code specified in a frame.
:returns: The class of the PDU that has a matching `function_code`.
"""
raise NotImplementedException("Method not implemented by derived class")
|
def lookupPduClass(self, function_code):
"""Use `function_code` to determine the class of the PDU.
:param function_code: The function code specified in a frame.
:returns: The class of the PDU that has a matching `function_code`.
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def checkFrame(self):
"""Check and decode the next frame
:returns: True if we successful, False otherwise
"""
raise NotImplementedException("Method not implemented by derived class")
|
def checkFrame(self):
"""Check and decode the next frame
:returns: True if we successful, False otherwise
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def advanceFrame(self):
"""Skip over the current framed message
This allows us to skip over the current message after we have processed
it or determined that it contains an error. It also has to reset the
current frame header handle
"""
raise NotImplementedException("Method not implemented by derived class")
|
def advanceFrame(self):
"""Skip over the current framed message
This allows us to skip over the current message after we have processed
it or determined that it contains an error. It also has to reset the
current frame header handle
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def addToFrame(self, message):
"""Add the next message to the frame buffer
This should be used before the decoding while loop to add the received
data to the buffer handle.
:param message: The most recent packet
"""
raise NotImplementedException("Method not implemented by derived class")
|
def addToFrame(self, message):
"""Add the next message to the frame buffer
This should be used before the decoding while loop to add the received
data to the buffer handle.
:param message: The most recent packet
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def isFrameReady(self):
"""Check if we should continue decode logic
This is meant to be used in a while loop in the decoding phase to let
the decoder know that there is still data in the buffer.
:returns: True if ready, False otherwise
"""
raise NotImplementedException("Method not implemented by derived class")
|
def isFrameReady(self):
"""Check if we should continue decode logic
This is meant to be used in a while loop in the decoding phase to let
the decoder know that there is still data in the buffer.
:returns: True if ready, False otherwise
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def getFrame(self):
"""Get the next frame from the buffer
:returns: The frame data or ''
"""
raise NotImplementedException("Method not implemented by derived class")
|
def getFrame(self):
"""Get the next frame from the buffer
:returns: The frame data or ''
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def populateResult(self, result):
"""Populates the modbus result with current frame header
We basically copy the data back over from the current header
to the result header. This may not be needed for serial messages.
:param result: The response packet
"""
raise NotImplementedException("Method not implemented by derived class")
|
def populateResult(self, result):
"""Populates the modbus result with current frame header
We basically copy the data back over from the current header
to the result header. This may not be needed for serial messages.
:param result: The response packet
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def processIncomingPacket(self, data, callback):
"""The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 / N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
"""
raise NotImplementedException("Method not implemented by derived class")
|
def processIncomingPacket(self, data, callback):
"""The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 / N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def buildPacket(self, message):
"""Creates a ready to send modbus packet
The raw packet is built off of a fully populated modbus
request / response message.
:param message: The request/response to send
:returns: The built packet
"""
raise NotImplementedException("Method not implemented by derived class")
|
def buildPacket(self, message):
"""Creates a ready to send modbus packet
The raw packet is built off of a fully populated modbus
request / response message.
:param message: The request/response to send
:returns: The built packet
"""
raise NotImplementedException("Method not implemented by derived class")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def decode(self, fx):
"""Converts the function code to the datastore to
:param fx: The function we are working with
:returns: one of [d(iscretes),i(inputs),h(oliding),c(oils)
"""
return self.__fx_mapper[fx]
|
def decode(self, fx):
"""Converts the function code to the datastore to
:param fx: The function we are working with
:returns: one of [d(iscretes),i(inputs),h(oliding),c(oils)
"""
return self.__fx_mapper[fx]
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def reset(self):
"""Resets all the datastores to their default values"""
raise NotImplementedException("Context Reset")
|
def reset(self):
"""Resets all the datastores to their default values"""
raise NotImplementedException("Context Reset")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def validate(self, fx, address, count=1):
"""Validates the request to make sure it is in range
:param fx: The function we are working with
:param address: The starting address
:param count: The number of values to test
:returns: True if the request in within range, False otherwise
"""
raise NotImplementedException("validate context values")
|
def validate(self, fx, address, count=1):
"""Validates the request to make sure it is in range
:param fx: The function we are working with
:param address: The starting address
:param count: The number of values to test
:returns: True if the request in within range, False otherwise
"""
raise NotImplementedException("validate context values")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def getValues(self, fx, address, count=1):
"""Get `count` values from datastore
:param fx: The function we are working with
:param address: The starting address
:param count: The number of values to retrieve
:returns: The requested values from a:a+c
"""
raise NotImplementedException("get context values")
|
def getValues(self, fx, address, count=1):
"""Get `count` values from datastore
:param fx: The function we are working with
:param address: The starting address
:param count: The number of values to retrieve
:returns: The requested values from a:a+c
"""
raise NotImplementedException("get context values")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def setValues(self, fx, address, values):
"""Sets the datastore with the supplied values
:param fx: The function we are working with
:param address: The starting address
:param values: The new values to be set
"""
raise NotImplementedException("set context values")
|
def setValues(self, fx, address, values):
"""Sets the datastore with the supplied values
:param fx: The function we are working with
:param address: The starting address
:param values: The new values to be set
"""
raise NotImplementedException("set context values")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def build(self):
"""Return the payload buffer as a list
This list is two bytes per element and can
thus be treated as a list of registers.
:returns: The payload buffer as a list
"""
raise NotImplementedException("set context values")
|
def build(self):
"""Return the payload buffer as a list
This list is two bytes per element and can
thus be treated as a list of registers.
:returns: The payload buffer as a list
"""
raise NotImplementedException("set context values")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def execute(self, context=None):
"""Run a read exeception status request against the store
:returns: The populated response
"""
information = DeviceInformationFactory.get(_MCB)
identifier = "-".join(information.values()).encode()
identifier = identifier or b"Pymodbus"
return ReportSlaveIdResponse(identifier)
|
def execute(self, context=None):
"""Run a read exeception status request against the store
:returns: The populated response
"""
identifier = b"Pymodbus"
return ReportSlaveIdResponse(identifier)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def to_coils(self):
"""Convert the payload buffer into a coil
layout that can be used as a context block.
:returns: The coil layout to use as a block
"""
payload = self.to_registers()
coils = [bool(int(bit)) for reg in payload for bit in format(reg, "016b")]
return coils
|
def to_coils(self):
"""Convert the payload buffer into a coil
layout that can be used as a context block.
:returns: The coil layout to use as a block
"""
payload = self.to_registers()
coils = [bool(int(bit)) for reg in payload[1:] for bit in format(reg, "016b")]
return coils
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def fromCoils(klass, coils, byteorder=Endian.Little, wordorder=Endian.Big):
"""Initialize a payload decoder with the result of
reading a collection of coils from a modbus device.
The coils are treated as a list of bit(boolean) values.
:param coils: The coil results to initialize with
:param byteorder: The endianess of the payload
:returns: An initialized PayloadDecoder
"""
if isinstance(coils, list):
payload = b""
padding = len(coils) % 8
if padding: # Pad zero's
extra = [False] * padding
coils = extra + coils
chunks = klass.bit_chunks(coils)
for chunk in chunks:
payload += pack_bitstring(chunk[::-1])
return klass(payload, byteorder)
raise ParameterException("Invalid collection of coils supplied")
|
def fromCoils(klass, coils, byteorder=Endian.Little):
"""Initialize a payload decoder with the result of
reading a collection of coils from a modbus device.
The coils are treated as a list of bit(boolean) values.
:param coils: The coil results to initialize with
:param byteorder: The endianess of the payload
:returns: An initialized PayloadDecoder
"""
if isinstance(coils, list):
payload = pack_bitstring(coils)
return klass(payload, byteorder)
raise ParameterException("Invalid collection of coils supplied")
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def __init__(self, store, framer=None, identity=None, **kwargs):
"""Overloaded initializer for the modbus factory
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param store: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param ignore_missing_slaves: True to not send errors on a request to
a missing slave
"""
framer = framer or ModbusSocketFramer
self.decoder = ServerDecoder()
self.framer = framer(self.decoder)
self.store = store or ModbusServerContext()
self.control = ModbusControlBlock()
self.access = ModbusAccessControl()
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
|
def __init__(self, store, framer=None, identity=None, **kwargs):
"""Overloaded initializer for the modbus factory
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param store: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param ignore_missing_slaves: True to not send errors on a request to
a missing slave
"""
framer = framer or ModbusSocketFramer
self.framer = framer(decoder=ServerDecoder())
self.store = store or ModbusServerContext()
self.control = ModbusControlBlock()
self.access = ModbusAccessControl()
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def StartTcpServer(
context,
identity=None,
address=None,
console=False,
defer_reactor_run=False,
custom_functions=[],
**kwargs,
):
"""
Helper method to start the Modbus Async TCP server
:param context: The server data context
:param identify: The server identity to use (default empty)
:param address: An optional (interface, port) to bind to.
:param console: A flag indicating if you want the debug console
:param ignore_missing_slaves: True to not send errors on a request \
to a missing slave
:param defer_reactor_run: True/False defer running reactor.run() as part \
of starting server, to be explictly started by the user
:param custom_functions: An optional list of custom function classes
supported by server instance.
"""
from twisted.internet import reactor
address = address or ("", Defaults.Port)
framer = kwargs.pop("framer", ModbusSocketFramer)
factory = ModbusServerFactory(context, framer, identity, **kwargs)
for f in custom_functions:
factory.decoder.register(f)
if console:
InstallManagementConsole({"factory": factory})
_logger.info("Starting Modbus TCP Server on %s:%s" % address)
reactor.listenTCP(address[1], factory, interface=address[0])
if not defer_reactor_run:
reactor.run(installSignalHandlers=_is_main_thread())
|
def StartTcpServer(
context,
identity=None,
address=None,
console=False,
defer_reactor_run=False,
**kwargs,
):
"""
Helper method to start the Modbus Async TCP server
:param context: The server data context
:param identify: The server identity to use (default empty)
:param address: An optional (interface, port) to bind to.
:param console: A flag indicating if you want the debug console
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
:param defer_reactor_run: True/False defer running reactor.run() as part
of starting server, to be explictly started by the user
"""
from twisted.internet import reactor
address = address or ("", Defaults.Port)
framer = kwargs.pop("framer", ModbusSocketFramer)
factory = ModbusServerFactory(context, framer, identity, **kwargs)
if console:
InstallManagementConsole({"factory": factory})
_logger.info("Starting Modbus TCP Server on %s:%s" % address)
reactor.listenTCP(address[1], factory, interface=address[0])
if not defer_reactor_run:
reactor.run(installSignalHandlers=_is_main_thread())
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def StartUdpServer(
context,
identity=None,
address=None,
defer_reactor_run=False,
custom_functions=[],
**kwargs,
):
"""
Helper method to start the Modbus Async Udp server
:param context: The server data context
:param identify: The server identity to use (default empty)
:param address: An optional (interface, port) to bind to.
:param ignore_missing_slaves: True to not send errors on a request \
to a missing slave
:param defer_reactor_run: True/False defer running reactor.run() as part \
of starting server, to be explictly started by the user
:param custom_functions: An optional list of custom function classes
supported by server instance.
"""
from twisted.internet import reactor
address = address or ("", Defaults.Port)
framer = kwargs.pop("framer", ModbusSocketFramer)
server = ModbusUdpProtocol(context, framer, identity, **kwargs)
for f in custom_functions:
server.decoder.register(f)
_logger.info("Starting Modbus UDP Server on %s:%s" % address)
reactor.listenUDP(address[1], server, interface=address[0])
if not defer_reactor_run:
reactor.run(installSignalHandlers=_is_main_thread())
|
def StartUdpServer(
context, identity=None, address=None, defer_reactor_run=False, **kwargs
):
"""
Helper method to start the Modbus Async Udp server
:param context: The server data context
:param identify: The server identity to use (default empty)
:param address: An optional (interface, port) to bind to.
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
:param defer_reactor_run: True/False defer running reactor.run() as part
of starting server, to be explictly started by the user
"""
from twisted.internet import reactor
address = address or ("", Defaults.Port)
framer = kwargs.pop("framer", ModbusSocketFramer)
server = ModbusUdpProtocol(context, framer, identity, **kwargs)
_logger.info("Starting Modbus UDP Server on %s:%s" % address)
reactor.listenUDP(address[1], server, interface=address[0])
if not defer_reactor_run:
reactor.run(installSignalHandlers=_is_main_thread())
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def StartSerialServer(
context,
identity=None,
framer=ModbusAsciiFramer,
defer_reactor_run=False,
custom_functions=[],
**kwargs,
):
"""
Helper method to start the Modbus Async Serial server
:param context: The server data context
:param identify: The server identity to use (default empty)
:param framer: The framer to use (default ModbusAsciiFramer)
:param port: The serial port to attach to
:param baudrate: The baud rate to use for the serial device
:param console: A flag indicating if you want the debug console
:param ignore_missing_slaves: True to not send errors on a request to a
missing slave
:param defer_reactor_run: True/False defer running reactor.run() as part
of starting server, to be explictly started by the user
:param custom_functions: An optional list of custom function classes
supported by server instance.
"""
from twisted.internet import reactor
from twisted.internet.serialport import SerialPort
port = kwargs.get("port", "/dev/ttyS0")
baudrate = kwargs.get("baudrate", Defaults.Baudrate)
console = kwargs.get("console", False)
bytesize = kwargs.get("bytesize", Defaults.Bytesize)
stopbits = kwargs.get("stopbits", Defaults.Stopbits)
parity = kwargs.get("parity", Defaults.Parity)
timeout = kwargs.get("timeout", 0)
xonxoff = kwargs.get("xonxoff", 0)
rtscts = kwargs.get("rtscts", 0)
_logger.info("Starting Modbus Serial Server on %s" % port)
factory = ModbusServerFactory(context, framer, identity, **kwargs)
for f in custom_functions:
factory.decoder.register(f)
if console:
InstallManagementConsole({"factory": factory})
if console:
InstallManagementConsole({"factory": factory})
protocol = factory.buildProtocol(None)
SerialPort.getHost = lambda self: port # hack for logging
SerialPort(
protocol,
port,
reactor,
baudrate=baudrate,
parity=parity,
stopbits=stopbits,
timeout=timeout,
xonxoff=xonxoff,
rtscts=rtscts,
bytesize=bytesize,
)
if not defer_reactor_run:
reactor.run(installSignalHandlers=_is_main_thread())
|
def StartSerialServer(
context, identity=None, framer=ModbusAsciiFramer, defer_reactor_run=False, **kwargs
):
"""
Helper method to start the Modbus Async Serial server
:param context: The server data context
:param identify: The server identity to use (default empty)
:param framer: The framer to use (default ModbusAsciiFramer)
:param port: The serial port to attach to
:param baudrate: The baud rate to use for the serial device
:param console: A flag indicating if you want the debug console
:param ignore_missing_slaves: True to not send errors on a request to a
missing slave
:param defer_reactor_run: True/False defer running reactor.run() as part
of starting server, to be explictly started by the user
"""
from twisted.internet import reactor
from twisted.internet.serialport import SerialPort
port = kwargs.get("port", "/dev/ttyS0")
baudrate = kwargs.get("baudrate", Defaults.Baudrate)
console = kwargs.get("console", False)
_logger.info("Starting Modbus Serial Server on %s" % port)
factory = ModbusServerFactory(context, framer, identity, **kwargs)
if console:
InstallManagementConsole({"factory": factory})
protocol = factory.buildProtocol(None)
SerialPort.getHost = lambda self: port # hack for logging
SerialPort(protocol, port, reactor, baudrate)
if not defer_reactor_run:
reactor.run(installSignalHandlers=_is_main_thread())
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def execute(self, request):
"""The callback to call with the resulting message
:param request: The decoded request message
"""
broadcast = False
try:
if self.server.broadcast_enable and request.unit_id == 0:
broadcast = True
# if broadcasting then execute on all slave contexts, note response will be ignored
for unit_id in self.server.context.slaves():
response = request.execute(self.server.context[unit_id])
else:
context = self.server.context[request.unit_id]
response = request.execute(context)
except NoSuchSlaveException as ex:
_logger.debug("requested slave does not exist: %s" % request.unit_id)
if self.server.ignore_missing_slaves:
return # the client will simply timeout waiting for a response
response = request.doException(merror.GatewayNoResponse)
except Exception as ex:
_logger.debug(
"Datastore unable to fulfill request: %s; %s", ex, traceback.format_exc()
)
response = request.doException(merror.SlaveFailure)
# no response when broadcasting
if not broadcast:
response.transaction_id = request.transaction_id
response.unit_id = request.unit_id
self.send(response)
|
def execute(self, request):
"""The callback to call with the resulting message
:param request: The decoded request message
"""
try:
context = self.server.context[request.unit_id]
response = request.execute(context)
except NoSuchSlaveException as ex:
_logger.debug("requested slave does not exist: %s" % request.unit_id)
if self.server.ignore_missing_slaves:
return # the client will simply timeout waiting for a response
response = request.doException(merror.GatewayNoResponse)
except Exception as ex:
_logger.debug(
"Datastore unable to fulfill request: %s; %s", ex, traceback.format_exc()
)
response = request.doException(merror.SlaveFailure)
response.transaction_id = request.transaction_id
response.unit_id = request.unit_id
self.send(response)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def handle(self):
"""Callback when we receive any data"""
while self.running:
try:
data = self.request.recv(1024)
if data:
units = self.server.context.slaves()
if not isinstance(units, (list, tuple)):
units = [units]
# if broadcast is enabled make sure to process requests to address 0
if self.server.broadcast_enable:
if 0 not in units:
units.append(0)
single = self.server.context.single
self.framer.processIncomingPacket(
data, self.execute, units, single=single
)
except Exception as msg:
# Since we only have a single socket, we cannot exit
# Clear frame buffer
self.framer.resetFrame()
_logger.debug("Error: Socket error occurred %s" % msg)
|
def handle(self):
"""Callback when we receive any data"""
while self.running:
try:
data = self.request.recv(1024)
if data:
units = self.server.context.slaves()
single = self.server.context.single
self.framer.processIncomingPacket(
data, self.execute, units, single=single
)
except Exception as msg:
# Since we only have a single socket, we cannot exit
# Clear frame buffer
self.framer.resetFrame()
_logger.debug("Error: Socket error occurred %s" % msg)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def send(self, message):
"""Send a request (string) to the network
:param message: The unencoded modbus response
"""
if message.should_respond:
# self.server.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("send: [%s]- %s" % (message, b2a_hex(pdu)))
return self.request.send(pdu)
|
def send(self, message):
"""Send a request (string) to the network
:param message: The unencoded modbus response
"""
if message.should_respond:
# self.server.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("send: %s" % b2a_hex(pdu))
return self.request.send(pdu)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def handle(self):
"""Callback when we receive any data, until self.running becomes False.
Blocks indefinitely awaiting data. If shutdown is required, then the
global socket.settimeout(<seconds>) may be used, to allow timely
checking of self.running. However, since this also affects socket
connects, if there are outgoing socket connections used in the same
program, then these will be prevented, if the specfied timeout is too
short. Hence, this is unreliable.
To respond to Modbus...Server.server_close() (which clears each
handler's self.running), derive from this class to provide an
alternative handler that awakens from time to time when no input is
available and checks self.running.
Use Modbus...Server( handler=... ) keyword to supply the alternative
request handler class.
"""
reset_frame = False
while self.running:
try:
units = self.server.context.slaves()
data = self.request.recv(1024)
if not data:
self.running = False
else:
if not isinstance(units, (list, tuple)):
units = [units]
# if broadcast is enabled make sure to
# process requests to address 0
if self.server.broadcast_enable:
if 0 not in units:
units.append(0)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("Handling data: " + hexlify_packets(data))
single = self.server.context.single
self.framer.processIncomingPacket(data, self.execute, units, single=single)
except socket.timeout as msg:
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("Socket timeout occurred %s", msg)
reset_frame = True
except socket.error as msg:
_logger.error("Socket error occurred %s" % msg)
self.running = False
except:
_logger.error("Socket exception occurred %s" % traceback.format_exc())
self.running = False
reset_frame = True
finally:
if reset_frame:
self.framer.resetFrame()
reset_frame = False
|
def handle(self):
"""Callback when we receive any data, until self.running becomes False.
Blocks indefinitely awaiting data. If shutdown is required, then the
global socket.settimeout(<seconds>) may be used, to allow timely
checking of self.running. However, since this also affects socket
connects, if there are outgoing socket connections used in the same
program, then these will be prevented, if the specfied timeout is too
short. Hence, this is unreliable.
To respond to Modbus...Server.server_close() (which clears each
handler's self.running), derive from this class to provide an
alternative handler that awakens from time to time when no input is
available and checks self.running.
Use Modbus...Server( handler=... ) keyword to supply the alternative
request handler class.
"""
reset_frame = False
while self.running:
try:
data = self.request.recv(1024)
if not data:
self.running = False
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("Handling data: " + hexlify_packets(data))
# if not self.server.control.ListenOnly:
units = self.server.context.slaves()
single = self.server.context.single
self.framer.processIncomingPacket(data, self.execute, units, single=single)
except socket.timeout as msg:
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("Socket timeout occurred %s", msg)
reset_frame = True
except socket.error as msg:
_logger.error("Socket error occurred %s" % msg)
self.running = False
except:
_logger.error("Socket exception occurred %s" % traceback.format_exc())
self.running = False
reset_frame = True
finally:
if reset_frame:
self.framer.resetFrame()
reset_frame = False
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def handle(self):
"""Callback when we receive any data"""
reset_frame = False
while self.running:
try:
data, self.socket = self.request
if not data:
self.running = False
data = b""
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("Handling data: " + hexlify_packets(data))
# if not self.server.control.ListenOnly:
units = self.server.context.slaves()
single = self.server.context.single
self.framer.processIncomingPacket(data, self.execute, units, single=single)
except socket.timeout:
pass
except socket.error as msg:
_logger.error("Socket error occurred %s" % msg)
self.running = False
reset_frame = True
except Exception as msg:
_logger.error(msg)
self.running = False
reset_frame = True
finally:
# Reset data after processing
self.request = (None, self.socket)
if reset_frame:
self.framer.resetFrame()
reset_frame = False
|
def handle(self):
"""Callback when we receive any data"""
reset_frame = False
while self.running:
try:
data, self.socket = self.request
if not data:
self.running = False
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("Handling data: " + hexlify_packets(data))
# if not self.server.control.ListenOnly:
units = self.server.context.slaves()
single = self.server.context.single
self.framer.processIncomingPacket(data, self.execute, units, single=single)
except socket.timeout:
pass
except socket.error as msg:
_logger.error("Socket error occurred %s" % msg)
self.running = False
reset_frame = True
except Exception as msg:
_logger.error(msg)
self.running = False
reset_frame = True
finally:
# Reset data after processing
self.request = (None, self.socket)
if reset_frame:
self.framer.resetFrame()
reset_frame = False
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def send(self, message):
"""Send a request (string) to the network
:param message: The unencoded modbus response
"""
if message.should_respond:
# self.server.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("send: [%s]- %s" % (message, b2a_hex(pdu)))
return self.socket.sendto(pdu, self.client_address)
|
def send(self, message):
"""Send a request (string) to the network
:param message: The unencoded modbus response
"""
if message.should_respond:
# self.server.control.Counter.BusMessage += 1
pdu = self.framer.buildPacket(message)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("send: %s" % b2a_hex(pdu))
return self.socket.sendto(pdu, self.client_address)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def __init__(
self,
context,
framer=None,
identity=None,
address=None,
handler=None,
allow_reuse_address=False,
**kwargs,
):
"""Overloaded initializer for the socket server
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param handler: A handler for each client session; default is
ModbusConnectedRequestHandler
:param allow_reuse_address: Whether the server will allow the
reuse of an address.
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
:param broadcast_enable: True to treat unit_id 0 as broadcast address,
False to treat 0 as any other unit_id
"""
self.threads = []
self.allow_reuse_address = allow_reuse_address
self.decoder = ServerDecoder()
self.framer = framer or ModbusSocketFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
self.address = address or ("", Defaults.Port)
self.handler = handler or ModbusConnectedRequestHandler
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
self.broadcast_enable = kwargs.get("broadcast_enable", Defaults.broadcast_enable)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
socketserver.ThreadingTCPServer.__init__(self, self.address, self.handler)
|
def __init__(
self,
context,
framer=None,
identity=None,
address=None,
handler=None,
allow_reuse_address=False,
**kwargs,
):
"""Overloaded initializer for the socket server
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param handler: A handler for each client session; default is
ModbusConnectedRequestHandler
:param allow_reuse_address: Whether the server will allow the
reuse of an address.
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
"""
self.threads = []
self.allow_reuse_address = allow_reuse_address
self.decoder = ServerDecoder()
self.framer = framer or ModbusSocketFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
self.address = address or ("", Defaults.Port)
self.handler = handler or ModbusConnectedRequestHandler
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
socketserver.ThreadingTCPServer.__init__(self, self.address, self.handler)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def __init__(
self, context, framer=None, identity=None, address=None, handler=None, **kwargs
):
"""Overloaded initializer for the socket server
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param handler: A handler for each client session; default is
ModbusDisonnectedRequestHandler
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
:param broadcast_enable: True to treat unit_id 0 as broadcast address,
False to treat 0 as any other unit_id
"""
self.threads = []
self.decoder = ServerDecoder()
self.framer = framer or ModbusSocketFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
self.address = address or ("", Defaults.Port)
self.handler = handler or ModbusDisconnectedRequestHandler
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
self.broadcast_enable = kwargs.get("broadcast_enable", Defaults.broadcast_enable)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
socketserver.ThreadingUDPServer.__init__(self, self.address, self.handler)
|
def __init__(
self, context, framer=None, identity=None, address=None, handler=None, **kwargs
):
"""Overloaded initializer for the socket server
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param handler: A handler for each client session; default is
ModbusDisonnectedRequestHandler
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
"""
self.threads = []
self.decoder = ServerDecoder()
self.framer = framer or ModbusSocketFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
self.address = address or ("", Defaults.Port)
self.handler = handler or ModbusDisconnectedRequestHandler
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
socketserver.ThreadingUDPServer.__init__(self, self.address, self.handler)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def __init__(self, context, framer=None, identity=None, **kwargs):
"""Overloaded initializer for the socket server
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout to use for the serial device
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
:param broadcast_enable: True to treat unit_id 0 as broadcast address,
False to treat 0 as any other unit_id
"""
self.threads = []
self.decoder = ServerDecoder()
self.framer = framer or ModbusAsciiFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
self.device = kwargs.get("port", 0)
self.stopbits = kwargs.get("stopbits", Defaults.Stopbits)
self.bytesize = kwargs.get("bytesize", Defaults.Bytesize)
self.parity = kwargs.get("parity", Defaults.Parity)
self.baudrate = kwargs.get("baudrate", Defaults.Baudrate)
self.timeout = kwargs.get("timeout", Defaults.Timeout)
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
self.broadcast_enable = kwargs.get("broadcast_enable", Defaults.broadcast_enable)
self.socket = None
if self._connect():
self.is_running = True
self._build_handler()
|
def __init__(self, context, framer=None, identity=None, **kwargs):
"""Overloaded initializer for the socket server
If the identify structure is not passed in, the ModbusControlBlock
uses its own empty structure.
:param context: The ModbusServerContext datastore
:param framer: The framer strategy to use
:param identity: An optional identify structure
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout to use for the serial device
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
"""
self.threads = []
self.decoder = ServerDecoder()
self.framer = framer or ModbusAsciiFramer
self.context = context or ModbusServerContext()
self.control = ModbusControlBlock()
if isinstance(identity, ModbusDeviceIdentification):
self.control.Identity.update(identity)
self.device = kwargs.get("port", 0)
self.stopbits = kwargs.get("stopbits", Defaults.Stopbits)
self.bytesize = kwargs.get("bytesize", Defaults.Bytesize)
self.parity = kwargs.get("parity", Defaults.Parity)
self.baudrate = kwargs.get("baudrate", Defaults.Baudrate)
self.timeout = kwargs.get("timeout", Defaults.Timeout)
self.ignore_missing_slaves = kwargs.get(
"ignore_missing_slaves", Defaults.IgnoreMissingSlaves
)
self.socket = None
if self._connect():
self.is_running = True
self._build_handler()
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def StartTcpServer(
context=None, identity=None, address=None, custom_functions=[], **kwargs
):
"""A factory to start and run a tcp modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param custom_functions: An optional list of custom function classes
supported by server instance.
:param ignore_missing_slaves: True to not send errors on a request to a
missing slave
"""
framer = kwargs.pop("framer", ModbusSocketFramer)
server = ModbusTcpServer(context, framer, identity, address, **kwargs)
for f in custom_functions:
server.decoder.register(f)
server.serve_forever()
|
def StartTcpServer(context=None, identity=None, address=None, **kwargs):
"""A factory to start and run a tcp modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param ignore_missing_slaves: True to not send errors on a request to a
missing slave
"""
framer = kwargs.pop("framer", ModbusSocketFramer)
server = ModbusTcpServer(context, framer, identity, address, **kwargs)
server.serve_forever()
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def StartUdpServer(
context=None, identity=None, address=None, custom_functions=[], **kwargs
):
"""A factory to start and run a udp modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param custom_functions: An optional list of custom function classes
supported by server instance.
:param framer: The framer to operate with (default ModbusSocketFramer)
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
"""
framer = kwargs.pop("framer", ModbusSocketFramer)
server = ModbusUdpServer(context, framer, identity, address, **kwargs)
for f in custom_functions:
server.decoder.register(f)
server.serve_forever()
|
def StartUdpServer(context=None, identity=None, address=None, **kwargs):
"""A factory to start and run a udp modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param address: An optional (interface, port) to bind to.
:param framer: The framer to operate with (default ModbusSocketFramer)
:param ignore_missing_slaves: True to not send errors on a request
to a missing slave
"""
framer = kwargs.pop("framer", ModbusSocketFramer)
server = ModbusUdpServer(context, framer, identity, address, **kwargs)
server.serve_forever()
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def StartSerialServer(context=None, identity=None, custom_functions=[], **kwargs):
"""A factory to start and run a serial modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param custom_functions: An optional list of custom function classes
supported by server instance.
:param framer: The framer to operate with (default ModbusAsciiFramer)
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout to use for the serial device
:param ignore_missing_slaves: True to not send errors on a request to a
missing slave
"""
framer = kwargs.pop("framer", ModbusAsciiFramer)
server = ModbusSerialServer(context, framer, identity, **kwargs)
for f in custom_functions:
server.decoder.register(f)
server.serve_forever()
|
def StartSerialServer(context=None, identity=None, **kwargs):
"""A factory to start and run a serial modbus server
:param context: The ModbusServerContext datastore
:param identity: An optional identify structure
:param framer: The framer to operate with (default ModbusAsciiFramer)
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout to use for the serial device
:param ignore_missing_slaves: True to not send errors on a request to a
missing slave
"""
framer = kwargs.pop("framer", ModbusAsciiFramer)
server = ModbusSerialServer(context, framer, identity, **kwargs)
server.serve_forever()
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def execute(self, request):
"""Starts the producer to send the next request to
consumer.write(Frame(request))
"""
with self._transaction_lock:
try:
_logger.debug(
"Current transaction state - {}".format(
ModbusTransactionState.to_string(self.client.state)
)
)
retries = self.retries
request.transaction_id = self.getNextTID()
_logger.debug("Running transaction {}".format(request.transaction_id))
_buffer = hexlify_packets(self.client.framer._buffer)
if _buffer:
_logger.debug("Clearing current Frame : - {}".format(_buffer))
self.client.framer.resetFrame()
broadcast = self.client.broadcast_enable and request.unit_id == 0
if broadcast:
self._transact(request, None, broadcast=True)
response = b"Broadcast write sent - no response expected"
else:
expected_response_length = None
if not isinstance(self.client.framer, ModbusSocketFramer):
if hasattr(request, "get_response_pdu_size"):
response_pdu_size = request.get_response_pdu_size()
if isinstance(self.client.framer, ModbusAsciiFramer):
response_pdu_size = response_pdu_size * 2
if response_pdu_size:
expected_response_length = self._calculate_response_length(
response_pdu_size
)
if request.unit_id in self._no_response_devices:
full = True
else:
full = False
c_str = str(self.client)
if "modbusudpclient" in c_str.lower().strip():
full = True
if not expected_response_length:
expected_response_length = Defaults.ReadSize
response, last_exception = self._transact(
request, expected_response_length, full=full, broadcast=broadcast
)
if not response and (request.unit_id not in self._no_response_devices):
self._no_response_devices.append(request.unit_id)
elif request.unit_id in self._no_response_devices and response:
self._no_response_devices.remove(request.unit_id)
if not response and self.retry_on_empty and retries:
while retries > 0:
if hasattr(self.client, "state"):
_logger.debug(
"RESETTING Transaction state to 'IDLE' for retry"
)
self.client.state = ModbusTransactionState.IDLE
_logger.debug("Retry on empty - {}".format(retries))
response, last_exception = self._transact(
request, expected_response_length
)
if not response:
retries -= 1
continue
# Remove entry
self._no_response_devices.remove(request.unit_id)
break
addTransaction = partial(
self.addTransaction, tid=request.transaction_id
)
self.client.framer.processIncomingPacket(
response, addTransaction, request.unit_id
)
response = self.getTransaction(request.transaction_id)
if not response:
if len(self.transactions):
response = self.getTransaction(tid=0)
else:
last_exception = last_exception or (
"No Response received from the remote unit"
"/Unable to decode response"
)
response = ModbusIOException(
last_exception, request.function_code
)
if hasattr(self.client, "state"):
_logger.debug(
"Changing transaction state from "
"'PROCESSING REPLY' to "
"'TRANSACTION_COMPLETE'"
)
self.client.state = ModbusTransactionState.TRANSACTION_COMPLETE
return response
except ModbusIOException as ex:
# Handle decode errors in processIncomingPacket method
_logger.exception(ex)
self.client.state = ModbusTransactionState.TRANSACTION_COMPLETE
return ex
|
def execute(self, request):
"""Starts the producer to send the next request to
consumer.write(Frame(request))
"""
with self._transaction_lock:
try:
_logger.debug(
"Current transaction state - {}".format(
ModbusTransactionState.to_string(self.client.state)
)
)
retries = self.retries
request.transaction_id = self.getNextTID()
_logger.debug("Running transaction %d" % request.transaction_id)
_buffer = hexlify_packets(self.client.framer._buffer)
if _buffer:
_logger.debug("Clearing current Frame : - {}".format(_buffer))
self.client.framer.resetFrame()
expected_response_length = None
if not isinstance(self.client.framer, ModbusSocketFramer):
if hasattr(request, "get_response_pdu_size"):
response_pdu_size = request.get_response_pdu_size()
if isinstance(self.client.framer, ModbusAsciiFramer):
response_pdu_size = response_pdu_size * 2
if response_pdu_size:
expected_response_length = self._calculate_response_length(
response_pdu_size
)
if request.unit_id in self._no_response_devices:
full = True
else:
full = False
c_str = str(self.client)
if "modbusudpclient" in c_str.lower().strip():
full = True
if not expected_response_length:
expected_response_length = Defaults.ReadSize
response, last_exception = self._transact(
request, expected_response_length, full=full
)
if not response and (request.unit_id not in self._no_response_devices):
self._no_response_devices.append(request.unit_id)
elif request.unit_id in self._no_response_devices and response:
self._no_response_devices.remove(request.unit_id)
if not response and self.retry_on_empty and retries:
while retries > 0:
if hasattr(self.client, "state"):
_logger.debug("RESETTING Transaction state to 'IDLE' for retry")
self.client.state = ModbusTransactionState.IDLE
_logger.debug("Retry on empty - {}".format(retries))
response, last_exception = self._transact(
request, expected_response_length
)
if not response:
retries -= 1
continue
# Remove entry
self._no_response_devices.remove(request.unit_id)
break
addTransaction = partial(self.addTransaction, tid=request.transaction_id)
self.client.framer.processIncomingPacket(
response, addTransaction, request.unit_id
)
response = self.getTransaction(request.transaction_id)
if not response:
if len(self.transactions):
response = self.getTransaction(tid=0)
else:
last_exception = last_exception or (
"No Response received from the remote unit"
"/Unable to decode response"
)
response = ModbusIOException(last_exception, request.function_code)
if hasattr(self.client, "state"):
_logger.debug(
"Changing transaction state from "
"'PROCESSING REPLY' to "
"'TRANSACTION_COMPLETE'"
)
self.client.state = ModbusTransactionState.TRANSACTION_COMPLETE
return response
except ModbusIOException as ex:
# Handle decode errors in processIncomingPacket method
_logger.exception(ex)
self.client.state = ModbusTransactionState.TRANSACTION_COMPLETE
return ex
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def _transact(self, packet, response_length, full=False, broadcast=False):
"""
Does a Write and Read transaction
:param packet: packet to be sent
:param response_length: Expected response length
:param full: the target device was notorious for its no response. Dont
waste time this time by partial querying
:return: response
"""
last_exception = None
try:
self.client.connect()
packet = self.client.framer.buildPacket(packet)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("SEND: " + hexlify_packets(packet))
size = self._send(packet)
if broadcast:
if size:
_logger.debug(
"Changing transaction state from 'SENDING' "
"to 'TRANSACTION_COMPLETE'"
)
self.client.state = ModbusTransactionState.TRANSACTION_COMPLETE
return b"", None
if size:
_logger.debug(
"Changing transaction state from 'SENDING' to 'WAITING FOR REPLY'"
)
self.client.state = ModbusTransactionState.WAITING_FOR_REPLY
result = self._recv(response_length, full)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("RECV: " + hexlify_packets(result))
except (socket.error, ModbusIOException, InvalidMessageReceivedException) as msg:
self.client.close()
_logger.debug("Transaction failed. (%s) " % msg)
last_exception = msg
result = b""
return result, last_exception
|
def _transact(self, packet, response_length, full=False):
"""
Does a Write and Read transaction
:param packet: packet to be sent
:param response_length: Expected response length
:param full: the target device was notorious for its no response. Dont
waste time this time by partial querying
:return: response
"""
last_exception = None
try:
self.client.connect()
packet = self.client.framer.buildPacket(packet)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("SEND: " + hexlify_packets(packet))
size = self._send(packet)
if size:
_logger.debug(
"Changing transaction state from 'SENDING' to 'WAITING FOR REPLY'"
)
self.client.state = ModbusTransactionState.WAITING_FOR_REPLY
result = self._recv(response_length, full)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug("RECV: " + hexlify_packets(result))
except (socket.error, ModbusIOException, InvalidMessageReceivedException) as msg:
self.client.close()
_logger.debug("Transaction failed. (%s) " % msg)
last_exception = msg
result = b""
return result, last_exception
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def run_binary_payload_ex():
# ----------------------------------------------------------------------- #
# We are going to use a simple client to send our requests
# ----------------------------------------------------------------------- #
client = ModbusClient("127.0.0.1", port=5020)
client.connect()
# ----------------------------------------------------------------------- #
# If you need to build a complex message to send, you can use the payload
# builder to simplify the packing logic.
#
# Here we demonstrate packing a random payload layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int 0x5678
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# - an 32 bit uint 0x12345678
# - an 32 bit signed int -0x1234
# - an 64 bit signed int 0x12345678
# The packing can also be applied to the word (wordorder) and bytes in each
# word (byteorder)
# The wordorder is applicable only for 32 and 64 bit values
# Lets say we need to write a value 0x12345678 to a 32 bit register
# The following combinations could be used to write the register
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Word Order - Big Byte Order - Big
# word1 =0x1234 word2 = 0x5678
# Word Order - Big Byte Order - Little
# word1 =0x3412 word2 = 0x7856
# Word Order - Little Byte Order - Big
# word1 = 0x5678 word2 = 0x1234
# Word Order - Little Byte Order - Little
# word1 =0x7856 word2 = 0x3412
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# ----------------------------------------------------------------------- #
builder = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Little)
builder.add_string("abcdefgh")
builder.add_bits([0, 1, 0, 1, 1, 0, 1, 0])
builder.add_8bit_int(-0x12)
builder.add_8bit_uint(0x12)
builder.add_16bit_int(-0x5678)
builder.add_16bit_uint(0x1234)
builder.add_32bit_int(-0x1234)
builder.add_32bit_uint(0x12345678)
builder.add_32bit_float(22.34)
builder.add_32bit_float(-22.34)
builder.add_64bit_int(-0xDEADBEEF)
builder.add_64bit_uint(0x12345678DEADBEEF)
builder.add_64bit_uint(0x12345678DEADBEEF)
builder.add_64bit_float(123.45)
builder.add_64bit_float(-123.45)
payload = builder.to_registers()
print("-" * 60)
print("Writing Registers")
print("-" * 60)
print(payload)
print("\n")
payload = builder.build()
address = 0
# Can write registers
# registers = builder.to_registers()
# client.write_registers(address, registers, unit=1)
# Or can write encoded binary string
client.write_registers(address, payload, skip_encode=True, unit=1)
# ----------------------------------------------------------------------- #
# If you need to decode a collection of registers in a weird layout, the
# payload decoder can help you as well.
#
# Here we demonstrate decoding a random register layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int which we will ignore
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# ----------------------------------------------------------------------- #
address = 0x0
count = len(payload)
result = client.read_holding_registers(address, count, unit=1)
print("-" * 60)
print("Registers")
print("-" * 60)
print(result.registers)
print("\n")
decoder = BinaryPayloadDecoder.fromRegisters(
result.registers, byteorder=Endian.Big, wordorder=Endian.Little
)
assert decoder._byteorder == builder._byteorder, (
"Make sure byteorder is consistent between BinaryPayloadBuilder and BinaryPayloadDecoder"
)
assert decoder._wordorder == builder._wordorder, (
"Make sure wordorder is consistent between BinaryPayloadBuilder and BinaryPayloadDecoder"
)
decoded = OrderedDict(
[
("string", decoder.decode_string(8)),
("bits", decoder.decode_bits()),
("8int", decoder.decode_8bit_int()),
("8uint", decoder.decode_8bit_uint()),
("16int", decoder.decode_16bit_int()),
("16uint", decoder.decode_16bit_uint()),
("32int", decoder.decode_32bit_int()),
("32uint", decoder.decode_32bit_uint()),
("32float", decoder.decode_32bit_float()),
("32float2", decoder.decode_32bit_float()),
("64int", decoder.decode_64bit_int()),
("64uint", decoder.decode_64bit_uint()),
("ignore", decoder.skip_bytes(8)),
("64float", decoder.decode_64bit_float()),
("64float2", decoder.decode_64bit_float()),
]
)
print("-" * 60)
print("Decoded Data")
print("-" * 60)
for name, value in iteritems(decoded):
print("%s\t" % name, hex(value) if isinstance(value, int) else value)
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
|
def run_binary_payload_ex():
# ----------------------------------------------------------------------- #
# We are going to use a simple client to send our requests
# ----------------------------------------------------------------------- #
client = ModbusClient("127.0.0.1", port=5020)
client.connect()
# ----------------------------------------------------------------------- #
# If you need to build a complex message to send, you can use the payload
# builder to simplify the packing logic.
#
# Here we demonstrate packing a random payload layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int 0x5678
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# - an 32 bit uint 0x12345678
# - an 32 bit signed int -0x1234
# - an 64 bit signed int 0x12345678
# The packing can also be applied to the word (wordorder) and bytes in each
# word (byteorder)
# The wordorder is applicable only for 32 and 64 bit values
# Lets say we need to write a value 0x12345678 to a 32 bit register
# The following combinations could be used to write the register
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Word Order - Big Byte Order - Big
# word1 =0x1234 word2 = 0x5678
# Word Order - Big Byte Order - Little
# word1 =0x3412 word2 = 0x7856
# Word Order - Little Byte Order - Big
# word1 = 0x5678 word2 = 0x1234
# Word Order - Little Byte Order - Little
# word1 =0x7856 word2 = 0x3412
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# ----------------------------------------------------------------------- #
builder = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Little)
builder.add_string("abcdefgh")
builder.add_bits([0, 1, 0, 1, 1, 0, 1, 0])
builder.add_8bit_int(-0x12)
builder.add_8bit_uint(0x12)
builder.add_16bit_int(-0x5678)
builder.add_16bit_uint(0x1234)
builder.add_32bit_int(-0x1234)
builder.add_32bit_uint(0x12345678)
builder.add_32bit_float(22.34)
builder.add_32bit_float(-22.34)
builder.add_64bit_int(-0xDEADBEEF)
builder.add_64bit_uint(0x12345678DEADBEEF)
builder.add_64bit_uint(0x12345678DEADBEEF)
builder.add_64bit_float(123.45)
builder.add_64bit_float(-123.45)
payload = builder.to_registers()
print("-" * 60)
print("Writing Registers")
print("-" * 60)
print(payload)
print("\n")
payload = builder.build()
address = 0
# Can write registers
# registers = builder.to_registers()
# client.write_registers(address, registers, unit=1)
# Or can write encoded binary string
client.write_registers(address, payload, skip_encode=True, unit=1)
# ----------------------------------------------------------------------- #
# If you need to decode a collection of registers in a weird layout, the
# payload decoder can help you as well.
#
# Here we demonstrate decoding a random register layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int which we will ignore
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# ----------------------------------------------------------------------- #
address = 0x0
count = len(payload)
result = client.read_holding_registers(address, count, unit=1)
print("-" * 60)
print("Registers")
print("-" * 60)
print(result.registers)
print("\n")
decoder = BinaryPayloadDecoder.fromRegisters(
result.registers, byteorder=Endian.Little, wordorder=Endian.Little
)
decoded = OrderedDict(
[
("string", decoder.decode_string(8)),
("bits", decoder.decode_bits()),
("8int", decoder.decode_8bit_int()),
("8uint", decoder.decode_8bit_uint()),
("16int", decoder.decode_16bit_int()),
("16uint", decoder.decode_16bit_uint()),
("32int", decoder.decode_32bit_int()),
("32uint", decoder.decode_32bit_uint()),
("32float", decoder.decode_32bit_float()),
("32float2", decoder.decode_32bit_float()),
("64int", decoder.decode_64bit_int()),
("64uint", decoder.decode_64bit_uint()),
("ignore", decoder.skip_bytes(8)),
("64float", decoder.decode_64bit_float()),
("64float2", decoder.decode_64bit_float()),
]
)
print("-" * 60)
print("Decoded Data")
print("-" * 60)
for name, value in iteritems(decoded):
print("%s\t" % name, hex(value) if isinstance(value, int) else value)
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def run_sync_client():
# ------------------------------------------------------------------------#
# choose the client you want
# ------------------------------------------------------------------------#
# make sure to start an implementation to hit against. For this
# you can use an existing device, the reference implementation in the tools
# directory, or start a pymodbus server.
#
# If you use the UDP or TCP clients, you can override the framer being used
# to use a custom implementation (say RTU over TCP). By default they use
# the socket framer::
#
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
#
# It should be noted that you can supply an ipv4 or an ipv6 host address
# for both the UDP and TCP clients.
#
# There are also other options that can be set on the client that controls
# how transactions are performed. The current ones are:
#
# * retries - Specify how many retries to allow per transaction (default=3)
# * retry_on_empty - Is an empty response a retry (default = False)
# * source_address - Specifies the TCP source address to bind to
# * strict - Applicable only for Modbus RTU clients.
# Adheres to modbus protocol for timing restrictions
# (default = True).
# Setting this to False would disable the inter char timeout
# restriction (t1.5) for Modbus RTU
#
#
# Here is an example of using these options::
#
# client = ModbusClient('localhost', retries=3, retry_on_empty=True)
# ------------------------------------------------------------------------#
client = ModbusClient("localhost", port=5020)
# from pymodbus.transaction import ModbusRtuFramer
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
# client = ModbusClient(method='binary', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='ascii', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='rtu', port='/dev/ptyp0', timeout=1,
# baudrate=9600)
client.connect()
# ------------------------------------------------------------------------#
# specify slave to query
# ------------------------------------------------------------------------#
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# ----------------------------------------------------------------------- #
log.debug("Reading Coils")
rr = client.read_coils(1, 1, unit=UNIT)
log.debug(rr)
# ----------------------------------------------------------------------- #
# example requests
# ----------------------------------------------------------------------- #
# simply call the methods that you would like to use. An example session
# is displayed below along with some assert checks. Note that some modbus
# implementations differentiate holding/input discrete/coils and as such
# you will not be able to write to these, therefore the starting values
# are not known to these tests. Furthermore, some use the same memory
# blocks for the two sets, so a change to one is a change to the other.
# Keep both of these cases in mind when testing as the following will
# _only_ pass with the supplied asynchronous modbus server (script supplied).
# ----------------------------------------------------------------------- #
log.debug("Write to a Coil and read back")
rq = client.write_coil(0, True, unit=UNIT)
rr = client.read_coils(0, 1, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.bits[0] == True # test the expected value
log.debug("Write to multiple coils and read back- test 1")
rq = client.write_coils(1, [True] * 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
rr = client.read_coils(1, 21, unit=UNIT)
assert not rr.isError() # test that we are not an error
resp = [True] * 21
# If the returned output quantity is not a multiple of eight,
# the remaining bits in the final data byte will be padded with zeros
# (toward the high order end of the byte).
resp.extend([False] * 3)
assert rr.bits == resp # test the expected value
log.debug("Write to multiple coils and read back - test 2")
rq = client.write_coils(1, [False] * 8, unit=UNIT)
rr = client.read_coils(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.bits == [False] * 8 # test the expected value
log.debug("Read discrete inputs")
rr = client.read_discrete_inputs(0, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
log.debug("Write to a holding register and read back")
rq = client.write_register(1, 10, unit=UNIT)
rr = client.read_holding_registers(1, 1, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.registers[0] == 10 # test the expected value
log.debug("Write to multiple holding registers and read back")
rq = client.write_registers(1, [10] * 8, unit=UNIT)
rr = client.read_holding_registers(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.registers == [10] * 8 # test the expected value
log.debug("Read input registers")
rr = client.read_input_registers(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
arguments = {
"read_address": 1,
"read_count": 8,
"write_address": 1,
"write_registers": [20] * 8,
}
log.debug("Read write registeres simulataneously")
rq = client.readwrite_registers(unit=UNIT, **arguments)
rr = client.read_holding_registers(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rq.registers == [20] * 8 # test the expected value
assert rr.registers == [20] * 8 # test the expected value
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
|
def run_sync_client():
# ------------------------------------------------------------------------#
# choose the client you want
# ------------------------------------------------------------------------#
# make sure to start an implementation to hit against. For this
# you can use an existing device, the reference implementation in the tools
# directory, or start a pymodbus server.
#
# If you use the UDP or TCP clients, you can override the framer being used
# to use a custom implementation (say RTU over TCP). By default they use
# the socket framer::
#
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
#
# It should be noted that you can supply an ipv4 or an ipv6 host address
# for both the UDP and TCP clients.
#
# There are also other options that can be set on the client that controls
# how transactions are performed. The current ones are:
#
# * retries - Specify how many retries to allow per transaction (default=3)
# * retry_on_empty - Is an empty response a retry (default = False)
# * source_address - Specifies the TCP source address to bind to
#
# Here is an example of using these options::
#
# client = ModbusClient('localhost', retries=3, retry_on_empty=True)
# ------------------------------------------------------------------------#
client = ModbusClient("localhost", port=5020)
# from pymodbus.transaction import ModbusRtuFramer
# client = ModbusClient('localhost', port=5020, framer=ModbusRtuFramer)
# client = ModbusClient(method='binary', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='ascii', port='/dev/ptyp0', timeout=1)
# client = ModbusClient(method='rtu', port='/dev/ptyp0', timeout=1,
# baudrate=9600)
client.connect()
# ------------------------------------------------------------------------#
# specify slave to query
# ------------------------------------------------------------------------#
# The slave to query is specified in an optional parameter for each
# individual request. This can be done by specifying the `unit` parameter
# which defaults to `0x00`
# ----------------------------------------------------------------------- #
log.debug("Reading Coils")
rr = client.read_coils(1, 1, unit=UNIT)
log.debug(rr)
# ----------------------------------------------------------------------- #
# example requests
# ----------------------------------------------------------------------- #
# simply call the methods that you would like to use. An example session
# is displayed below along with some assert checks. Note that some modbus
# implementations differentiate holding/input discrete/coils and as such
# you will not be able to write to these, therefore the starting values
# are not known to these tests. Furthermore, some use the same memory
# blocks for the two sets, so a change to one is a change to the other.
# Keep both of these cases in mind when testing as the following will
# _only_ pass with the supplied async modbus server (script supplied).
# ----------------------------------------------------------------------- #
log.debug("Write to a Coil and read back")
rq = client.write_coil(0, True, unit=UNIT)
rr = client.read_coils(0, 1, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.bits[0] == True # test the expected value
log.debug("Write to multiple coils and read back- test 1")
rq = client.write_coils(1, [True] * 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
rr = client.read_coils(1, 21, unit=UNIT)
assert not rr.isError() # test that we are not an error
resp = [True] * 21
# If the returned output quantity is not a multiple of eight,
# the remaining bits in the final data byte will be padded with zeros
# (toward the high order end of the byte).
resp.extend([False] * 3)
assert rr.bits == resp # test the expected value
log.debug("Write to multiple coils and read back - test 2")
rq = client.write_coils(1, [False] * 8, unit=UNIT)
rr = client.read_coils(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.bits == [False] * 8 # test the expected value
log.debug("Read discrete inputs")
rr = client.read_discrete_inputs(0, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
log.debug("Write to a holding register and read back")
rq = client.write_register(1, 10, unit=UNIT)
rr = client.read_holding_registers(1, 1, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.registers[0] == 10 # test the expected value
log.debug("Write to multiple holding registers and read back")
rq = client.write_registers(1, [10] * 8, unit=UNIT)
rr = client.read_holding_registers(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rr.registers == [10] * 8 # test the expected value
log.debug("Read input registers")
rr = client.read_input_registers(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
arguments = {
"read_address": 1,
"read_count": 8,
"write_address": 1,
"write_registers": [20] * 8,
}
log.debug("Read write registeres simulataneously")
rq = client.readwrite_registers(unit=UNIT, **arguments)
rr = client.read_holding_registers(1, 8, unit=UNIT)
assert not rq.isError() # test that we are not an error
assert rq.registers == [20] * 8 # test the expected value
assert rr.registers == [20] * 8 # test the expected value
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def __init__(self, method="ascii", **kwargs):
"""Initialize a serial client instance
The methods to connect are::
- ascii
- rtu
- binary
:param method: The method to use for connection
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout between serial requests (default 3s)
:param strict: Use Inter char timeout for baudrates <= 19200 (adhere
to modbus standards)
"""
self.method = method
self.socket = None
BaseModbusClient.__init__(self, self.__implementation(method, self), **kwargs)
self.port = kwargs.get("port", 0)
self.stopbits = kwargs.get("stopbits", Defaults.Stopbits)
self.bytesize = kwargs.get("bytesize", Defaults.Bytesize)
self.parity = kwargs.get("parity", Defaults.Parity)
self.baudrate = kwargs.get("baudrate", Defaults.Baudrate)
self.timeout = kwargs.get("timeout", Defaults.Timeout)
self._strict = kwargs.get("strict", True)
self.last_frame_end = None
if self.method == "rtu":
if self.baudrate > 19200:
self.silent_interval = 1.75 / 1000 # ms
else:
self._t0 = float((1 + 8 + 2)) / self.baudrate
self.inter_char_timeout = 1.5 * self._t0
self.silent_interval = 3.5 * self._t0
self.silent_interval = round(self.silent_interval, 6)
|
def __init__(self, method="ascii", **kwargs):
"""Initialize a serial client instance
The methods to connect are::
- ascii
- rtu
- binary
:param method: The method to use for connection
:param port: The serial port to attach to
:param stopbits: The number of stop bits to use
:param bytesize: The bytesize of the serial messages
:param parity: Which kind of parity to use
:param baudrate: The baud rate to use for the serial device
:param timeout: The timeout between serial requests (default 3s)
"""
self.method = method
self.socket = None
BaseModbusClient.__init__(self, self.__implementation(method, self), **kwargs)
self.port = kwargs.get("port", 0)
self.stopbits = kwargs.get("stopbits", Defaults.Stopbits)
self.bytesize = kwargs.get("bytesize", Defaults.Bytesize)
self.parity = kwargs.get("parity", Defaults.Parity)
self.baudrate = kwargs.get("baudrate", Defaults.Baudrate)
self.timeout = kwargs.get("timeout", Defaults.Timeout)
self.last_frame_end = None
if self.method == "rtu":
if self.baudrate > 19200:
self.silent_interval = 1.75 / 1000 # ms
else:
self._t0 = float((1 + 8 + 2)) / self.baudrate
self.inter_char_timeout = 1.5 * self._t0
self.silent_interval = 3.5 * self._t0
self.silent_interval = round(self.silent_interval, 6)
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def connect(self):
"""Connect to the modbus serial server
:returns: True if connection succeeded, False otherwise
"""
if self.socket:
return True
try:
self.socket = serial.Serial(
port=self.port,
timeout=self.timeout,
bytesize=self.bytesize,
stopbits=self.stopbits,
baudrate=self.baudrate,
parity=self.parity,
)
if self.method == "rtu":
if self._strict:
self.socket.interCharTimeout = self.inter_char_timeout
self.last_frame_end = None
except serial.SerialException as msg:
_logger.error(msg)
self.close()
return self.socket is not None
|
def connect(self):
"""Connect to the modbus serial server
:returns: True if connection succeeded, False otherwise
"""
if self.socket:
return True
try:
self.socket = serial.Serial(
port=self.port,
timeout=self.timeout,
bytesize=self.bytesize,
stopbits=self.stopbits,
baudrate=self.baudrate,
parity=self.parity,
)
except serial.SerialException as msg:
_logger.error(msg)
self.close()
if self.method == "rtu":
self.socket.interCharTimeout = self.inter_char_timeout
self.last_frame_end = None
return self.socket is not None
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def _get(self, type, offset, count):
"""
:param type: The key prefix to use
:param offset: The address offset to start at
:param count: The number of bits to read
:returns: The resulting values
"""
query = self._table.select(
and_(
self._table.c.type == type,
self._table.c.index >= offset,
self._table.c.index <= offset + count - 1,
)
)
query = query.order_by(self._table.c.index.asc())
result = self._connection.execute(query).fetchall()
return [row.value for row in result]
|
def _get(self, type, offset, count):
"""
:param type: The key prefix to use
:param offset: The address offset to start at
:param count: The number of bits to read
:returns: The resulting values
"""
query = self._table.select(
and_(
self._table.c.type == type,
self._table.c.index >= offset,
self._table.c.index <= offset + count,
)
)
query = query.order_by(self._table.c.index.asc())
result = self._connection.execute(query).fetchall()
return [row.value for row in result]
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def _validate(self, type, offset, count):
"""
:param key: The key prefix to use
:param offset: The address offset to start at
:param count: The number of bits to read
:returns: The result of the validation
"""
query = self._table.select(
and_(
self._table.c.type == type,
self._table.c.index >= offset,
self._table.c.index <= offset + count - 1,
)
)
result = self._connection.execute(query).fetchall()
return len(result) == count
|
def _validate(self, type, offset, count):
"""
:param key: The key prefix to use
:param offset: The address offset to start at
:param count: The number of bits to read
:returns: The result of the validation
"""
query = self._table.select(
and_(
self._table.c.type == type,
self._table.c.index >= offset,
self._table.c.index <= offset + count,
)
)
result = self._connection.execute(query)
return result.rowcount == count
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def _validate_unit_id(self, units, single):
"""
Validates if the received data is valid for the client
:param units: list of unit id for which the transaction is valid
:param single: Set to true to treat this as a single context
:return:"""
if single:
return True
else:
if 0 in units or 0xFF in units:
# Handle Modbus TCP unit identifier (0x00 0r 0xFF)
# in asynchronous requests
return True
return self._header["uid"] in units
|
def _validate_unit_id(self, units, single):
"""
Validates if the received data is valid for the client
:param units: list of unit id for which the transaction is valid
:param single: Set to true to treat this as a single context
:return:"""
if single:
return True
else:
if 0 in units or 0xFF in units:
# Handle Modbus TCP unit identifier (0x00 0r 0xFF)
# in async requests
return True
return self._header["uid"] in units
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a
list of unit ids (server) or single unit id(client/server))
:param single: True or False (If True, ignore unit address validation)
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get("single", False)
self.addToFrame(data)
while self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
frame = self.getFrame()
result = self.decoder.decode(frame)
if result is None:
raise ModbusIOException("Unable to decode response")
self.populateResult(result)
self.advanceFrame()
callback(result) # defer this
else:
_logger.error(
"Not a valid unit id - {}, ignoring!!".format(self._header["uid"])
)
self.resetFrame()
else:
break
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a \
list of unit ids (server) or single unit id(client/server))
:param single: True or False (If True, ignore unit address validation)
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get("single", False)
self.addToFrame(data)
while self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
frame = self.getFrame()
result = self.decoder.decode(frame)
if result is None:
raise ModbusIOException("Unable to decode response")
self.populateResult(result)
self.advanceFrame()
callback(result) # defer this
else:
_logger.error(
"Not a valid unit id - {}, ignoring!!".format(self._header["uid"])
)
self.resetFrame()
else:
break
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a
list of unit ids (server) or single unit id(client/server)
:param single: True or False (If True, ignore unit address validation)
"""
self.addToFrame(data)
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get("single", False)
while self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
result = self.decoder.decode(self.getFrame())
if result is None:
raise ModbusIOException("Unable to decode response")
self.populateResult(result)
self.advanceFrame()
callback(result) # defer or push to a thread?
else:
_logger.debug(
"Not a valid unit id - {}, ignoring!!".format(self._header["uid"])
)
self.resetFrame()
break
else:
_logger.debug("Frame check failed, ignoring!!")
self.resetFrame()
break
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a \
list of unit ids (server) or single unit id(client/server)
:param single: True or False (If True, ignore unit address validation)
"""
self.addToFrame(data)
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get("single", False)
while self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
result = self.decoder.decode(self.getFrame())
if result is None:
raise ModbusIOException("Unable to decode response")
self.populateResult(result)
self.advanceFrame()
callback(result) # defer or push to a thread?
else:
_logger.debug(
"Not a valid unit id - {}, ignoring!!".format(self._header["uid"])
)
self.resetFrame()
break
else:
_logger.debug("Frame check failed, ignoring!!")
self.resetFrame()
break
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a
list of unit ids (server) or single unit id(client/server)
:param single: True or False (If True, ignore unit address validation)
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
self.addToFrame(data)
single = kwargs.get("single", False)
if self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
self._process(callback)
else:
_logger.debug(
"Not a valid unit id - {}, ignoring!!".format(self._header["uid"])
)
self.resetFrame()
else:
_logger.debug("Frame check failed, ignoring!!")
self.resetFrame()
else:
_logger.debug("Frame - [{}] not ready".format(data))
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a \
list of unit ids (server) or single unit id(client/server)
:param single: True or False (If True, ignore unit address validation)
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
self.addToFrame(data)
single = kwargs.get("single", False)
if self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
self._process(callback)
else:
_logger.debug(
"Not a valid unit id - {}, ignoring!!".format(self._header["uid"])
)
self.resetFrame()
else:
_logger.debug("Frame - [{}] not ready".format(data))
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a
list of unit ids (server) or single unit id(client/server)
:param single: True or False (If True, ignore unit address validation)
:return:
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get("single", False)
_logger.debug("Processing: " + hexlify_packets(data))
self.addToFrame(data)
while True:
if self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
self._process(callback)
else:
_logger.debug(
"Not a valid unit id - {}, ignoring!!".format(
self._header["uid"]
)
)
self.resetFrame()
else:
_logger.debug("Frame check failed, ignoring!!")
self.resetFrame()
else:
if len(self._buffer):
# Possible error ???
if self._header["len"] < 2:
self._process(callback, error=True)
break
|
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a \
list of unit ids (server) or single unit id(client/server)
:param single: True or False (If True, ignore unit address validation)
:return:
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get("single", False)
_logger.debug("Processing: " + hexlify_packets(data))
self.addToFrame(data)
while True:
if self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
self._process(callback)
else:
_logger.debug(
"Not a valid unit id - {}, ignoring!!".format(
self._header["uid"]
)
)
self.resetFrame()
else:
_logger.debug("Frame check failed, ignoring!!")
self.resetFrame()
else:
if len(self._buffer):
# Possible error ???
if self._header["len"] < 2:
self._process(callback, error=True)
break
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def connect(self):
"""Connect to the modbus serial server
:returns: True if connection succeeded, False otherwise
"""
if self.socket:
return True
try:
self.socket = serial.Serial(
port=self.port,
timeout=self.timeout,
bytesize=self.bytesize,
stopbits=self.stopbits,
baudrate=self.baudrate,
parity=self.parity,
)
if self.method == "rtu":
if self._strict:
self.socket.interCharTimeout = self.inter_char_timeout
self.last_frame_end = None
except serial.SerialException as msg:
_logger.error(msg)
self.close()
return self.socket is not None
|
def connect(self):
"""Connect to the modbus serial server
:returns: True if connection succeeded, False otherwise
"""
if self.socket:
return True
try:
self.socket = serial.Serial(
port=self.port,
timeout=self.timeout,
bytesize=self.bytesize,
stopbits=self.stopbits,
baudrate=self.baudrate,
parity=self.parity,
)
except serial.SerialException as msg:
_logger.error(msg)
self.close()
if self.method == "rtu":
if self._strict:
self.socket.interCharTimeout = self.inter_char_timeout
self.last_frame_end = None
return self.socket is not None
|
https://github.com/riptideio/pymodbus/issues/377
|
client = ModbusSerialClient(port='notexist', method='rtu')
client.connect()
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "lib/python3.6/site-packages/pymodbus/client/sync.py", line 476, in connect
self.socket.interCharTimeout = self.inter_char_timeout
AttributeError: 'NoneType' object has no attribute 'interCharTimeout'
|
AttributeError
|
def run_binary_payload_ex():
# ----------------------------------------------------------------------- #
# We are going to use a simple client to send our requests
# ----------------------------------------------------------------------- #
client = ModbusClient("127.0.0.1", port=5440)
client.connect()
# ----------------------------------------------------------------------- #
# If you need to build a complex message to send, you can use the payload
# builder to simplify the packing logic.
#
# Here we demonstrate packing a random payload layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int 0x5678
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# ----------------------------------------------------------------------- #
builder = BinaryPayloadBuilder(endian=Endian.Little)
builder.add_string("abcdefgh")
builder.add_32bit_float(22.34)
builder.add_16bit_uint(0x1234)
builder.add_16bit_uint(0x5678)
builder.add_8bit_int(0x12)
builder.add_bits([0, 1, 0, 1, 1, 0, 1, 0])
payload = builder.build()
address = 0
result = client.write_registers(address, payload, skip_encode=True, unit=1)
# ----------------------------------------------------------------------- #
# If you need to decode a collection of registers in a weird layout, the
# payload decoder can help you as well.
#
# Here we demonstrate decoding a random register layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int which we will ignore
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# ----------------------------------------------------------------------- #
address = 0x00
count = len(payload)
result = client.read_holding_registers(address, count, unit=1)
decoder = BinaryPayloadDecoder.fromRegisters(result.registers, endian=Endian.Little)
decoded = {
"string": decoder.decode_string(8),
"float": decoder.decode_32bit_float(),
"16uint": decoder.decode_16bit_uint(),
"ignored": decoder.skip_bytes(2),
"8int": decoder.decode_8bit_int(),
"bits": decoder.decode_bits(),
}
print("-" * 60)
print("Decoded Data")
print("-" * 60)
for name, value in iteritems(decoded):
print("%s\t" % name, hex(value) if isinstance(value, int) else value)
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
|
def run_binary_payload_ex():
# ----------------------------------------------------------------------- #
# We are going to use a simple client to send our requests
# ----------------------------------------------------------------------- #
client = ModbusClient("127.0.0.1", port=5020)
client.connect()
# ----------------------------------------------------------------------- #
# If you need to build a complex message to send, you can use the payload
# builder to simplify the packing logic.
#
# Here we demonstrate packing a random payload layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int 0x5678
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# ----------------------------------------------------------------------- #
builder = BinaryPayloadBuilder(endian=Endian.Big)
builder.add_string("abcdefgh")
builder.add_32bit_float(22.34)
builder.add_16bit_uint(0x1234)
builder.add_16bit_uint(0x5678)
builder.add_8bit_int(0x12)
builder.add_bits([0, 1, 0, 1, 1, 0, 1, 0])
payload = builder.build()
address = 0
result = client.write_registers(address, payload, skip_encode=True, unit=1)
# ----------------------------------------------------------------------- #
# If you need to decode a collection of registers in a weird layout, the
# payload decoder can help you as well.
#
# Here we demonstrate decoding a random register layout, unpacked it looks
# like the following:
#
# - a 8 byte string 'abcdefgh'
# - a 32 bit float 22.34
# - a 16 bit unsigned int 0x1234
# - another 16 bit unsigned int which we will ignore
# - an 8 bit int 0x12
# - an 8 bit bitstring [0,1,0,1,1,0,1,0]
# ----------------------------------------------------------------------- #
address = 0x00
count = 8
result = client.read_holding_registers(address, count, unit=1)
decoder = BinaryPayloadDecoder.fromRegisters(result.registers, endian=Endian.Big)
decoded = {
"string": decoder.decode_string(8),
"float": decoder.decode_32bit_float(),
"16uint": decoder.decode_16bit_uint(),
"ignored": decoder.skip_bytes(2),
"8int": decoder.decode_8bit_int(),
"bits": decoder.decode_bits(),
}
print("-" * 60)
print("Decoded Data")
print("-" * 60)
for name, value in iteritems(decoded):
print("%s\t" % name, value)
# ----------------------------------------------------------------------- #
# close the client
# ----------------------------------------------------------------------- #
client.close()
|
https://github.com/riptideio/pymodbus/issues/255
|
from pymodbus.client.sync import ModbusTcpClient as Client
import logging
# import time
import struct
from pymodbus.payload import BinaryPayloadBuilder
from pymodbus.payload import BinaryPayloadDecoder
from pymodbus.constants import Endian
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
write_value = 23
builder = BinaryPayloadBuilder(endian=Endian.Little)
builder.add_16bit_uint(write_value)
payload = builder.build()
#
count = 1
#
#
# c = Client(method="rtu", port="/tmp/ttyp0", timeout=2)
c = Client(port=5440)
c.connect()
#
r = c.write_registers(0, payload, skip_encode=True, unit=1)
r = c.read_holding_registers(0, 1, unit=1, skip_encode=True)
decoder = BinaryPayloadDecoder.fromRegisters(r.registers, endian=Endian.Little)
read_value = decoder.decode_16bit_uint()
log.debug("Wrote - {} , Read - {}".format(write_value, read_value))
assert read_value == write_value
c.close()
### Logs
DEBUG:pymodbus.transaction:Running transaction 1
DEBUG:pymodbus.transaction:send: 0x0 0x1 0x0 0x0 0x0 0x9 0x1 0x10 0x0 0x0 0x0 0x1 0x2 0x17 0x0
DEBUG:pymodbus.transaction:recv: 0x0 0x1 0x0 0x0 0x0 0x6 0x1 0x10 0x0 0x0 0x0 0x1
DEBUG:pymodbus.transaction:0x0 0x1 0x0 0x0 0x0 0x6 0x1 0x10 0x0 0x0 0x0 0x1
DEBUG:pymodbus.factory:Factory Response[16]
DEBUG:pymodbus.transaction:adding transaction 1
DEBUG:pymodbus.transaction:getting transaction 1
DEBUG:pymodbus.transaction:Running transaction 2
DEBUG:pymodbus.transaction:send: 0x0 0x2 0x0 0x0 0x0 0x6 0x1 0x3 0x0 0x0 0x0 0x1
DEBUG:pymodbus.transaction:recv: 0x0 0x2 0x0 0x0 0x0 0x5 0x1 0x3 0x2 0x17 0x0
DEBUG:pymodbus.transaction:0x0 0x2 0x0 0x0 0x0 0x5 0x1 0x3 0x2 0x17 0x0
DEBUG:pymodbus.factory:Factory Response[3]
DEBUG:pymodbus.transaction:adding transaction 2
DEBUG:pymodbus.transaction:getting transaction 2
DEBUG:root:Wrote - 23 , Read - 5888
Traceback (most recent call last):
File "/Users/sanjay/Library/Preferences/PyCharm2017.3/scratches/scratch_49.py", line 31, in <module>
assert read_value == write_value
AssertionError
|
AssertionError
|
def fromRegisters(klass, registers, endian=Endian.Little):
"""Initialize a payload decoder with the result of
reading a collection of registers from a modbus device.
The registers are treated as a list of 2 byte values.
We have to do this because of how the data has already
been decoded by the rest of the library.
:param registers: The register results to initialize with
:param endian: The endianess of the payload
:returns: An initialized PayloadDecoder
"""
if isinstance(registers, list): # repack into flat binary
payload = b"".join(pack("!H", x) for x in registers)
return klass(payload, endian)
raise ParameterException("Invalid collection of registers supplied")
|
def fromRegisters(klass, registers, endian=Endian.Little):
"""Initialize a payload decoder with the result of
reading a collection of registers from a modbus device.
The registers are treated as a list of 2 byte values.
We have to do this because of how the data has already
been decoded by the rest of the library.
:param registers: The register results to initialize with
:param endian: The endianess of the payload
:returns: An initialized PayloadDecoder
"""
if isinstance(registers, list): # repack into flat binary
payload = b"".join(pack(endian + "H", x) for x in registers)
return klass(payload, endian)
raise ParameterException("Invalid collection of registers supplied")
|
https://github.com/riptideio/pymodbus/issues/255
|
from pymodbus.client.sync import ModbusTcpClient as Client
import logging
# import time
import struct
from pymodbus.payload import BinaryPayloadBuilder
from pymodbus.payload import BinaryPayloadDecoder
from pymodbus.constants import Endian
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
write_value = 23
builder = BinaryPayloadBuilder(endian=Endian.Little)
builder.add_16bit_uint(write_value)
payload = builder.build()
#
count = 1
#
#
# c = Client(method="rtu", port="/tmp/ttyp0", timeout=2)
c = Client(port=5440)
c.connect()
#
r = c.write_registers(0, payload, skip_encode=True, unit=1)
r = c.read_holding_registers(0, 1, unit=1, skip_encode=True)
decoder = BinaryPayloadDecoder.fromRegisters(r.registers, endian=Endian.Little)
read_value = decoder.decode_16bit_uint()
log.debug("Wrote - {} , Read - {}".format(write_value, read_value))
assert read_value == write_value
c.close()
### Logs
DEBUG:pymodbus.transaction:Running transaction 1
DEBUG:pymodbus.transaction:send: 0x0 0x1 0x0 0x0 0x0 0x9 0x1 0x10 0x0 0x0 0x0 0x1 0x2 0x17 0x0
DEBUG:pymodbus.transaction:recv: 0x0 0x1 0x0 0x0 0x0 0x6 0x1 0x10 0x0 0x0 0x0 0x1
DEBUG:pymodbus.transaction:0x0 0x1 0x0 0x0 0x0 0x6 0x1 0x10 0x0 0x0 0x0 0x1
DEBUG:pymodbus.factory:Factory Response[16]
DEBUG:pymodbus.transaction:adding transaction 1
DEBUG:pymodbus.transaction:getting transaction 1
DEBUG:pymodbus.transaction:Running transaction 2
DEBUG:pymodbus.transaction:send: 0x0 0x2 0x0 0x0 0x0 0x6 0x1 0x3 0x0 0x0 0x0 0x1
DEBUG:pymodbus.transaction:recv: 0x0 0x2 0x0 0x0 0x0 0x5 0x1 0x3 0x2 0x17 0x0
DEBUG:pymodbus.transaction:0x0 0x2 0x0 0x0 0x0 0x5 0x1 0x3 0x2 0x17 0x0
DEBUG:pymodbus.factory:Factory Response[3]
DEBUG:pymodbus.transaction:adding transaction 2
DEBUG:pymodbus.transaction:getting transaction 2
DEBUG:root:Wrote - 23 , Read - 5888
Traceback (most recent call last):
File "/Users/sanjay/Library/Preferences/PyCharm2017.3/scratches/scratch_49.py", line 31, in <module>
assert read_value == write_value
AssertionError
|
AssertionError
|
def coerce_type(module, value):
# If our value is already None we can just return directly
if value is None:
return value
yaml_ish = bool(
(value.startswith("{") and value.endswith("}"))
or (value.startswith("[") and value.endswith("]"))
)
if yaml_ish:
if not HAS_YAML:
module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
return yaml.safe_load(value)
elif value.lower in ("true", "false", "t", "f"):
return {"t": True, "f": False}[value[0].lower()]
try:
return int(value)
except ValueError:
pass
return value
|
def coerce_type(module, value):
yaml_ish = bool(
(value.startswith("{") and value.endswith("}"))
or (value.startswith("[") and value.endswith("]"))
)
if yaml_ish:
if not HAS_YAML:
module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
return yaml.safe_load(value)
elif value.lower in ("true", "false", "t", "f"):
return {"t": True, "f": False}[value[0].lower()]
try:
return int(value)
except ValueError:
pass
return value
|
https://github.com/ansible/awx/issues/7267
|
The full traceback is:
Traceback (most recent call last):
File "/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py", line 102, in <module>
_ansiballz_main()
File "/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py", line 94, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File "/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py", line 40, in invoke_module
runpy.run_module(mod_name='ansible_collections.awx.awx.plugins.modules.tower_settings', init_globals=None, run_name='__main__', alter_sys=True)
File "/usr/lib64/python3.8/runpy.py", line 206, in run_module
return _run_module_code(code, init_globals, run_name, mod_spec)
File "/usr/lib64/python3.8/runpy.py", line 96, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/usr/lib64/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py", line 184, in <module>
File "/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py", line 136, in main
File "/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py", line 95, in coerce_type
AttributeError: 'NoneType' object has no attribute 'startswith'
failed: [localhost] (item={'name': 'AWX_TASK_ENV', 'setting': {'GIT_SSL_NO_VERIFY': 'True'}}) => {
"ansible_loop_var": "tower_setting_item",
"changed": false,
"module_stderr": "Traceback (most recent call last):\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 102, in <module>\n _ansiballz_main()\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 94, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 40, in invoke_module\n runpy.run_module(mod_name='ansible_collections.awx.awx.plugins.modules.tower_settings', init_globals=None, run_name='__main__', alter_sys=True)\n File \"/usr/lib64/python3.8/runpy.py\", line 206, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/lib64/python3.8/runpy.py\", line 96, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/usr/lib64/python3.8/runpy.py\", line 86, in _run_code\n exec(code, run_globals)\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 184, in <module>\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 136, in main\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 95, in coerce_type\nAttributeError: 'NoneType' object has no attribute 'startswith'\n",
"module_stdout": "",
"msg": "MODULE FAILURE\nSee stdout/stderr for the exact error",
"rc": 1,
"tower_setting_item": {
"name": "AWX_TASK_ENV",
"setting": {
"GIT_SSL_NO_VERIFY": "True"
}
}
}
|
AttributeError
|
def consume_events(self):
# discover new events and ingest them
events_path = self.path_to("artifacts", self.ident, "job_events")
# it's possible that `events_path` doesn't exist *yet*, because runner
# hasn't actually written any events yet (if you ran e.g., a sleep 30)
# only attempt to consume events if any were rsynced back
if os.path.exists(events_path):
for event in set(os.listdir(events_path)) - self.handled_events:
path = os.path.join(events_path, event)
if os.path.exists(path) and os.path.isfile(path):
try:
event_data = json.load(open(os.path.join(events_path, event), "r"))
except json.decoder.JSONDecodeError:
# This means the event we got back isn't valid JSON
# that can happen if runner is still partially
# writing an event file while it's rsyncing
# these event writes are _supposed_ to be atomic
# but it doesn't look like they actually are in
# practice
# in this scenario, just ignore this event and try it
# again on the next sync
continue
self.event_handler(event_data)
self.handled_events.add(event)
|
def consume_events(self):
# discover new events and ingest them
events_path = self.path_to("artifacts", self.ident, "job_events")
# it's possible that `events_path` doesn't exist *yet*, because runner
# hasn't actually written any events yet (if you ran e.g., a sleep 30)
# only attempt to consume events if any were rsynced back
if os.path.exists(events_path):
for event in set(os.listdir(events_path)) - self.handled_events:
path = os.path.join(events_path, event)
if os.path.exists(path):
try:
event_data = json.load(open(os.path.join(events_path, event), "r"))
except json.decoder.JSONDecodeError:
# This means the event we got back isn't valid JSON
# that can happen if runner is still partially
# writing an event file while it's rsyncing
# these event writes are _supposed_ to be atomic
# but it doesn't look like they actually are in
# practice
# in this scenario, just ignore this event and try it
# again on the next sync
continue
self.event_handler(event_data)
self.handled_events.add(event)
|
https://github.com/ansible/awx/issues/6675
|
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/tasks.py", line 1468, in run
ident=str(self.instance.pk))
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/isolated/manager.py", line 422, in run status, rc = self.check()
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/isolated/manager.py", line 241, in check
self.consume_events()
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/isolated/manager.py", line 293, in consume_events
open(os.path.join(events_path, event), 'r')
IsADirectoryError: [Errno 21] Is a directory: '/tmp/awx_754_hs8ylxru/artifacts/754/job_events/.~tmp~'
|
IsADirectoryError
|
def get_user_capabilities(
self, obj, method_list=[], parent_obj=None, capabilities_cache={}
):
if obj is None:
return {}
user_capabilities = {}
# Custom ordering to loop through methods so we can reuse earlier calcs
for display_method in [
"edit",
"delete",
"start",
"schedule",
"copy",
"adhoc",
"unattach",
]:
if display_method not in method_list:
continue
if not settings.MANAGE_ORGANIZATION_AUTH and isinstance(obj, (Team, User)):
user_capabilities[display_method] = self.user.is_superuser
continue
# Actions not possible for reason unrelated to RBAC
# Cannot copy with validation errors, or update a manual group/project
if "write" not in getattr(self.user, "oauth_scopes", ["write"]):
user_capabilities[display_method] = (
False # Read tokens cannot take any actions
)
continue
elif display_method in ["copy", "start", "schedule"] and isinstance(
obj, JobTemplate
):
if obj.validation_errors:
user_capabilities[display_method] = False
continue
elif (
display_method == "copy"
and isinstance(obj, WorkflowJobTemplate)
and obj.organization_id is None
):
user_capabilities[display_method] = self.user.is_superuser
continue
elif (
display_method == "copy" and isinstance(obj, Project) and obj.scm_type == ""
):
# Cannot copy manual project without errors
user_capabilities[display_method] = False
continue
elif display_method in ["start", "schedule"] and isinstance(obj, (Project)):
if obj.scm_type == "":
user_capabilities[display_method] = False
continue
# Grab the answer from the cache, if available
if display_method in capabilities_cache:
user_capabilities[display_method] = capabilities_cache[display_method]
if self.user.is_superuser and not user_capabilities[display_method]:
# Cache override for models with bad orphaned state
user_capabilities[display_method] = True
continue
# Aliases for going form UI language to API language
if display_method == "edit":
method = "change"
elif display_method == "adhoc":
method = "run_ad_hoc_commands"
else:
method = display_method
# Shortcuts in certain cases by deferring to earlier property
if display_method == "schedule":
user_capabilities["schedule"] = user_capabilities["start"]
continue
elif display_method == "delete" and not isinstance(
obj, (User, UnifiedJob, CustomInventoryScript, CredentialInputSource)
):
user_capabilities["delete"] = user_capabilities["edit"]
continue
elif display_method == "copy" and isinstance(obj, (Group, Host)):
user_capabilities["copy"] = user_capabilities["edit"]
continue
# Compute permission
user_capabilities[display_method] = self.get_method_capability(
method, obj, parent_obj
)
return user_capabilities
|
def get_user_capabilities(
self, obj, method_list=[], parent_obj=None, capabilities_cache={}
):
if obj is None:
return {}
user_capabilities = {}
# Custom ordering to loop through methods so we can reuse earlier calcs
for display_method in [
"edit",
"delete",
"start",
"schedule",
"copy",
"adhoc",
"unattach",
]:
if display_method not in method_list:
continue
if not settings.MANAGE_ORGANIZATION_AUTH and isinstance(obj, (Team, User)):
user_capabilities[display_method] = self.user.is_superuser
continue
# Actions not possible for reason unrelated to RBAC
# Cannot copy with validation errors, or update a manual group/project
if "write" not in getattr(self.user, "oauth_scopes", ["write"]):
user_capabilities[display_method] = (
False # Read tokens cannot take any actions
)
continue
elif display_method in ["copy", "start", "schedule"] and isinstance(
obj, JobTemplate
):
if obj.validation_errors:
user_capabilities[display_method] = False
continue
elif (
display_method == "copy"
and isinstance(obj, WorkflowJobTemplate)
and obj.organization_id is None
):
user_capabilities[display_method] = self.user.is_superuser
continue
elif (
display_method == "copy" and isinstance(obj, Project) and obj.scm_type == ""
):
# Cannot copy manual project without errors
user_capabilities[display_method] = False
continue
elif display_method in ["start", "schedule"] and isinstance(
obj, Group
): # TODO: remove in 3.3
try:
if (
obj.deprecated_inventory_source
and not obj.deprecated_inventory_source._can_update()
):
user_capabilities[display_method] = False
continue
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
user_capabilities[display_method] = False
continue
elif display_method in ["start", "schedule"] and isinstance(obj, (Project)):
if obj.scm_type == "":
user_capabilities[display_method] = False
continue
# Grab the answer from the cache, if available
if display_method in capabilities_cache:
user_capabilities[display_method] = capabilities_cache[display_method]
if self.user.is_superuser and not user_capabilities[display_method]:
# Cache override for models with bad orphaned state
user_capabilities[display_method] = True
continue
# Aliases for going form UI language to API language
if display_method == "edit":
method = "change"
elif display_method == "adhoc":
method = "run_ad_hoc_commands"
else:
method = display_method
# Shortcuts in certain cases by deferring to earlier property
if display_method == "schedule":
user_capabilities["schedule"] = user_capabilities["start"]
continue
elif display_method == "delete" and not isinstance(
obj, (User, UnifiedJob, CustomInventoryScript, CredentialInputSource)
):
user_capabilities["delete"] = user_capabilities["edit"]
continue
elif display_method == "copy" and isinstance(obj, (Group, Host)):
user_capabilities["copy"] = user_capabilities["edit"]
continue
# Compute permission
user_capabilities[display_method] = self.get_method_capability(
method, obj, parent_obj
)
return user_capabilities
|
https://github.com/ansible/awx/issues/6309
|
Traceback (most recent call last): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1308, in run args = self.build_args(self.instance, private_data_dir, passwords) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 2472, in build_args if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 511, in __getattr_without_cache__ return getattr(self._wrapped, name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 418, in __getattr__ value = self._get_default(name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 405, in _get_default return getattr(self.default_settings, name) AttributeError: 'Settings' object has no attribute '_EXCLUDE_EMPTY_GROUPS'
|
AttributeError
|
def _delete_groups(self):
"""
# If overwrite is set, for each group in the database that is NOT in
# the local list, delete it. When importing from a cloud inventory
# source attached to a specific group, only delete children of that
# group. Delete each group individually so signal handlers will run.
"""
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
groups_qs = self.inventory_source.groups.all()
# Build list of all group pks, remove those that should not be deleted.
del_group_pks = set(groups_qs.values_list("pk", flat=True))
all_group_names = list(self.all_group.all_groups.keys())
for offset in range(0, len(all_group_names), self._batch_size):
group_names = all_group_names[offset : (offset + self._batch_size)]
for group_pk in groups_qs.filter(name__in=group_names).values_list(
"pk", flat=True
):
del_group_pks.discard(group_pk)
# Now delete all remaining groups in batches.
all_del_pks = sorted(list(del_group_pks))
for offset in range(0, len(all_del_pks), self._batch_size):
del_pks = all_del_pks[offset : (offset + self._batch_size)]
for group in groups_qs.filter(pk__in=del_pks):
group_name = group.name
with ignore_inventory_computed_fields():
group.delete()
logger.debug('Group "%s" deleted', group_name)
if settings.SQL_DEBUG:
logger.warning(
"group deletions took %d queries for %d groups",
len(connection.queries) - queries_before,
len(all_del_pks),
)
|
def _delete_groups(self):
"""
# If overwrite is set, for each group in the database that is NOT in
# the local list, delete it. When importing from a cloud inventory
# source attached to a specific group, only delete children of that
# group. Delete each group individually so signal handlers will run.
"""
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
groups_qs = self.inventory_source.groups.all()
# Build list of all group pks, remove those that should not be deleted.
del_group_pks = set(groups_qs.values_list("pk", flat=True))
all_group_names = list(self.all_group.all_groups.keys())
for offset in range(0, len(all_group_names), self._batch_size):
group_names = all_group_names[offset : (offset + self._batch_size)]
for group_pk in groups_qs.filter(name__in=group_names).values_list(
"pk", flat=True
):
del_group_pks.discard(group_pk)
if (
self.inventory_source.deprecated_group_id in del_group_pks
): # TODO: remove in 3.3
logger.warning(
'Group "%s" from v1 API is not deleted by overwrite',
self.inventory_source.deprecated_group.name,
)
del_group_pks.discard(self.inventory_source.deprecated_group_id)
# Now delete all remaining groups in batches.
all_del_pks = sorted(list(del_group_pks))
for offset in range(0, len(all_del_pks), self._batch_size):
del_pks = all_del_pks[offset : (offset + self._batch_size)]
for group in groups_qs.filter(pk__in=del_pks):
group_name = group.name
with ignore_inventory_computed_fields():
group.delete()
logger.debug('Group "%s" deleted', group_name)
if settings.SQL_DEBUG:
logger.warning(
"group deletions took %d queries for %d groups",
len(connection.queries) - queries_before,
len(all_del_pks),
)
|
https://github.com/ansible/awx/issues/6309
|
Traceback (most recent call last): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1308, in run args = self.build_args(self.instance, private_data_dir, passwords) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 2472, in build_args if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 511, in __getattr_without_cache__ return getattr(self._wrapped, name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 418, in __getattr__ value = self._get_default(name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 405, in _get_default return getattr(self.default_settings, name) AttributeError: 'Settings' object has no attribute '_EXCLUDE_EMPTY_GROUPS'
|
AttributeError
|
def _delete_group_children_and_hosts(self):
"""
Clear all invalid child relationships for groups and all invalid host
memberships. When importing from a cloud inventory source attached to
a specific group, only clear relationships for hosts and groups that
are beneath the inventory source group.
"""
# FIXME: Optimize performance!
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
group_group_count = 0
group_host_count = 0
db_groups = self.inventory_source.groups
# Set of all group names managed by this inventory source
all_source_group_names = frozenset(self.all_group.all_groups.keys())
# Set of all host pks managed by this inventory source
all_source_host_pks = self._existing_host_pks()
for db_group in db_groups.all():
# Delete child group relationships not present in imported data.
db_children = db_group.children
db_children_name_pk_map = dict(db_children.values_list("name", "pk"))
# Exclude child groups from removal list if they were returned by
# the import, because this parent-child relationship has not changed
mem_children = self.all_group.all_groups[db_group.name].children
for mem_group in mem_children:
db_children_name_pk_map.pop(mem_group.name, None)
# Exclude child groups from removal list if they were not imported
# by this specific inventory source, because
# those relationships are outside of the dominion of this inventory source
other_source_group_names = (
set(db_children_name_pk_map.keys()) - all_source_group_names
)
for group_name in other_source_group_names:
db_children_name_pk_map.pop(group_name, None)
# Removal list is complete - now perform the removals
del_child_group_pks = list(set(db_children_name_pk_map.values()))
for offset in range(0, len(del_child_group_pks), self._batch_size):
child_group_pks = del_child_group_pks[offset : (offset + self._batch_size)]
for db_child in db_children.filter(pk__in=child_group_pks):
group_group_count += 1
db_group.children.remove(db_child)
logger.debug(
'Group "%s" removed from group "%s"', db_child.name, db_group.name
)
# FIXME: Inventory source group relationships
# Delete group/host relationships not present in imported data.
db_hosts = db_group.hosts
del_host_pks = set(db_hosts.values_list("pk", flat=True))
# Exclude child hosts from removal list if they were not imported
# by this specific inventory source, because
# those relationships are outside of the dominion of this inventory source
del_host_pks = del_host_pks & all_source_host_pks
# Exclude child hosts from removal list if they were returned by
# the import, because this group-host relationship has not changed
mem_hosts = self.all_group.all_groups[db_group.name].hosts
all_mem_host_names = [h.name for h in mem_hosts if not h.instance_id]
for offset in range(0, len(all_mem_host_names), self._batch_size):
mem_host_names = all_mem_host_names[offset : (offset + self._batch_size)]
for db_host_pk in db_hosts.filter(name__in=mem_host_names).values_list(
"pk", flat=True
):
del_host_pks.discard(db_host_pk)
all_mem_instance_ids = [h.instance_id for h in mem_hosts if h.instance_id]
for offset in range(0, len(all_mem_instance_ids), self._batch_size):
mem_instance_ids = all_mem_instance_ids[
offset : (offset + self._batch_size)
]
for db_host_pk in db_hosts.filter(
instance_id__in=mem_instance_ids
).values_list("pk", flat=True):
del_host_pks.discard(db_host_pk)
all_db_host_pks = [
v for k, v in self.db_instance_id_map.items() if k in all_mem_instance_ids
]
for db_host_pk in all_db_host_pks:
del_host_pks.discard(db_host_pk)
# Removal list is complete - now perform the removals
del_host_pks = list(del_host_pks)
for offset in range(0, len(del_host_pks), self._batch_size):
del_pks = del_host_pks[offset : (offset + self._batch_size)]
for db_host in db_hosts.filter(pk__in=del_pks):
group_host_count += 1
if db_host not in db_group.hosts.all():
continue
db_group.hosts.remove(db_host)
logger.debug(
'Host "%s" removed from group "%s"', db_host.name, db_group.name
)
if settings.SQL_DEBUG:
logger.warning(
"group-group and group-host deletions took %d queries for %d relationships",
len(connection.queries) - queries_before,
group_group_count + group_host_count,
)
|
def _delete_group_children_and_hosts(self):
"""
Clear all invalid child relationships for groups and all invalid host
memberships. When importing from a cloud inventory source attached to
a specific group, only clear relationships for hosts and groups that
are beneath the inventory source group.
"""
# FIXME: Optimize performance!
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
group_group_count = 0
group_host_count = 0
db_groups = self.inventory_source.groups
# Set of all group names managed by this inventory source
all_source_group_names = frozenset(self.all_group.all_groups.keys())
# Set of all host pks managed by this inventory source
all_source_host_pks = self._existing_host_pks()
for db_group in db_groups.all():
if (
self.inventory_source.deprecated_group_id == db_group.id
): # TODO: remove in 3.3
logger.debug(
'Group "%s" from v1 API child group/host connections preserved',
db_group.name,
)
continue
# Delete child group relationships not present in imported data.
db_children = db_group.children
db_children_name_pk_map = dict(db_children.values_list("name", "pk"))
# Exclude child groups from removal list if they were returned by
# the import, because this parent-child relationship has not changed
mem_children = self.all_group.all_groups[db_group.name].children
for mem_group in mem_children:
db_children_name_pk_map.pop(mem_group.name, None)
# Exclude child groups from removal list if they were not imported
# by this specific inventory source, because
# those relationships are outside of the dominion of this inventory source
other_source_group_names = (
set(db_children_name_pk_map.keys()) - all_source_group_names
)
for group_name in other_source_group_names:
db_children_name_pk_map.pop(group_name, None)
# Removal list is complete - now perform the removals
del_child_group_pks = list(set(db_children_name_pk_map.values()))
for offset in range(0, len(del_child_group_pks), self._batch_size):
child_group_pks = del_child_group_pks[offset : (offset + self._batch_size)]
for db_child in db_children.filter(pk__in=child_group_pks):
group_group_count += 1
db_group.children.remove(db_child)
logger.debug(
'Group "%s" removed from group "%s"', db_child.name, db_group.name
)
# FIXME: Inventory source group relationships
# Delete group/host relationships not present in imported data.
db_hosts = db_group.hosts
del_host_pks = set(db_hosts.values_list("pk", flat=True))
# Exclude child hosts from removal list if they were not imported
# by this specific inventory source, because
# those relationships are outside of the dominion of this inventory source
del_host_pks = del_host_pks & all_source_host_pks
# Exclude child hosts from removal list if they were returned by
# the import, because this group-host relationship has not changed
mem_hosts = self.all_group.all_groups[db_group.name].hosts
all_mem_host_names = [h.name for h in mem_hosts if not h.instance_id]
for offset in range(0, len(all_mem_host_names), self._batch_size):
mem_host_names = all_mem_host_names[offset : (offset + self._batch_size)]
for db_host_pk in db_hosts.filter(name__in=mem_host_names).values_list(
"pk", flat=True
):
del_host_pks.discard(db_host_pk)
all_mem_instance_ids = [h.instance_id for h in mem_hosts if h.instance_id]
for offset in range(0, len(all_mem_instance_ids), self._batch_size):
mem_instance_ids = all_mem_instance_ids[
offset : (offset + self._batch_size)
]
for db_host_pk in db_hosts.filter(
instance_id__in=mem_instance_ids
).values_list("pk", flat=True):
del_host_pks.discard(db_host_pk)
all_db_host_pks = [
v for k, v in self.db_instance_id_map.items() if k in all_mem_instance_ids
]
for db_host_pk in all_db_host_pks:
del_host_pks.discard(db_host_pk)
# Removal list is complete - now perform the removals
del_host_pks = list(del_host_pks)
for offset in range(0, len(del_host_pks), self._batch_size):
del_pks = del_host_pks[offset : (offset + self._batch_size)]
for db_host in db_hosts.filter(pk__in=del_pks):
group_host_count += 1
if db_host not in db_group.hosts.all():
continue
db_group.hosts.remove(db_host)
logger.debug(
'Host "%s" removed from group "%s"', db_host.name, db_group.name
)
if settings.SQL_DEBUG:
logger.warning(
"group-group and group-host deletions took %d queries for %d relationships",
len(connection.queries) - queries_before,
group_group_count + group_host_count,
)
|
https://github.com/ansible/awx/issues/6309
|
Traceback (most recent call last): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1308, in run args = self.build_args(self.instance, private_data_dir, passwords) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 2472, in build_args if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 511, in __getattr_without_cache__ return getattr(self._wrapped, name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 418, in __getattr__ value = self._get_default(name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 405, in _get_default return getattr(self.default_settings, name) AttributeError: 'Settings' object has no attribute '_EXCLUDE_EMPTY_GROUPS'
|
AttributeError
|
def websocket_emit_data(self):
websocket_data = super(InventoryUpdate, self).websocket_emit_data()
websocket_data.update(dict(inventory_source_id=self.inventory_source.pk))
if self.inventory_source.inventory is not None:
websocket_data.update(dict(inventory_id=self.inventory_source.inventory.pk))
return websocket_data
|
def websocket_emit_data(self):
websocket_data = super(InventoryUpdate, self).websocket_emit_data()
websocket_data.update(dict(inventory_source_id=self.inventory_source.pk))
if self.inventory_source.inventory is not None:
websocket_data.update(dict(inventory_id=self.inventory_source.inventory.pk))
if self.inventory_source.deprecated_group is not None: # TODO: remove in 3.3
websocket_data.update(dict(group_id=self.inventory_source.deprecated_group.id))
return websocket_data
|
https://github.com/ansible/awx/issues/6309
|
Traceback (most recent call last): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1308, in run args = self.build_args(self.instance, private_data_dir, passwords) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 2472, in build_args if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 511, in __getattr_without_cache__ return getattr(self._wrapped, name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 418, in __getattr__ value = self._get_default(name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 405, in _get_default return getattr(self.default_settings, name) AttributeError: 'Settings' object has no attribute '_EXCLUDE_EMPTY_GROUPS'
|
AttributeError
|
def activity_stream_create(sender, instance, created, **kwargs):
if created and activity_stream_enabled:
_type = type(instance)
if getattr(_type, "_deferred", False):
return
object1 = camelcase_to_underscore(instance.__class__.__name__)
changes = model_to_dict(instance, model_serializer_mapping())
# Special case where Job survey password variables need to be hidden
if type(instance) == Job:
changes["credentials"] = [
"{} ({})".format(c.name, c.id) for c in instance.credentials.iterator()
]
changes["labels"] = [l.name for l in instance.labels.iterator()]
if "extra_vars" in changes:
changes["extra_vars"] = instance.display_extra_vars()
if type(instance) == OAuth2AccessToken:
changes["token"] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation="create",
object1=object1,
changes=json.dumps(changes),
actor=get_current_user_or_none(),
)
# TODO: Weird situation where cascade SETNULL doesn't work
# it might actually be a good idea to remove all of these FK references since
# we don't really use them anyway.
if instance._meta.model_name != "setting": # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance.pk)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
connection.on_commit(lambda: emit_activity_stream_change(activity_entry))
|
def activity_stream_create(sender, instance, created, **kwargs):
if created and activity_stream_enabled:
# TODO: remove deprecated_group conditional in 3.3
# Skip recording any inventory source directly associated with a group.
if isinstance(instance, InventorySource) and instance.deprecated_group:
return
_type = type(instance)
if getattr(_type, "_deferred", False):
return
object1 = camelcase_to_underscore(instance.__class__.__name__)
changes = model_to_dict(instance, model_serializer_mapping())
# Special case where Job survey password variables need to be hidden
if type(instance) == Job:
changes["credentials"] = [
"{} ({})".format(c.name, c.id) for c in instance.credentials.iterator()
]
changes["labels"] = [l.name for l in instance.labels.iterator()]
if "extra_vars" in changes:
changes["extra_vars"] = instance.display_extra_vars()
if type(instance) == OAuth2AccessToken:
changes["token"] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation="create",
object1=object1,
changes=json.dumps(changes),
actor=get_current_user_or_none(),
)
# TODO: Weird situation where cascade SETNULL doesn't work
# it might actually be a good idea to remove all of these FK references since
# we don't really use them anyway.
if instance._meta.model_name != "setting": # Is not conf.Setting instance
activity_entry.save()
getattr(activity_entry, object1).add(instance.pk)
else:
activity_entry.setting = conf_to_dict(instance)
activity_entry.save()
connection.on_commit(lambda: emit_activity_stream_change(activity_entry))
|
https://github.com/ansible/awx/issues/6309
|
Traceback (most recent call last): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1308, in run args = self.build_args(self.instance, private_data_dir, passwords) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 2472, in build_args if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 511, in __getattr_without_cache__ return getattr(self._wrapped, name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 418, in __getattr__ value = self._get_default(name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 405, in _get_default return getattr(self.default_settings, name) AttributeError: 'Settings' object has no attribute '_EXCLUDE_EMPTY_GROUPS'
|
AttributeError
|
def activity_stream_delete(sender, instance, **kwargs):
if not activity_stream_enabled:
return
# Inventory delete happens in the task system rather than request-response-cycle.
# If we trigger this handler there we may fall into db-integrity-related race conditions.
# So we add flag verification to prevent normal signal handling. This funciton will be
# explicitly called with flag on in Inventory.schedule_deletion.
changes = {}
if isinstance(instance, Inventory):
if not kwargs.get("inventory_delete_flag", False):
return
# Add additional data about child hosts / groups that will be deleted
changes["coalesced_data"] = {
"hosts_deleted": instance.hosts.count(),
"groups_deleted": instance.groups.count(),
}
elif isinstance(instance, (Host, Group)) and instance.inventory.pending_deletion:
return # accounted for by inventory entry, above
_type = type(instance)
if getattr(_type, "_deferred", False):
return
changes.update(model_to_dict(instance, model_serializer_mapping()))
object1 = camelcase_to_underscore(instance.__class__.__name__)
if type(instance) == OAuth2AccessToken:
changes["token"] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation="delete",
changes=json.dumps(changes),
object1=object1,
actor=get_current_user_or_none(),
)
activity_entry.save()
connection.on_commit(lambda: emit_activity_stream_change(activity_entry))
|
def activity_stream_delete(sender, instance, **kwargs):
if not activity_stream_enabled:
return
# TODO: remove deprecated_group conditional in 3.3
# Skip recording any inventory source directly associated with a group.
if isinstance(instance, InventorySource) and instance.deprecated_group:
return
# Inventory delete happens in the task system rather than request-response-cycle.
# If we trigger this handler there we may fall into db-integrity-related race conditions.
# So we add flag verification to prevent normal signal handling. This funciton will be
# explicitly called with flag on in Inventory.schedule_deletion.
changes = {}
if isinstance(instance, Inventory):
if not kwargs.get("inventory_delete_flag", False):
return
# Add additional data about child hosts / groups that will be deleted
changes["coalesced_data"] = {
"hosts_deleted": instance.hosts.count(),
"groups_deleted": instance.groups.count(),
}
elif isinstance(instance, (Host, Group)) and instance.inventory.pending_deletion:
return # accounted for by inventory entry, above
_type = type(instance)
if getattr(_type, "_deferred", False):
return
changes.update(model_to_dict(instance, model_serializer_mapping()))
object1 = camelcase_to_underscore(instance.__class__.__name__)
if type(instance) == OAuth2AccessToken:
changes["token"] = CENSOR_VALUE
activity_entry = get_activity_stream_class()(
operation="delete",
changes=json.dumps(changes),
object1=object1,
actor=get_current_user_or_none(),
)
activity_entry.save()
connection.on_commit(lambda: emit_activity_stream_change(activity_entry))
|
https://github.com/ansible/awx/issues/6309
|
Traceback (most recent call last): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1308, in run args = self.build_args(self.instance, private_data_dir, passwords) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 2472, in build_args if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()): File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 511, in __getattr_without_cache__ return getattr(self._wrapped, name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 418, in __getattr__ value = self._get_default(name) File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/conf/settings.py", line 405, in _get_default return getattr(self.default_settings, name) AttributeError: 'Settings' object has no attribute '_EXCLUDE_EMPTY_GROUPS'
|
AttributeError
|
def consume_events(self):
# discover new events and ingest them
events_path = self.path_to("artifacts", self.ident, "job_events")
# it's possible that `events_path` doesn't exist *yet*, because runner
# hasn't actually written any events yet (if you ran e.g., a sleep 30)
# only attempt to consume events if any were rsynced back
if os.path.exists(events_path):
for event in set(os.listdir(events_path)) - self.handled_events:
path = os.path.join(events_path, event)
if os.path.exists(path):
try:
event_data = json.load(open(os.path.join(events_path, event), "r"))
except json.decoder.JSONDecodeError:
# This means the event we got back isn't valid JSON
# that can happen if runner is still partially
# writing an event file while it's rsyncing
# these event writes are _supposed_ to be atomic
# but it doesn't look like they actually are in
# practice
# in this scenario, just ignore this event and try it
# again on the next sync
continue
self.event_handler(event_data)
self.handled_events.add(event)
|
def consume_events(self):
# discover new events and ingest them
events_path = self.path_to("artifacts", self.ident, "job_events")
# it's possible that `events_path` doesn't exist *yet*, because runner
# hasn't actually written any events yet (if you ran e.g., a sleep 30)
# only attempt to consume events if any were rsynced back
if os.path.exists(events_path):
for event in set(os.listdir(events_path)) - self.handled_events:
path = os.path.join(events_path, event)
if os.path.exists(path):
try:
event_data = json.load(open(os.path.join(events_path, event), "r"))
except json.decoder.JSONDecodeError:
# This means the event we got back isn't valid JSON
# that can happen if runner is still partially
# writing an event file while it's rsyncing
# these event writes are _supposed_ to be atomic
# but it doesn't look like they actually are in
# practice
# in this scenario, just ignore this event and try it
# again on the next sync
pass
self.event_handler(event_data)
self.handled_events.add(event)
|
https://github.com/ansible/awx/issues/6280
|
2020-03-12 23:43:10,040 ERROR awx.isolated.manager.playbooks
PLAY [Poll for status of active job.] ******************************************
TASK [Determine if daemon process is alive.] ***********************************
changed: [awx-job-164]
TASK [Copy artifacts from the isolated host.] **********************************
skipping: [awx-job-164]
TASK [Copy daemon log from the isolated host] **********************************
skipping: [awx-job-164]
TASK [Copy artifacts from pod] *************************************************
2020-03-12 23:43:10,042 ERROR awx.main.tasks job 164 (running) Exception occurred while running task
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/tasks.py", line 1401, in run
ident=str(self.instance.pk))
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/isolated/manager.py", line 432, in run
status, rc = self.check()
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/isolated/manager.py", line 241, in check
self.consume_events(dispatcher)
File "/var/lib/awx/venv/awx/lib/python3.6/site-packages/awx/main/isolated/manager.py", line 294, in consume_events
open(os.path.join(events_path, event), 'r')
IsADirectoryError: [Errno 21] Is a directory: '/tmp/awx_164_pojd2p9l/artifacts/164/job_events/.~tmp~'
2020-03-12 23:43:10,044 DEBUG awx.main.tasks job 164 (running) finished running, producing 0 events.
|
IsADirectoryError
|
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(required=False),
extra_vars=dict(type="dict", required=False),
organization=dict(required=False),
allow_simultaneous=dict(type="bool", required=False),
schema=dict(type="list", elements="dict", required=False),
survey=dict(required=False),
survey_enabled=dict(type="bool", required=False),
inventory=dict(required=False),
ask_inventory=dict(type="bool", required=False),
ask_extra_vars=dict(type="bool", required=False),
state=dict(choices=["present", "absent"], default="present"),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=False)
name = module.params.get("name")
state = module.params.get("state")
schema = None
if module.params.get("schema"):
schema = module.params.get("schema")
if schema and state == "absent":
module.fail_json(
msg="Setting schema when state is absent is not allowed", changed=False
)
json_output = {"workflow_template": name, "state": state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
wfjt_res = tower_cli.get_resource("workflow")
params = {}
params["name"] = name
if module.params.get("description"):
params["description"] = module.params.get("description")
if module.params.get("organization"):
organization_res = tower_cli.get_resource("organization")
try:
organization = organization_res.get(
name=module.params.get("organization")
)
params["organization"] = organization["id"]
except exc.NotFound as excinfo:
module.fail_json(
msg="Failed to update organization source,"
"organization not found: {0}".format(excinfo),
changed=False,
)
if module.params.get("survey"):
params["survey_spec"] = module.params.get("survey")
if module.params.get("ask_extra_vars"):
params["ask_variables_on_launch"] = module.params.get("ask_extra_vars")
if module.params.get("ask_inventory"):
params["ask_inventory_on_launch"] = module.params.get("ask_inventory")
for key in ("allow_simultaneous", "inventory", "survey_enabled", "description"):
if module.params.get(key):
params[key] = module.params.get(key)
# Special treatment for tower-cli extra_vars
extra_vars = module.params.get("extra_vars")
if extra_vars:
params["extra_vars"] = [json.dumps(extra_vars)]
try:
if state == "present":
params["create_on_missing"] = True
result = wfjt_res.modify(**params)
json_output["id"] = result["id"]
if schema:
wfjt_res.schema(result["id"], json.dumps(schema))
elif state == "absent":
params["fail_on_missing"] = False
result = wfjt_res.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
module.fail_json(
msg="Failed to update workflow template: \
{0}".format(excinfo),
changed=False,
)
json_output["changed"] = result["changed"]
module.exit_json(**json_output)
|
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(required=False),
extra_vars=dict(required=False),
organization=dict(required=False),
allow_simultaneous=dict(type="bool", required=False),
schema=dict(required=False),
survey=dict(required=False),
survey_enabled=dict(type="bool", required=False),
inventory=dict(required=False),
ask_inventory=dict(type="bool", required=False),
ask_extra_vars=dict(type="bool", required=False),
state=dict(choices=["present", "absent"], default="present"),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=False)
name = module.params.get("name")
state = module.params.get("state")
schema = None
if module.params.get("schema"):
schema = module.params.get("schema")
if schema and state == "absent":
module.fail_json(
msg="Setting schema when state is absent is not allowed", changed=False
)
json_output = {"workflow_template": name, "state": state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
wfjt_res = tower_cli.get_resource("workflow")
params = {}
params["name"] = name
if module.params.get("description"):
params["description"] = module.params.get("description")
if module.params.get("organization"):
organization_res = tower_cli.get_resource("organization")
try:
organization = organization_res.get(
name=module.params.get("organization")
)
params["organization"] = organization["id"]
except exc.NotFound as excinfo:
module.fail_json(
msg="Failed to update organization source,"
"organization not found: {0}".format(excinfo),
changed=False,
)
if module.params.get("survey"):
params["survey_spec"] = module.params.get("survey")
if module.params.get("ask_extra_vars"):
params["ask_variables_on_launch"] = module.params.get("ask_extra_vars")
if module.params.get("ask_inventory"):
params["ask_inventory_on_launch"] = module.params.get("ask_inventory")
for key in (
"allow_simultaneous",
"extra_vars",
"inventory",
"survey_enabled",
"description",
):
if module.params.get(key):
params[key] = module.params.get(key)
try:
if state == "present":
params["create_on_missing"] = True
result = wfjt_res.modify(**params)
json_output["id"] = result["id"]
if schema:
wfjt_res.schema(result["id"], schema)
elif state == "absent":
params["fail_on_missing"] = False
result = wfjt_res.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
module.fail_json(
msg="Failed to update workflow template: \
{0}".format(excinfo),
changed=False,
)
json_output["changed"] = result["changed"]
module.exit_json(**json_output)
|
https://github.com/ansible/awx/issues/6167
|
The full traceback is:
Traceback (most recent call last):
File "<stdin>", line 102, in <module>
File "<stdin>", line 94, in _ansiballz_main
File "<stdin>", line 40, in invoke_module
File "/usr/lib64/python2.7/runpy.py", line 176, in run_module
fname, loader, pkg_name)
File "/usr/lib64/python2.7/runpy.py", line 82, in _run_module_code
mod_name, mod_fname, mod_loader, pkg_name)
File "/usr/lib64/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/tmp/ansible_tower_workflow_template_payload_sphiyR/ansible_tower_workflow_template_payload.zip/ansible/modules/web_infrastructure/ansible_tower/tower_workflow_template.py", line 203, in <module>
File "/tmp/ansible_tower_workflow_template_payload_sphiyR/ansible_tower_workflow_template_payload.zip/ansible/modules/web_infrastructure/ansible_tower/tower_workflow_template.py", line 187, in main
File "/usr/lib/python2.7/site-packages/tower_cli/models/base.py", line 720, in modify
return self.write(pk, create_on_missing=create_on_missing, force_on_exists=True, **kwargs)
File "/usr/lib/python2.7/site-packages/tower_cli/models/base.py", line 1187, in write
kwargs['extra_vars'] = parser.process_extra_vars(kwargs['extra_vars'])
File "/usr/lib/python2.7/site-packages/tower_cli/utils/parser.py", line 130, in process_extra_vars
opt_dict = string_to_dict(extra_vars_opt, allow_kv=True)
File "/usr/lib/python2.7/site-packages/tower_cli/utils/parser.py", line 105, in string_to_dict
'variables.\nvariables: \n%s' % var_string
tower_cli.exceptions.TowerCLIError: failed to parse some of the extra variables.
variables:
{
|
tower_cli.exceptions.TowerCLIError
|
def validate_host_filter(self, host_filter):
if host_filter:
try:
for match in JSONBField.get_lookups().keys():
if match == "exact":
# __exact is allowed
continue
match = "__{}".format(match)
if re.match("ansible_facts[^=]+{}=".format(match), host_filter):
raise models.base.ValidationError(
{
"host_filter": "ansible_facts does not support searching with {}".format(
match
)
}
)
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(str(e))
return host_filter
|
def validate_host_filter(self, host_filter):
if host_filter:
try:
for match in JSONBField.get_lookups().keys():
if match == "exact":
# __exact is allowed
continue
match = "__{}".format(match)
if re.match("ansible_facts[^=]+{}=".format(match), host_filter):
raise models.base.ValidationError(
{
"host_filter": "ansible_facts does not support searching with {}".format(
match
)
}
)
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
return host_filter
|
https://github.com/ansible/awx/issues/6250
|
The full traceback is:
Traceback (most recent call last):
File "<stdin>", line 102, in <module>
File "<stdin>", line 94, in _ansiballz_main
File "<stdin>", line 40, in invoke_module
File "/usr/lib64/python2.7/runpy.py", line 176, in run_module
fname, loader, pkg_name)
File "/usr/lib64/python2.7/runpy.py", line 82, in _run_module_code
mod_name, mod_fname, mod_loader, pkg_name)
File "/usr/lib64/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/tmp/ansible_tower_inventory_payload_Ptt6kB/ansible_tower_inventory_payload.zip/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py", line 130, in <module>
File "/tmp/ansible_tower_inventory_payload_Ptt6kB/ansible_tower_inventory_payload.zip/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py", line 116, in main
File "/usr/lib/python2.7/site-packages/tower_cli/models/base.py", line 720, in modify
return self.write(pk, create_on_missing=create_on_missing, force_on_exists=True, **kwargs)
File "/usr/lib/python2.7/site-packages/tower_cli/models/base.py", line 424, in write
r = getattr(client, method.lower())(url, data=kwargs)
File "/usr/lib/python2.7/site-packages/requests/sessions.py", line 578, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/usr/lib/python2.7/site-packages/tower_cli/api.py", line 262, in request
raise exc.ServerError('The Tower server sent back a server error. '
tower_cli.exceptions.ServerError: The Tower server sent back a server error. Please try again later.
failed: [dhcp-3-213.vms.sat.rdu2.redhat.com] (item={'name': 'Test Inventory - Smart', 'organization': 'Default', 'description': 'created by Ansible Playbook', 'state': 'present', 'kind': 'smart', 'host_filter': 'ansible'}) => {
"ansible_loop_var": "item",
"changed": false,
"item": {
"description": "created by Ansible Playbook",
"host_filter": "ansible",
"kind": "smart",
"name": "Test Inventory - Smart",
"organization": "Default",
"state": "present"
},
"module_stderr": "Traceback (most recent call last):\n File \"<stdin>\", line 102, in <module>\n File \"<stdin>\", line 94, in _ansiballz_main\n File \"<stdin>\", line 40, in invoke_module\n File \"/usr/lib64/python2.7/runpy.py\", line 176, in run_module\n fname, loader, pkg_name)\n File \"/usr/lib64/python2.7/runpy.py\", line 82, in _run_module_code\n mod_name, mod_fname, mod_loader, pkg_name)\n File \"/usr/lib64/python2.7/runpy.py\", line 72, in _run_code\n exec code in run_globals\n File \"/tmp/ansible_tower_inventory_payload_Ptt6kB/ansible_tower_inventory_payload.zip/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py\", line 130, in <module>\n File \"/tmp/ansible_tower_inventory_payload_Ptt6kB/ansible_tower_inventory_payload.zip/ansible/modules/web_infrastructure/ansible_tower/tower_inventory.py\", line 116, in main\n File \"/usr/lib/python2.7/site-packages/tower_cli/models/base.py\", line 720, in modify\n return self.write(pk, create_on_missing=create_on_missing, force_on_exists=True, **kwargs)\n File \"/usr/lib/python2.7/site-packages/tower_cli/models/base.py\", line 424, in write\n r = getattr(client, method.lower())(url, data=kwargs)\n File \"/usr/lib/python2.7/site-packages/requests/sessions.py\", line 578, in post\n return self.request('POST', url, data=data, json=json, **kwargs)\n File \"/usr/lib/python2.7/site-packages/tower_cli/api.py\", line 262, in request\n raise exc.ServerError('The Tower server sent back a server error. '\ntower_cli.exceptions.ServerError: The Tower server sent back a server error. Please try again later.\n",
"module_stdout": "",
"msg": "MODULE FAILURE\nSee stdout/stderr for the exact error",
"rc": 1
}
|
tower_cli.exceptions.ServerError
|
def finish_job_fact_cache(self, destination, modification_times):
for host in self._get_inventory_hosts():
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error(
"facts for host {} could not be cached".format(smart_str(host.name))
)
continue
if os.path.exists(filepath):
# If the file changed since we wrote it pre-playbook run...
modified = os.path.getmtime(filepath)
if modified > modification_times.get(filepath, 0):
with codecs.open(filepath, "r", encoding="utf-8") as f:
try:
ansible_facts = json.load(f)
except ValueError:
continue
host.ansible_facts = ansible_facts
host.ansible_facts_modified = now()
ansible_local = ansible_facts.get("ansible_local", {}).get(
"insights", {}
)
ansible_facts = ansible_facts.get("insights", {})
ansible_local_system_id = (
ansible_local.get("system_id", None)
if isinstance(ansible_local, dict)
else None
)
ansible_facts_system_id = (
ansible_facts.get("system_id", None)
if isinstance(ansible_facts, dict)
else None
)
if ansible_local_system_id:
print("Setting local {}".format(ansible_local_system_id))
logger.debug(
"Insights system_id {} found for host <{}, {}> in"
" ansible local facts".format(
ansible_local_system_id, host.inventory.id, host.name
)
)
host.insights_system_id = ansible_local_system_id
elif ansible_facts_system_id:
logger.debug(
"Insights system_id {} found for host <{}, {}> in"
" insights facts".format(
ansible_local_system_id, host.inventory.id, host.name
)
)
host.insights_system_id = ansible_facts_system_id
host.save()
system_tracking_logger.info(
"New fact for inventory {} host {}".format(
smart_str(host.inventory.name), smart_str(host.name)
),
extra=dict(
inventory_id=host.inventory.id,
host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
job_id=self.id,
),
)
else:
# if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {}
host.ansible_facts_modified = now()
system_tracking_logger.info(
"Facts cleared for inventory {} host {}".format(
smart_str(host.inventory.name), smart_str(host.name)
)
)
host.save()
|
def finish_job_fact_cache(self, destination, modification_times):
for host in self._get_inventory_hosts():
filepath = os.sep.join(map(str, [destination, host.name]))
if not os.path.realpath(filepath).startswith(destination):
system_tracking_logger.error(
"facts for host {} could not be cached".format(smart_str(host.name))
)
continue
if os.path.exists(filepath):
# If the file changed since we wrote it pre-playbook run...
modified = os.path.getmtime(filepath)
if modified > modification_times.get(filepath, 0):
with codecs.open(filepath, "r", encoding="utf-8") as f:
try:
ansible_facts = json.load(f)
except ValueError:
continue
host.ansible_facts = ansible_facts
host.ansible_facts_modified = now()
ansible_local_system_id = (
ansible_facts.get("ansible_local", {})
.get("insights", {})
.get("system_id", None)
)
ansible_facts_system_id = ansible_facts.get("insights", {}).get(
"system_id", None
)
if ansible_local_system_id:
print("Setting local {}".format(ansible_local_system_id))
logger.debug(
"Insights system_id {} found for host <{}, {}> in"
" ansible local facts".format(
ansible_local_system_id, host.inventory.id, host.name
)
)
host.insights_system_id = ansible_local_system_id
elif ansible_facts_system_id:
logger.debug(
"Insights system_id {} found for host <{}, {}> in"
" insights facts".format(
ansible_local_system_id, host.inventory.id, host.name
)
)
host.insights_system_id = ansible_facts_system_id
host.save()
system_tracking_logger.info(
"New fact for inventory {} host {}".format(
smart_str(host.inventory.name), smart_str(host.name)
),
extra=dict(
inventory_id=host.inventory.id,
host_name=host.name,
ansible_facts=host.ansible_facts,
ansible_facts_modified=host.ansible_facts_modified.isoformat(),
job_id=self.id,
),
)
else:
# if the file goes missing, ansible removed it (likely via clear_facts)
host.ansible_facts = {}
host.ansible_facts_modified = now()
system_tracking_logger.info(
"Facts cleared for inventory {} host {}".format(
smart_str(host.inventory.name), smart_str(host.name)
)
)
host.save()
|
https://github.com/ansible/awx/issues/5935
|
2020-02-13 19:10:29,258 ERROR awx.main.tasks job 909 (successful) Final run hook errored.
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1411, in run
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=isolated_manager_instance)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/tasks.py", line 1864, in final_run_hook
fact_modification_times,
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/models/jobs.py", line 821, in finish_job_fact_cache
ansible_local_system_id = ansible_facts.get('ansible_local', {}).get('insights', {}).get('system_id', None)
AttributeError: 'str' object has no attribute 'get'
|
AttributeError
|
def _get_instances(self, inkwargs):
"""Make API calls"""
instances = []
si = None
try:
si = SmartConnect(**inkwargs)
except ssl.SSLError as connection_error:
if (
"[SSL: CERTIFICATE_VERIFY_FAILED]" in str(connection_error)
and self.validate_certs
):
sys.exit(
"Unable to connect to ESXi server due to %s, "
"please specify validate_certs=False and try again" % connection_error
)
except Exception as exc:
self.debugl("Unable to connect to ESXi server due to %s" % exc)
sys.exit("Unable to connect to ESXi server due to %s" % exc)
self.debugl("retrieving all instances")
if not si:
sys.exit(
"Could not connect to the specified host using specified "
"username and password"
)
atexit.register(Disconnect, si)
content = si.RetrieveContent()
# Create a search container for virtualmachines
self.debugl("creating containerview for virtualmachines")
container = content.rootFolder
viewType = [vim.VirtualMachine]
recursive = True
containerView = content.viewManager.CreateContainerView(
container, viewType, recursive
)
children = containerView.view
for child in children:
# If requested, limit the total number of instances
if self.args.max_instances:
if len(instances) >= self.args.max_instances:
break
instances.append(child)
self.debugl("%s total instances in container view" % len(instances))
if self.args.host:
instances = [x for x in instances if x.name == self.args.host]
instance_tuples = []
for instance in instances:
if self.guest_props:
ifacts = self.facts_from_proplist(instance)
else:
ifacts = self.facts_from_vobj(instance)
instance_tuples.append((instance, ifacts))
self.debugl("facts collected for all instances")
try:
cfm = content.customFieldsManager
if cfm is not None and cfm.field:
for f in cfm.field:
if not f.managedObjectType or f.managedObjectType == vim.VirtualMachine:
self.custom_fields[f.key] = f.name
self.debugl("%d custom fields collected" % len(self.custom_fields))
except vmodl.RuntimeFault as exc:
self.debugl("Unable to gather custom fields due to %s" % exc.msg)
except IndexError as exc:
self.debugl("Unable to gather custom fields due to %s" % exc)
return instance_tuples
|
def _get_instances(self, inkwargs):
"""Make API calls"""
instances = []
try:
si = SmartConnect(**inkwargs)
except ssl.SSLError as connection_error:
if (
"[SSL: CERTIFICATE_VERIFY_FAILED]" in str(connection_error)
and self.validate_certs
):
sys.exit(
"Unable to connect to ESXi server due to %s, "
"please specify validate_certs=False and try again" % connection_error
)
except Exception as exc:
self.debugl("Unable to connect to ESXi server due to %s" % exc)
sys.exit("Unable to connect to ESXi server due to %s" % exc)
self.debugl("retrieving all instances")
if not si:
sys.exit(
"Could not connect to the specified host using specified "
"username and password"
)
atexit.register(Disconnect, si)
content = si.RetrieveContent()
# Create a search container for virtualmachines
self.debugl("creating containerview for virtualmachines")
container = content.rootFolder
viewType = [vim.VirtualMachine]
recursive = True
containerView = content.viewManager.CreateContainerView(
container, viewType, recursive
)
children = containerView.view
for child in children:
# If requested, limit the total number of instances
if self.args.max_instances:
if len(instances) >= self.args.max_instances:
break
instances.append(child)
self.debugl("%s total instances in container view" % len(instances))
if self.args.host:
instances = [x for x in instances if x.name == self.args.host]
instance_tuples = []
for instance in instances:
if self.guest_props:
ifacts = self.facts_from_proplist(instance)
else:
ifacts = self.facts_from_vobj(instance)
instance_tuples.append((instance, ifacts))
self.debugl("facts collected for all instances")
try:
cfm = content.customFieldsManager
if cfm is not None and cfm.field:
for f in cfm.field:
if not f.managedObjectType or f.managedObjectType == vim.VirtualMachine:
self.custom_fields[f.key] = f.name
self.debugl("%d custom fields collected" % len(self.custom_fields))
except vmodl.RuntimeFault as exc:
self.debugl("Unable to gather custom fields due to %s" % exc.msg)
except IndexError as exc:
self.debugl("Unable to gather custom fields due to %s" % exc)
return instance_tuples
|
https://github.com/ansible/awx/issues/5648
|
4.545 INFO Updating inventory 28: VCENTER_INVENTORY
5.489 INFO Reading Ansible inventory source: /var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/plugins/inventory/vmware_inventory.py
5.495 INFO Using VIRTUAL_ENV: /var/lib/awx/venv/ansible
5.495 INFO Using PATH: /var/lib/awx/venv/ansible/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
5.496 INFO Using PYTHONPATH: /var/lib/awx/venv/ansible/lib/python3.6/site-packages:
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/bin/awx-manage", line 11, in <module>
load_entry_point('awx==9.1.0', 'console_scripts', 'awx-manage')()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/__init__.py", line 165, in manage
execute_from_command_line(sys.argv)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 323, in run_from_argv
self.execute(*args, **cmd_options)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 364, in execute
output = self.handle(*args, **options)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 1158, in handle
raise exc
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 1048, in handle
venv_path=venv_path, verbosity=self.verbosity).load()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 215, in load
return self.command_to_json(base_args + ['--list'])
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 198, in command_to_json
self.method, proc.returncode, stdout, stderr))
RuntimeError: ansible-inventory failed (rc=1) with stdout:
stderr:
ansible-inventory 2.9.2
config file = /etc/ansible/ansible.cfg
configured module search path = ['/var/lib/awx/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.6/site-packages/ansible
executable location = /usr/local/bin/ansible-inventory
python version = 3.6.8 (default, Oct 7 2019, 17:58:22) [GCC 8.2.1 20180905 (Red Hat 8.2.1-3)]
Using /etc/ansible/ansible.cfg as config file
[WARNING]: * Failed to parse /var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py with script plugin:
Inventory script (/var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py) had an execution error:
Traceback (most recent call last): File
"/var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py", line 792, in <module>
print(VMWareInventory().show()) File
"/var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py", line 146, in __init__
self.do_api_calls_update_cache() File
"/var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py", line 185, in
do_api_calls_update_cache self.inventory =
self.instances_to_inventory(self.get_instances()) File
"/var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py", line 364, in get_instances
return self._get_instances(kwargs) File
"/var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py", line 381, in
_get_instances if not si: UnboundLocalError: local variable 'si' referenced
before assignment
File "/usr/local/lib/python3.6/site-packages/ansible/inventory/manager.py", line 280, in parse_source
plugin.parse(self._inventory, self._loader, source, cache=cache)
File "/usr/local/lib/python3.6/site-packages/ansible/plugins/inventory/script.py", line 161, in parse
raise AnsibleParserError(to_native(e))
[WARNING]: Unable to parse /var/lib/awx/venv/awx/lib64/python3.6/site-
packages/awx/plugins/inventory/vmware_inventory.py as an inventory source
ERROR! No inventory was parsed, please check your configuration and options.
|
RuntimeError
|
def process_request(self, request):
executor = MigrationExecutor(connection)
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if (
bool(plan)
and getattr(resolve(request.path), "url_name", "") != "migrations_notran"
):
return redirect(reverse("ui:migrations_notran"))
|
def process_request(self, request):
if migration_in_progress_check_or_relase():
if getattr(resolve(request.path), "url_name", "") == "migrations_notran":
return
return redirect(reverse("ui:migrations_notran"))
|
https://github.com/ansible/awx/issues/5530
|
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
psycopg2.errors.UndefinedColumn: column main_projectupdate.job_tags does not exist
LINE 1: ...e"."project_id", "main_projectupdate"."job_type", "main_proj...
^
HINT: Perhaps you meant to reference the column "main_projectupdate.job_type".
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/dispatch/worker/task.py", line 86, in perform_work
result = self.run_callable(body)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/dispatch/worker/task.py", line 62, in run_callable
return _call(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/tasks.py", line 19, in run_task_manager
TaskManager().schedule()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/task_manager.py", line 643, in schedule
self._schedule()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/task_manager.py", line 605, in _schedule
all_sorted_tasks = self.get_tasks()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/task_manager.py", line 69, in get_tasks
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list, job_type='check').prefetch_related('instance_group')]
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/query.py", line 274, in __iter__
self._fetch_all()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/query.py", line 1242, in _fetch_all
self._result_cache = list(self._iterable_class(self))
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/polymorphic/query.py", line 56, in _polymorphic_iterator
o = next(base_iter)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/query.py", line 55, in __iter__
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/sql/compiler.py", line 1100, in execute_sql
cursor.execute(sql, params)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 76, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/utils.py", line 89, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.ProgrammingError: column main_projectupdate.job_tags does not exist
LINE 1: ...e"."project_id", "main_projectupdate"."job_type", "main_proj...
|
django.db.utils.ProgrammingError
|
def run_task_manager():
logger.debug("Running Tower task manager.")
TaskManager().schedule()
|
def run_task_manager():
if migration_in_progress_check_or_relase():
logger.debug("Not running task manager because migration is in progress.")
return
logger.debug("Running Tower task manager.")
TaskManager().schedule()
|
https://github.com/ansible/awx/issues/5530
|
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
psycopg2.errors.UndefinedColumn: column main_projectupdate.job_tags does not exist
LINE 1: ...e"."project_id", "main_projectupdate"."job_type", "main_proj...
^
HINT: Perhaps you meant to reference the column "main_projectupdate.job_type".
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/dispatch/worker/task.py", line 86, in perform_work
result = self.run_callable(body)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/dispatch/worker/task.py", line 62, in run_callable
return _call(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/tasks.py", line 19, in run_task_manager
TaskManager().schedule()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/task_manager.py", line 643, in schedule
self._schedule()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/task_manager.py", line 605, in _schedule
all_sorted_tasks = self.get_tasks()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/scheduler/task_manager.py", line 69, in get_tasks
project_updates = [p for p in ProjectUpdate.objects.filter(status__in=status_list, job_type='check').prefetch_related('instance_group')]
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/query.py", line 274, in __iter__
self._fetch_all()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/query.py", line 1242, in _fetch_all
self._result_cache = list(self._iterable_class(self))
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/polymorphic/query.py", line 56, in _polymorphic_iterator
o = next(base_iter)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/query.py", line 55, in __iter__
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/models/sql/compiler.py", line 1100, in execute_sql
cursor.execute(sql, params)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 76, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/utils.py", line 89, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
django.db.utils.ProgrammingError: column main_projectupdate.job_tags does not exist
LINE 1: ...e"."project_id", "main_projectupdate"."job_type", "main_proj...
|
django.db.utils.ProgrammingError
|
def create(self, request, *args, **kwargs):
# If the object ID was not specified, it probably doesn't exist in the
# DB yet. We want to see if we can create it. The URL may choose to
# inject it's primary key into the object because we are posting to a
# subcollection. Use all the normal access control mechanisms.
# Make a copy of the data provided (since it's readonly) in order to
# inject additional data.
if hasattr(request.data, "copy"):
data = request.data.copy()
else:
data = QueryDict("")
data.update(request.data)
# add the parent key to the post data using the pk from the URL
parent_key = getattr(self, "parent_key", None)
if parent_key:
data[parent_key] = self.kwargs["pk"]
# attempt to deserialize the object
serializer = self.get_serializer(data=data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Verify we have permission to add the object as given.
if not request.user.can_access(self.model, "add", serializer.validated_data):
raise PermissionDenied()
# save the object through the serializer, reload and returned the saved
# object deserialized
obj = serializer.save()
serializer = self.get_serializer(instance=obj)
headers = {"Location": obj.get_absolute_url(request)}
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
|
def create(self, request, *args, **kwargs):
# If the object ID was not specified, it probably doesn't exist in the
# DB yet. We want to see if we can create it. The URL may choose to
# inject it's primary key into the object because we are posting to a
# subcollection. Use all the normal access control mechanisms.
# Make a copy of the data provided (since it's readonly) in order to
# inject additional data.
if hasattr(request.data, "copy"):
data = request.data.copy()
else:
data = QueryDict("")
data.update(request.data)
# add the parent key to the post data using the pk from the URL
parent_key = getattr(self, "parent_key", None)
if parent_key:
data[parent_key] = self.kwargs["pk"]
# attempt to deserialize the object
serializer = self.get_serializer(data=data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Verify we have permission to add the object as given.
if not request.user.can_access(self.model, "add", serializer.initial_data):
raise PermissionDenied()
# save the object through the serializer, reload and returned the saved
# object deserialized
obj = serializer.save()
serializer = self.get_serializer(instance=obj)
headers = {"Location": obj.get_absolute_url(request)}
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
|
https://github.com/ansible/awx/issues/4147
|
2019-06-21 20:31:01,747 ERROR django.request Internal Server Error: /api/v2/job_templates/7/schedules/
Traceback (most recent call last):
File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File "/venv/awx/lib64/python3.6/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/venv/awx/lib64/python3.6/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "./awx/api/generics.py", line 293, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 495, in dispatch
response = self.handle_exception(exc)
File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 455, in handle_exception
self.raise_uncaught_exception(exc)
File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 492, in dispatch
response = handler(request, *args, **kwargs)
File "/venv/awx/lib64/python3.6/site-packages/rest_framework/generics.py", line 244, in post
return self.create(request, *args, **kwargs)
File "./awx/api/generics.py", line 570, in create
if not request.user.can_access(self.model, 'add', serializer.initial_data):
File "./awx/main/access.py", line 122, in check_user_access
result = access_method(*args, **kwargs)
File "./awx/main/access.py", line 162, in wrapper
return func(self, *args, **kwargs)
File "./awx/main/access.py", line 2445, in can_add
if not JobLaunchConfigAccess(self.user).can_add(data):
File "./awx/main/access.py", line 162, in wrapper
return func(self, *args, **kwargs)
File "./awx/main/access.py", line 1787, in can_add
cred_pks = [cred.pk for cred in data['credentials']]
File "./awx/main/access.py", line 1787, in <listcomp>
cred_pks = [cred.pk for cred in data['credentials']]
AttributeError: 'int' object has no attribute 'pk'
|
AttributeError
|
def generate_tmp_kube_config(credential, namespace):
host_input = credential.get_input("host")
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [
{
"name": host_input,
"user": {"token": credential.get_input("bearer_token")},
}
],
"contexts": [
{
"name": host_input,
"context": {
"cluster": host_input,
"user": host_input,
"namespace": namespace,
},
}
],
"current-context": host_input,
}
if credential.get_input("verify_ssl") and "ssl_ca_cert" in credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
credential.get_input("ssl_ca_cert").encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
|
def generate_tmp_kube_config(credential, namespace):
host_input = credential.get_input("host")
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [
{
"name": host_input,
"user": {"token": credential.get_input("bearer_token")},
}
],
"contexts": [
{
"name": host_input,
"context": {
"cluster": host_input,
"user": host_input,
"namespace": namespace,
},
}
],
"current-context": host_input,
}
if credential.get_input("verify_ssl"):
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
credential.get_input("ssl_ca_cert").encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
|
https://github.com/ansible/awx/issues/5326
|
Traceback (most recent call last):
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 55, in list_active_jobs
for pod in pm.kube_api.list_namespaced_pod(
File "/venv/awx/lib64/python3.6/site-packages/django/utils/functional.py", line 80, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 91, in kube_api
config_dict=self.kube_config
File "/venv/awx/lib64/python3.6/site-packages/django/utils/functional.py", line 80, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 83, in kube_config
return generate_tmp_kube_config(self.credential, self.namespace)
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 178, in generate_tmp_kube_config
credential.get_input('ssl_ca_cert').encode() # encode to bytes
File "/awx_devel/awx/main/models/credential/__init__.py", line 288, in get_input
raise AttributeError
AttributeError
|
AttributeError
|
def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
from awx.main.tasks import handle_work_error, handle_work_success
dependent_tasks = dependent_tasks or []
task_actual = {
"type": get_type_for_model(type(task)),
"id": task.id,
}
dependencies = [
{"type": get_type_for_model(type(t)), "id": t.id} for t in dependent_tasks
]
controller_node = None
if task.supports_isolation() and rampart_group.controller_id:
try:
controller_node = rampart_group.choose_online_controller_node()
except IndexError:
logger.debug(
"No controllers available in group {} to run {}".format(
rampart_group.name, task.log_format
)
)
return
task.status = "waiting"
(start_status, opts) = task.pre_start()
if not start_status:
task.status = "failed"
if task.job_explanation:
task.job_explanation += " "
task.job_explanation += "Task failed pre-start check."
task.save()
# TODO: run error handler to fail sub-tasks and send notifications
else:
if type(task) is WorkflowJob:
task.status = "running"
task.send_notification_templates("running")
logger.debug("Transitioning %s to running status.", task.log_format)
schedule_task_manager()
elif not task.supports_isolation() and rampart_group.controller_id:
# non-Ansible jobs on isolated instances run on controller
task.instance_group = rampart_group.controller
task.execution_node = random.choice(
list(
rampart_group.controller.instances.all().values_list(
"hostname", flat=True
)
)
)
logger.debug(
"Submitting isolated {} to queue {}.".format(
task.log_format, task.instance_group.name, task.execution_node
)
)
elif controller_node:
task.instance_group = rampart_group
task.execution_node = instance.hostname
task.controller_node = controller_node
logger.debug(
"Submitting isolated {} to queue {} controlled by {}.".format(
task.log_format, task.execution_node, controller_node
)
)
elif rampart_group.is_containerized:
# find one real, non-containerized instance with capacity to
# act as the controller for k8s API interaction
match = None
for group in InstanceGroup.objects.all():
if group.is_containerized or group.controller_id:
continue
match = group.fit_task_to_most_remaining_capacity_instance(task)
if match:
break
task.instance_group = rampart_group
if match is None:
logger.warn(
"No available capacity to run containerized <{}>.".format(
task.log_format
)
)
else:
if task.supports_isolation():
task.controller_node = match.hostname
else:
# project updates and inventory updates don't *actually* run in pods,
# so just pick *any* non-isolated, non-containerized host and use it
# as the execution node
task.execution_node = match.hostname
logger.debug(
"Submitting containerized {} to queue {}.".format(
task.log_format, task.execution_node
)
)
else:
task.instance_group = rampart_group
if instance is not None:
task.execution_node = instance.hostname
logger.debug(
"Submitting {} to <instance group, instance> <{},{}>.".format(
task.log_format, task.instance_group_id, task.execution_node
)
)
with disable_activity_stream():
task.celery_task_id = str(uuid.uuid4())
task.save()
if rampart_group is not None:
self.consume_capacity(task, rampart_group.name)
def post_commit():
if task.status != "failed" and type(task) is not WorkflowJob:
task_cls = task._get_task_class()
task_cls.apply_async(
[task.pk],
opts,
queue=task.get_queue_name(),
uuid=task.celery_task_id,
callbacks=[
{
"task": handle_work_success.name,
"kwargs": {"task_actual": task_actual},
}
],
errbacks=[
{
"task": handle_work_error.name,
"args": [task.celery_task_id],
"kwargs": {"subtasks": [task_actual] + dependencies},
}
],
)
task.websocket_emit_status(task.status) # adds to on_commit
connection.on_commit(post_commit)
|
def start_task(self, task, rampart_group, dependent_tasks=None, instance=None):
from awx.main.tasks import handle_work_error, handle_work_success
dependent_tasks = dependent_tasks or []
task_actual = {
"type": get_type_for_model(type(task)),
"id": task.id,
}
dependencies = [
{"type": get_type_for_model(type(t)), "id": t.id} for t in dependent_tasks
]
controller_node = None
if task.supports_isolation() and rampart_group.controller_id:
try:
controller_node = rampart_group.choose_online_controller_node()
except IndexError:
logger.debug(
"No controllers available in group {} to run {}".format(
rampart_group.name, task.log_format
)
)
return
task.status = "waiting"
(start_status, opts) = task.pre_start()
if not start_status:
task.status = "failed"
if task.job_explanation:
task.job_explanation += " "
task.job_explanation += "Task failed pre-start check."
task.save()
# TODO: run error handler to fail sub-tasks and send notifications
else:
if type(task) is WorkflowJob:
task.status = "running"
task.send_notification_templates("running")
logger.debug("Transitioning %s to running status.", task.log_format)
schedule_task_manager()
elif not task.supports_isolation() and rampart_group.controller_id:
# non-Ansible jobs on isolated instances run on controller
task.instance_group = rampart_group.controller
task.execution_node = random.choice(
list(
rampart_group.controller.instances.all().values_list(
"hostname", flat=True
)
)
)
logger.debug(
"Submitting isolated {} to queue {}.".format(
task.log_format, task.instance_group.name, task.execution_node
)
)
elif controller_node:
task.instance_group = rampart_group
task.execution_node = instance.hostname
task.controller_node = controller_node
logger.debug(
"Submitting isolated {} to queue {} controlled by {}.".format(
task.log_format, task.execution_node, controller_node
)
)
elif rampart_group.is_containerized:
# find one real, non-containerized instance with capacity to
# act as the controller for k8s API interaction
match = None
for group in InstanceGroup.objects.all():
if group.is_containerized or group.controller_id:
continue
match = group.find_largest_idle_instance()
if match:
break
task.instance_group = rampart_group
if task.supports_isolation():
task.controller_node = match.hostname
else:
# project updates and inventory updates don't *actually* run in pods,
# so just pick *any* non-isolated, non-containerized host and use it
# as the execution node
task.execution_node = match.hostname
logger.debug(
"Submitting containerized {} to queue {}.".format(
task.log_format, task.execution_node
)
)
else:
task.instance_group = rampart_group
if instance is not None:
task.execution_node = instance.hostname
logger.debug(
"Submitting {} to <instance group, instance> <{},{}>.".format(
task.log_format, task.instance_group_id, task.execution_node
)
)
with disable_activity_stream():
task.celery_task_id = str(uuid.uuid4())
task.save()
if rampart_group is not None:
self.consume_capacity(task, rampart_group.name)
def post_commit():
if task.status != "failed" and type(task) is not WorkflowJob:
task_cls = task._get_task_class()
task_cls.apply_async(
[task.pk],
opts,
queue=task.get_queue_name(),
uuid=task.celery_task_id,
callbacks=[
{
"task": handle_work_success.name,
"kwargs": {"task_actual": task_actual},
}
],
errbacks=[
{
"task": handle_work_error.name,
"args": [task.celery_task_id],
"kwargs": {"subtasks": [task_actual] + dependencies},
}
],
)
task.websocket_emit_status(task.status) # adds to on_commit
connection.on_commit(post_commit)
|
https://github.com/ansible/awx/issues/5326
|
Traceback (most recent call last):
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 55, in list_active_jobs
for pod in pm.kube_api.list_namespaced_pod(
File "/venv/awx/lib64/python3.6/site-packages/django/utils/functional.py", line 80, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 91, in kube_api
config_dict=self.kube_config
File "/venv/awx/lib64/python3.6/site-packages/django/utils/functional.py", line 80, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 83, in kube_config
return generate_tmp_kube_config(self.credential, self.namespace)
File "/awx_devel/awx/main/scheduler/kubernetes.py", line 178, in generate_tmp_kube_config
credential.get_input('ssl_ca_cert').encode() # encode to bytes
File "/awx_devel/awx/main/models/credential/__init__.py", line 288, in get_input
raise AttributeError
AttributeError
|
AttributeError
|
def create_from_data(cls, **kwargs):
# Convert the datetime for the event's creation
# appropriately, and include a time zone for it.
#
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(kwargs["created"], datetime.datetime):
kwargs["created"] = parse_datetime(kwargs["created"])
if not kwargs["created"].tzinfo:
kwargs["created"] = kwargs["created"].replace(tzinfo=utc)
except (KeyError, ValueError):
kwargs.pop("created", None)
sanitize_event_keys(kwargs, cls.VALID_KEYS)
kwargs.pop("workflow_job_id", None)
event = cls.objects.create(**kwargs)
if isinstance(event, AdHocCommandEvent):
analytics_logger.info(
"Event data saved.", extra=dict(python_objects=dict(job_event=event))
)
return event
|
def create_from_data(cls, **kwargs):
# Convert the datetime for the event's creation
# appropriately, and include a time zone for it.
#
# In the event of any issue, throw it out, and Django will just save
# the current time.
try:
if not isinstance(kwargs["created"], datetime.datetime):
kwargs["created"] = parse_datetime(kwargs["created"])
if not kwargs["created"].tzinfo:
kwargs["created"] = kwargs["created"].replace(tzinfo=utc)
except (KeyError, ValueError):
kwargs.pop("created", None)
sanitize_event_keys(kwargs, cls.VALID_KEYS)
event = cls.objects.create(**kwargs)
if isinstance(event, AdHocCommandEvent):
analytics_logger.info(
"Event data saved.", extra=dict(python_objects=dict(job_event=event))
)
return event
|
https://github.com/ansible/awx/issues/4920
|
awx_1 | 2019-10-04 16:26:53,567 ERROR awx.main.commands.run_callback_receiver Callback Task Processor Raised Exception: TypeError("InventoryUpdateEvent() got an unexpected keyword argument 'workflow_job_id'",)
awx_1 | 2019-10-04 16:26:53,568 ERROR awx.main.commands.run_callback_receiver Detail: Traceback (most recent call last):
awx_1 | File "/awx_devel/awx/main/dispatch/worker/callback.py", line 107, in perform_work
awx_1 | _save_event_data()
awx_1 | File "/awx_devel/awx/main/dispatch/worker/callback.py", line 45, in _save_event_data
awx_1 | cls.create_from_data(**body)
awx_1 | File "/awx_devel/awx/main/models/events.py", line 622, in create_from_data
awx_1 | event = cls.objects.create(**kwargs)
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/models/manager.py", line 82, in manager_method
awx_1 | return getattr(self.get_queryset(), name)(*args, **kwargs)
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/models/query.py", line 420, in create
awx_1 | obj = self.model(**kwargs)
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/models/base.py", line 501, in __init__
awx_1 | raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg))
awx_1 | TypeError: InventoryUpdateEvent() got an unexpected keyword argument 'workflow_job_id'
awx_1 | 2019-10-04 16:26:53,570 ERROR awx.main.commands.run_callback_receiver Callback Task Processor Raised Exception: TypeError("InventoryUpdateEvent() got an unexpected keyword argument 'workflow_job_id'",)
|
TypeError
|
def save_user_session_membership(sender, **kwargs):
session = kwargs.get("instance", None)
if pkg_resources.get_distribution("channels").version >= "2":
# If you get into this code block, it means we upgraded channels, but
# didn't make the settings.SESSIONS_PER_USER feature work
raise RuntimeError(
"save_user_session_membership must be updated for channels>=2: "
"http://channels.readthedocs.io/en/latest/one-to-two.html#requirements"
)
if "runworker" in sys.argv:
# don't track user session membership for websocket per-channel sessions
return
if not session:
return
user_id = session.get_decoded().get(SESSION_KEY, None)
if not user_id:
return
if UserSessionMembership.objects.filter(user=user_id, session=session).exists():
return
# check if user_id from session has an id match in User before saving
if User.objects.filter(id=int(user_id)).exists():
UserSessionMembership(
user_id=user_id, session=session, created=timezone.now()
).save()
expired = UserSessionMembership.get_memberships_over_limit(user_id)
for membership in expired:
Session.objects.filter(session_key__in=[membership.session_id]).delete()
membership.delete()
if len(expired):
consumers.emit_channel_notification(
"control-limit_reached_{}".format(user_id),
dict(group_name="control", reason="limit_reached"),
)
|
def save_user_session_membership(sender, **kwargs):
session = kwargs.get("instance", None)
if pkg_resources.get_distribution("channels").version >= "2":
# If you get into this code block, it means we upgraded channels, but
# didn't make the settings.SESSIONS_PER_USER feature work
raise RuntimeError(
"save_user_session_membership must be updated for channels>=2: "
"http://channels.readthedocs.io/en/latest/one-to-two.html#requirements"
)
if "runworker" in sys.argv:
# don't track user session membership for websocket per-channel sessions
return
if not session:
return
user_id = session.get_decoded().get(SESSION_KEY, None)
if not user_id:
return
if UserSessionMembership.objects.filter(user=user_id, session=session).exists():
return
UserSessionMembership(
user_id=user_id, session=session, created=timezone.now()
).save()
expired = UserSessionMembership.get_memberships_over_limit(user_id)
for membership in expired:
Session.objects.filter(session_key__in=[membership.session_id]).delete()
membership.delete()
if len(expired):
consumers.emit_channel_notification(
"control-limit_reached_{}".format(user_id),
dict(group_name="control", reason="limit_reached"),
)
|
https://github.com/ansible/awx/issues/4334
|
awx_1 | 2019-07-16 21:07:30,594 ERROR django.request Internal Server Error: /
awx_1 | Traceback (most recent call last):
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/backends/base/base.py", line 240, in _commit
awx_1 | return self.connection.commit()
awx_1 | psycopg2.IntegrityError: insert or update on table "main_usersessionmembership" violates foreign key constraint "main_usersessionmembership_user_id_fe163c98_fk_auth_user_id"
awx_1 | DETAIL: Key (user_id)=(2) is not present in table "auth_user".
awx_1 |
awx_1 |
awx_1 | The above exception was the direct cause of the following exception:
awx_1 |
awx_1 | Traceback (most recent call last):
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
awx_1 | response = get_response(request)
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/utils/deprecation.py", line 96, in __call__
awx_1 | response = self.process_response(request, response)
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/contrib/sessions/middleware.py", line 58, in process_response
awx_1 | request.session.save()
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/contrib/sessions/backends/db.py", line 87, in save
awx_1 | obj.save(force_insert=must_create, force_update=not must_create, using=using)
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/transaction.py", line 240, in __exit__
awx_1 | connection.commit()
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/backends/base/base.py", line 262, in commit
awx_1 | self._commit()
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/backends/base/base.py", line 240, in _commit
awx_1 | return self.connection.commit()
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/utils.py", line 89, in __exit__
awx_1 | raise dj_exc_value.with_traceback(traceback) from exc_value
awx_1 | File "/venv/awx/lib64/python3.6/site-packages/django/db/backends/base/base.py", line 240, in _commit
awx_1 | return self.connection.commit()
awx_1 | django.db.utils.IntegrityError: insert or update on table "main_usersessionmembership" violates foreign key constraint "main_usersessionmembership_user_id_fe163c98_fk_auth_user_id"
awx_1 | DETAIL: Key (user_id)=(2) is not present in table "auth_user".
|
psycopg2.IntegrityError
|
def reap(instance=None, status="failed", excluded_uuids=[]):
"""
Reap all jobs in waiting|running for this instance.
"""
me = instance
if me is None:
(changed, me) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(me.hostname))
now = tz_now()
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter(
(
Q(status="running")
| Q(status="waiting", modified__lte=now - timedelta(seconds=60))
)
& (Q(execution_node=me.hostname) | Q(controller_node=me.hostname))
& ~Q(polymorphic_ctype_id=workflow_ctype_id)
).exclude(celery_task_id__in=excluded_uuids)
for j in jobs:
reap_job(j, status)
|
def reap(instance=None, status="failed", excluded_uuids=[]):
"""
Reap all jobs in waiting|running for this instance.
"""
me = instance or Instance.objects.me()
now = tz_now()
workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id
jobs = UnifiedJob.objects.filter(
(
Q(status="running")
| Q(status="waiting", modified__lte=now - timedelta(seconds=60))
)
& (Q(execution_node=me.hostname) | Q(controller_node=me.hostname))
& ~Q(polymorphic_ctype_id=workflow_ctype_id)
).exclude(celery_task_id__in=excluded_uuids)
for j in jobs:
reap_job(j, status)
|
https://github.com/ansible/awx/issues/4294
|
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: 2019-07-04 14:20:39,433 ERROR awx.main.dispatch failed to write inbound message
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: Traceback (most recent call last):
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/dispatch/pool.py", line 388, in write
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: self.cleanup()
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/dispatch/pool.py", line 373, in cleanup
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: reaper.reap(excluded_uuids=running_uuids)
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/dispatch/reaper.py", line 35, in reap
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: me = instance or Instance.objects.me()
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/managers.py", line 88, in me
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: raise RuntimeError("No instance found with the current cluster host id")
Jul 4 17:20:39 1.awx.node.dc1 dispatcher[505]: RuntimeError: No instance found with the current cluster host id
|
RuntimeError
|
def instance_info(since):
info = {}
instances = models.Instance.objects.values_list("hostname").values(
"uuid",
"version",
"capacity",
"cpu",
"memory",
"managed_by_policy",
"hostname",
"last_isolated_check",
"enabled",
)
for instance in instances:
instance_info = {
"uuid": instance["uuid"],
"version": instance["version"],
"capacity": instance["capacity"],
"cpu": instance["cpu"],
"memory": instance["memory"],
"managed_by_policy": instance["managed_by_policy"],
"last_isolated_check": _get_isolated_datetime(
instance["last_isolated_check"]
),
"enabled": instance["enabled"],
}
info[instance["uuid"]] = instance_info
return info
|
def instance_info(since):
info = {}
instances = models.Instance.objects.values_list("hostname").values(
"uuid",
"version",
"capacity",
"cpu",
"memory",
"managed_by_policy",
"hostname",
"last_isolated_check",
"enabled",
)
for instance in instances:
instance_info = {
"uuid": instance["uuid"],
"version": instance["version"],
"capacity": instance["capacity"],
"cpu": instance["cpu"],
"memory": instance["memory"],
"managed_by_policy": instance["managed_by_policy"],
"last_isolated_check": instance["last_isolated_check"],
"enabled": instance["enabled"],
}
info[instance["uuid"]] = instance_info
return info
|
https://github.com/ansible/awx/issues/4170
|
2019-06-25 00:24:00,434 ERROR awx.main.analytics Could not generate metric instance_info.json
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/analytics/core.py", line 94, in gather
json.dump(func(last_run), f)
File "/opt/rh/rh-python36/root/usr/lib64/python3.6/json/__init__.py", line 179, in dump
for chunk in iterable:
File "/opt/rh/rh-python36/root/usr/lib64/python3.6/json/encoder.py", line 430, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "/opt/rh/rh-python36/root/usr/lib64/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/opt/rh/rh-python36/root/usr/lib64/python3.6/json/encoder.py", line 404, in _iterencode_dict
yield from chunks
File "/opt/rh/rh-python36/root/usr/lib64/python3.6/json/encoder.py", line 437, in _iterencode
o = _default(o)
File "/opt/rh/rh-python36/root/usr/lib64/python3.6/json/encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'datetime' is not JSON serializable
2019-06-25 00:24:00,469 ERROR awx.main.analytics could not find insights-client on PATH
|
TypeError
|
def to_internal_value(self, pk):
try:
pk = int(pk)
except ValueError:
self.fail("invalid")
try:
Credential.objects.get(pk=pk)
except ObjectDoesNotExist:
raise serializers.ValidationError(_("Credential {} does not exist").format(pk))
return pk
|
def to_internal_value(self, pk):
try:
Credential.objects.get(pk=pk)
except ObjectDoesNotExist:
raise serializers.ValidationError(_("Credential {} does not exist").format(pk))
return pk
|
https://github.com/ansible/awx/issues/3346
|
2019-03-01 13:12:09,346 ERROR django.request Internal Server Error: /api/v2/job_templates/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/wsgi.py", line 71, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/api/generics.py", line 324, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/api/views/__init__.py", line 2195, in post
ret = super(JobTemplateList, self).post(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/generics.py", line 244, in post
return self.create(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/mixins.py", line 20, in create
serializer.is_valid(raise_exception=True)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/serializers.py", line 236, in is_valid
self._validated_data = self.run_validation(self.initial_data)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/api/serializers.py", line 552, in run_validation
return super(BaseSerializer, self).run_validation(data)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/serializers.py", line 435, in run_validation
value = self.to_internal_value(data)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/serializers.py", line 465, in to_internal_value
validated_value = field.run_validation(primitive_value)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/fields.py", line 524, in run_validation
self.run_validators(value)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/rest_framework/fields.py", line 538, in run_validators
validator(value)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/validators.py", line 325, in __call__
if self.compare(cleaned, self.limit_value):
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/validators.py", line 358, in compare
return a < b
TypeError: '<' not supported between instances of 'str' and 'int'
|
TypeError
|
def get_field_info(self, field):
res = super(JobTypeMetadata, self).get_field_info(field)
if field.field_name == "job_type":
res["choices"] = [choice for choice in res["choices"] if choice[0] != "scan"]
return res
|
def get_field_info(self, field):
res = super(JobTypeMetadata, self).get_field_info(field)
if field.field_name == "job_type":
index = 0
for choice in res["choices"]:
if choice[0] == "scan":
res["choices"].pop(index)
break
index += 1
return res
|
https://github.com/ansible/awx/issues/3329
|
awx_1 | 18:58:54 uwsgi.1 | 2019-02-27 18:58:54,069 ERROR django.request Internal Server Error: /api/v2/instance_groups/1/instances/
awx_1 | 18:58:54 uwsgi.1 | Traceback (most recent call last):
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
awx_1 | 18:58:54 uwsgi.1 | response = get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/wsgi.py", line 72, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | return super(AWXWSGIHandler, self)._legacy_get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | response = self._get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = self.process_exception_by_middleware(e, request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
awx_1 | 18:58:54 uwsgi.1 | return func(*args, **kwds)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
awx_1 | 18:58:54 uwsgi.1 | return view_func(*args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/generic/base.py", line 68, in view
awx_1 | 18:58:54 uwsgi.1 | return self.dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/generics.py", line 334, in dispatch
awx_1 | 18:58:54 uwsgi.1 | return super(APIView, self).dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 494, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = self.handle_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 454, in handle_exception
awx_1 | 18:58:54 uwsgi.1 | self.raise_uncaught_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 491, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = handler(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 505, in options
awx_1 | 18:58:54 uwsgi.1 | data = self.metadata_class().determine_metadata(request, self)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 198, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | metadata = super(Metadata, self).determine_metadata(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/metadata.py", line 69, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | actions = self.determine_actions(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 256, in determine_actions
awx_1 | 18:58:54 uwsgi.1 | for field in actions[method]:
awx_1 | 18:58:54 uwsgi.1 | RuntimeError: OrderedDict mutated during iteration
awx_1 | 18:58:54 uwsgi.1 | 2019-02-27 18:58:54,069 ERROR django.request Internal Server Error: /api/v2/instance_groups/1/instances/
awx_1 | 18:58:54 uwsgi.1 | Traceback (most recent call last):
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
awx_1 | 18:58:54 uwsgi.1 | response = get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/wsgi.py", line 72, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | return super(AWXWSGIHandler, self)._legacy_get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | response = self._get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = self.process_exception_by_middleware(e, request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
awx_1 | 18:58:54 uwsgi.1 | return func(*args, **kwds)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
awx_1 | 18:58:54 uwsgi.1 | return view_func(*args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/generic/base.py", line 68, in view
awx_1 | 18:58:54 uwsgi.1 | return self.dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/generics.py", line 334, in dispatch
awx_1 | 18:58:54 uwsgi.1 | return super(APIView, self).dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 494, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = self.handle_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 454, in handle_exception
awx_1 | 18:58:54 uwsgi.1 | self.raise_uncaught_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 491, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = handler(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 505, in options
awx_1 | 18:58:54 uwsgi.1 | data = self.metadata_class().determine_metadata(request, self)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 198, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | metadata = super(Metadata, self).determine_metadata(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/metadata.py", line 69, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | actions = self.determine_actions(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 256, in determine_actions
awx_1 | 18:58:54 uwsgi.1 | for field in actions[method]:
awx_1 | 18:58:54 uwsgi.1 | RuntimeError: OrderedDict mutated during iteration
awx_1 | 18:58:54 uwsgi.1 | 172.18.0.1 OPTIONS /api/v2/instance_groups/1/instances/ - HTTP/1.1 500
|
RuntimeError
|
def determine_actions(self, request, view):
actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view)
method = "POST"
if method in actions:
for field in list(actions[method].keys()):
if field == "id":
continue
actions[method].pop(field)
return actions
|
def determine_actions(self, request, view):
actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view)
method = "POST"
if method in actions:
for field in actions[method]:
if field == "id":
continue
actions[method].pop(field)
return actions
|
https://github.com/ansible/awx/issues/3329
|
awx_1 | 18:58:54 uwsgi.1 | 2019-02-27 18:58:54,069 ERROR django.request Internal Server Error: /api/v2/instance_groups/1/instances/
awx_1 | 18:58:54 uwsgi.1 | Traceback (most recent call last):
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
awx_1 | 18:58:54 uwsgi.1 | response = get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/wsgi.py", line 72, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | return super(AWXWSGIHandler, self)._legacy_get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | response = self._get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = self.process_exception_by_middleware(e, request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
awx_1 | 18:58:54 uwsgi.1 | return func(*args, **kwds)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
awx_1 | 18:58:54 uwsgi.1 | return view_func(*args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/generic/base.py", line 68, in view
awx_1 | 18:58:54 uwsgi.1 | return self.dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/generics.py", line 334, in dispatch
awx_1 | 18:58:54 uwsgi.1 | return super(APIView, self).dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 494, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = self.handle_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 454, in handle_exception
awx_1 | 18:58:54 uwsgi.1 | self.raise_uncaught_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 491, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = handler(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 505, in options
awx_1 | 18:58:54 uwsgi.1 | data = self.metadata_class().determine_metadata(request, self)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 198, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | metadata = super(Metadata, self).determine_metadata(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/metadata.py", line 69, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | actions = self.determine_actions(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 256, in determine_actions
awx_1 | 18:58:54 uwsgi.1 | for field in actions[method]:
awx_1 | 18:58:54 uwsgi.1 | RuntimeError: OrderedDict mutated during iteration
awx_1 | 18:58:54 uwsgi.1 | 2019-02-27 18:58:54,069 ERROR django.request Internal Server Error: /api/v2/instance_groups/1/instances/
awx_1 | 18:58:54 uwsgi.1 | Traceback (most recent call last):
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/exception.py", line 41, in inner
awx_1 | 18:58:54 uwsgi.1 | response = get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/wsgi.py", line 72, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | return super(AWXWSGIHandler, self)._legacy_get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
awx_1 | 18:58:54 uwsgi.1 | response = self._get_response(request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 187, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = self.process_exception_by_middleware(e, request)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/core/handlers/base.py", line 185, in _get_response
awx_1 | 18:58:54 uwsgi.1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
awx_1 | 18:58:54 uwsgi.1 | return func(*args, **kwds)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
awx_1 | 18:58:54 uwsgi.1 | return view_func(*args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/django/views/generic/base.py", line 68, in view
awx_1 | 18:58:54 uwsgi.1 | return self.dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/generics.py", line 334, in dispatch
awx_1 | 18:58:54 uwsgi.1 | return super(APIView, self).dispatch(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 494, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = self.handle_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 454, in handle_exception
awx_1 | 18:58:54 uwsgi.1 | self.raise_uncaught_exception(exc)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 491, in dispatch
awx_1 | 18:58:54 uwsgi.1 | response = handler(request, *args, **kwargs)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/views.py", line 505, in options
awx_1 | 18:58:54 uwsgi.1 | data = self.metadata_class().determine_metadata(request, self)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 198, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | metadata = super(Metadata, self).determine_metadata(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "/venv/awx/lib64/python3.6/site-packages/rest_framework/metadata.py", line 69, in determine_metadata
awx_1 | 18:58:54 uwsgi.1 | actions = self.determine_actions(request, view)
awx_1 | 18:58:54 uwsgi.1 | File "./awx/api/metadata.py", line 256, in determine_actions
awx_1 | 18:58:54 uwsgi.1 | for field in actions[method]:
awx_1 | 18:58:54 uwsgi.1 | RuntimeError: OrderedDict mutated during iteration
awx_1 | 18:58:54 uwsgi.1 | 172.18.0.1 OPTIONS /api/v2/instance_groups/1/instances/ - HTTP/1.1 500
|
RuntimeError
|
def awx_periodic_scheduler():
with advisory_lock("awx_periodic_scheduler_lock", wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.save()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license()
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.save() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn(
"Cache timeout is in the future, bypassing schedule for template %s"
% str(template.id)
)
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(
**job_kwargs
)
logger.info(
"Spawned {} from schedule {}-{}.".format(
new_unified_job.log_format, schedule.name, schedule.pk
)
)
if invalid_license:
new_unified_job.status = "failed"
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=["status", "job_explanation"])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception("Error spawning scheduled job.")
continue
if not can_start:
new_unified_job.status = "failed"
new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials"
new_unified_job.save(update_fields=["status", "job_explanation"])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification(
"schedules-changed", dict(id=schedule.id, group_name="schedules")
)
state.save()
|
def awx_periodic_scheduler():
with advisory_lock("awx_periodic_scheduler_lock", wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.save()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license()
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.save() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn(
"Cache timeout is in the future, bypassing schedule for template %s"
% str(template.id)
)
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(
**job_kwargs
)
logger.info(
six.text_type("Spawned {} from schedule {}-{}.").format(
new_unified_job.log_format, schedule.name, schedule.pk
)
)
if invalid_license:
new_unified_job.status = "failed"
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=["status", "job_explanation"])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception("Error spawning scheduled job.")
continue
if not can_start:
new_unified_job.status = "failed"
new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials"
new_unified_job.save(update_fields=["status", "job_explanation"])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification(
"schedules-changed", dict(id=schedule.id, group_name="schedules")
)
state.save()
|
https://github.com/ansible/awx/issues/3200
|
2019-02-10 11:40:25,316 ERROR awx.main.tasks Error spawning scheduled job.
Traceback (most recent call last):
File "/tasks.py", line 471, in awx_periodic_scheduler
NameError: name 'six' is not defined
|
NameError
|
def get_path_to_ansible_inventory(self):
venv_exe = os.path.join(self.venv_path, "bin", "ansible-inventory")
if os.path.exists(venv_exe):
return venv_exe
elif os.path.exists(os.path.join(self.venv_path, "bin", "ansible")):
# if bin/ansible exists but bin/ansible-inventory doesn't, it's
# probably a really old version of ansible that doesn't support
# ansible-inventory
raise RuntimeError(
"{} does not exist (please upgrade to ansible >= 2.4)".format(venv_exe)
)
return shutil.which("ansible-inventory")
|
def get_path_to_ansible_inventory(self):
venv_exe = os.path.join(self.venv_path, "bin", "ansible-inventory")
if os.path.exists(venv_exe):
return venv_exe
return shutil.which("ansible-inventory")
|
https://github.com/ansible/awx/issues/3139
|
stdout:
6.399 INFO Updating inventory 239: Inventory - StaffSalary\ufffd
6.438 INFO Reading Ansible inventory source: /awx_devel/awx/plugins/inventory/openstack_inventory.py
6.439 INFO Using VIRTUAL_ENV: /venv/python2_ansible23/
6.439 INFO Using PATH: /venv/python2_ansible23/bin:/venv/awx/bin:/venv/awx/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
6.439 INFO Using PYTHONPATH: /venv/python2_ansible23/lib/python2.7/site-packages:
Traceback (most recent call last):
File "/usr/local/bin/awx-manage", line 9, in <module>
load_entry_point('awx', 'console_scripts', 'awx-manage')()
File "/awx_devel/awx/__init__.py", line 150, in manage
execute_from_command_line(sys.argv)
File "/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/awx_devel/awx/main/management/commands/inventory_import.py", line 1049, in handle
raise exc
File "/awx_devel/awx/main/management/commands/inventory_import.py", line 949, in handle
data = AnsibleInventoryLoader(source=source, is_custom=self.is_custom, venv_path=venv_path).load()
File "/awx_devel/awx/main/management/commands/inventory_import.py", line 194, in load
return self.command_to_json(base_args + ['--list'])
File "/awx_devel/awx/main/management/commands/inventory_import.py", line 171, in command_to_json
self.method, proc.returncode, raw_stdout, stderr))
RuntimeError: ansible-inventory failed (rc=1) with stdout:
stderr:
ERROR! Ansible sub-program not implemented: ansible-inventory
|
RuntimeError
|
def get_base_args(self):
# get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
bargs = [self.get_path_to_ansible_inventory(), "-i", self.source]
logger.debug("Using base command: {}".format(" ".join(bargs)))
return bargs
|
def get_base_args(self):
# get ansible-inventory absolute path for running in bubblewrap/proot, in Popen
abs_ansible_inventory = shutil.which("ansible-inventory")
bargs = [abs_ansible_inventory, "-i", self.source]
logger.debug("Using base command: {}".format(" ".join(bargs)))
return bargs
|
https://github.com/ansible/awx/issues/3056
|
1.570 INFO Updating inventory 3: roman cluster
1.584 INFO Reading Ansible inventory source: /var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/plugins/inventory/ec2.py
1.586 INFO Using VIRTUAL_ENV: /var/lib/awx/venv/my-custom-venv/
1.586 INFO Using PATH: /var/lib/awx/venv/my-custom-venv/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
1.586 INFO Using PYTHONPATH: /var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages:
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/bin/awx-manage", line 11, in <module>
load_entry_point('awx==3.0.0.0', 'console_scripts', 'awx-manage')()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/__init__.py", line 150, in manage
execute_from_command_line(sys.argv)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 1030, in handle
raise exc
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 930, in handle
data = AnsibleInventoryLoader(source=source, is_custom=self.is_custom, venv_path=venv_path).load()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 176, in load
return self.command_to_json(base_args + ['--list'])
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 159, in command_to_json
self.method, proc.returncode, stdout, stderr))
RuntimeError: ansible-inventory failed (rc=1) with stdout:
stderr:
Traceback (most recent call last):
File "/usr/bin/ansible-inventory", line 67, in <module>
import ansible.constants as C
File "/var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages/ansible/constants.py", line 17, in <module>
from ansible.config.manager import ConfigManager, ensure_type, get_ini_config_value
File "/var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages/ansible/config/manager.py", line 16, in <module>
from yaml import load as yaml_load
File "/var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages/yaml/__init__.py", line 284
class YAMLObject(metaclass=YAMLObjectMetaclass):
^
SyntaxError: invalid syntax
|
RuntimeError
|
def _delete_hosts(self):
"""
For each host in the database that is NOT in the local list, delete
it. When importing from a cloud inventory source attached to a
specific group, only delete hosts beneath that group. Delete each
host individually so signal handlers will run.
"""
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
hosts_qs = self.inventory_source.hosts
# Build list of all host pks, remove all that should not be deleted.
del_host_pks = set(hosts_qs.values_list("pk", flat=True))
if self.instance_id_var:
all_instance_ids = list(self.mem_instance_id_map.keys())
instance_ids = []
for offset in range(0, len(all_instance_ids), self._batch_size):
instance_ids = all_instance_ids[offset : (offset + self._batch_size)]
for host_pk in hosts_qs.filter(instance_id__in=instance_ids).values_list(
"pk", flat=True
):
del_host_pks.discard(host_pk)
for host_pk in set(
[v for k, v in self.db_instance_id_map.items() if k in instance_ids]
):
del_host_pks.discard(host_pk)
all_host_names = list(
set(self.mem_instance_id_map.values())
- set(self.all_group.all_hosts.keys())
)
else:
all_host_names = list(self.all_group.all_hosts.keys())
for offset in range(0, len(all_host_names), self._batch_size):
host_names = all_host_names[offset : (offset + self._batch_size)]
for host_pk in hosts_qs.filter(name__in=host_names).values_list(
"pk", flat=True
):
del_host_pks.discard(host_pk)
# Now delete all remaining hosts in batches.
all_del_pks = sorted(list(del_host_pks))
for offset in range(0, len(all_del_pks), self._batch_size):
del_pks = all_del_pks[offset : (offset + self._batch_size)]
for host in hosts_qs.filter(pk__in=del_pks):
host_name = host.name
host.delete()
logger.debug('Deleted host "%s"', host_name)
if settings.SQL_DEBUG:
logger.warning(
"host deletions took %d queries for %d hosts",
len(connection.queries) - queries_before,
len(all_del_pks),
)
|
def _delete_hosts(self):
"""
For each host in the database that is NOT in the local list, delete
it. When importing from a cloud inventory source attached to a
specific group, only delete hosts beneath that group. Delete each
host individually so signal handlers will run.
"""
if settings.SQL_DEBUG:
queries_before = len(connection.queries)
hosts_qs = self.inventory_source.hosts
# Build list of all host pks, remove all that should not be deleted.
del_host_pks = set(hosts_qs.values_list("pk", flat=True))
if self.instance_id_var:
all_instance_ids = self.mem_instance_id_map.keys()
instance_ids = []
for offset in range(0, len(all_instance_ids), self._batch_size):
instance_ids = all_instance_ids[offset : (offset + self._batch_size)]
for host_pk in hosts_qs.filter(instance_id__in=instance_ids).values_list(
"pk", flat=True
):
del_host_pks.discard(host_pk)
for host_pk in set(
[v for k, v in self.db_instance_id_map.items() if k in instance_ids]
):
del_host_pks.discard(host_pk)
all_host_names = list(
set(self.mem_instance_id_map.values())
- set(self.all_group.all_hosts.keys())
)
else:
all_host_names = list(self.all_group.all_hosts.keys())
for offset in range(0, len(all_host_names), self._batch_size):
host_names = all_host_names[offset : (offset + self._batch_size)]
for host_pk in hosts_qs.filter(name__in=host_names).values_list(
"pk", flat=True
):
del_host_pks.discard(host_pk)
# Now delete all remaining hosts in batches.
all_del_pks = sorted(list(del_host_pks))
for offset in range(0, len(all_del_pks), self._batch_size):
del_pks = all_del_pks[offset : (offset + self._batch_size)]
for host in hosts_qs.filter(pk__in=del_pks):
host_name = host.name
host.delete()
logger.debug('Deleted host "%s"', host_name)
if settings.SQL_DEBUG:
logger.warning(
"host deletions took %d queries for %d hosts",
len(connection.queries) - queries_before,
len(all_del_pks),
)
|
https://github.com/ansible/awx/issues/3056
|
1.570 INFO Updating inventory 3: roman cluster
1.584 INFO Reading Ansible inventory source: /var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/plugins/inventory/ec2.py
1.586 INFO Using VIRTUAL_ENV: /var/lib/awx/venv/my-custom-venv/
1.586 INFO Using PATH: /var/lib/awx/venv/my-custom-venv/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/var/lib/awx/venv/awx/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
1.586 INFO Using PYTHONPATH: /var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages:
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/bin/awx-manage", line 11, in <module>
load_entry_point('awx==3.0.0.0', 'console_scripts', 'awx-manage')()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/__init__.py", line 150, in manage
execute_from_command_line(sys.argv)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 1030, in handle
raise exc
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 930, in handle
data = AnsibleInventoryLoader(source=source, is_custom=self.is_custom, venv_path=venv_path).load()
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 176, in load
return self.command_to_json(base_args + ['--list'])
File "/var/lib/awx/venv/awx/lib64/python3.6/site-packages/awx/main/management/commands/inventory_import.py", line 159, in command_to_json
self.method, proc.returncode, stdout, stderr))
RuntimeError: ansible-inventory failed (rc=1) with stdout:
stderr:
Traceback (most recent call last):
File "/usr/bin/ansible-inventory", line 67, in <module>
import ansible.constants as C
File "/var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages/ansible/constants.py", line 17, in <module>
from ansible.config.manager import ConfigManager, ensure_type, get_ini_config_value
File "/var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages/ansible/config/manager.py", line 16, in <module>
from yaml import load as yaml_load
File "/var/lib/awx/venv/my-custom-venv/lib/python3.6/site-packages/yaml/__init__.py", line 284
class YAMLObject(metaclass=YAMLObjectMetaclass):
^
SyntaxError: invalid syntax
|
RuntimeError
|
def api_exception_handler(exc, context):
"""
Override default API exception handler to catch IntegrityError exceptions.
"""
if isinstance(exc, IntegrityError):
exc = ParseError(exc.args[0])
if isinstance(exc, FieldError):
exc = ParseError(exc.args[0])
if isinstance(context["view"], UnifiedJobStdout):
context["view"].renderer_classes = [
BrowsableAPIRenderer,
renderers.JSONRenderer,
]
return exception_handler(exc, context)
|
def api_exception_handler(exc, context):
"""
Override default API exception handler to catch IntegrityError exceptions.
"""
if isinstance(exc, IntegrityError):
exc = ParseError(exc.args[0])
if isinstance(exc, FieldError):
exc = ParseError(exc.args[0])
return exception_handler(exc, context)
|
https://github.com/ansible/awx/issues/2112
|
2018-07-31 08:52:33,509 ERROR django.request Internal Server Error: /api/v2/settings/all/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 260, in patch
return self.partial_update(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 745, in partial_update
return super(RetrieveUpdateAPIView, self).partial_update(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 84, in partial_update
return self.update(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 741, in update
return super(RetrieveUpdateAPIView, self).update(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 69, in update
serializer.is_valid(raise_exception=True)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 236, in is_valid
self._validated_data = self.run_validation(self.initial_data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 435, in run_validation
value = self.to_internal_value(data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 465, in to_internal_value
validated_value = field.run_validation(primitive_value)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/fields.py", line 523, in run_validation
value = self.to_internal_value(data)
File "/usr/lib/python2.7/site-packages/awx/conf/serializers.py", line 57, in to_internal_value
obj = super(SettingFieldMixin, self).to_internal_value(value)
File "/usr/lib/python2.7/site-packages/awx/sso/fields.py", line 401, in to_internal_value
return cls(**params_sanitized)
TypeError: __init__() takes at least 2 arguments (2 given)
|
TypeError
|
def run_pexpect(
args,
cwd,
env,
logfile,
cancelled_callback=None,
expect_passwords={},
extra_update_fields=None,
idle_timeout=None,
job_timeout=0,
pexpect_timeout=5,
proot_cmd="bwrap",
):
"""
Run the given command using pexpect to capture output and provide
passwords when requested.
:param args: a list of `subprocess.call`-style arguments
representing a subprocess e.g., ['ls', '-la']
:param cwd: the directory in which the subprocess should
run
:param env: a dict containing environment variables for the
subprocess, ala `os.environ`
:param logfile: a file-like object for capturing stdout
:param cancelled_callback: a callable - which returns `True` or `False`
- signifying if the job has been prematurely
cancelled
:param expect_passwords: a dict of regular expression password prompts
to input values, i.e., {r'Password:\s*?$':
'some_password'}
:param extra_update_fields: a dict used to specify DB fields which should
be updated on the underlying model
object after execution completes
:param idle_timeout a timeout (in seconds); if new output is not
sent to stdout in this interval, the process
will be terminated
:param job_timeout a timeout (in seconds); if the total job runtime
exceeds this, the process will be killed
:param pexpect_timeout a timeout (in seconds) to wait on
`pexpect.spawn().expect()` calls
:param proot_cmd the command used to isolate processes, `bwrap`
Returns a tuple (status, return_code) i.e., `('successful', 0)`
"""
expect_passwords[pexpect.TIMEOUT] = None
expect_passwords[pexpect.EOF] = None
if not isinstance(expect_passwords, collections.OrderedDict):
# We iterate over `expect_passwords.keys()` and
# `expect_passwords.values()` separately to map matched inputs to
# patterns and choose the proper string to send to the subprocess;
# enforce usage of an OrderedDict so that the ordering of elements in
# `keys()` matches `values()`.
expect_passwords = collections.OrderedDict(expect_passwords)
password_patterns = expect_passwords.keys()
password_values = expect_passwords.values()
child = pexpect.spawn(
args[0],
args[1:],
cwd=cwd,
env=env,
ignore_sighup=True,
encoding="utf-8",
echo=False,
use_poll=True,
)
child.logfile_read = logfile
canceled = False
timed_out = False
errored = False
last_stdout_update = time.time()
job_start = time.time()
while child.isalive():
result_id = child.expect(
password_patterns, timeout=pexpect_timeout, searchwindowsize=100
)
password = password_values[result_id]
if password is not None:
child.sendline(password)
last_stdout_update = time.time()
if cancelled_callback:
try:
canceled = cancelled_callback()
except Exception:
logger.exception(
"Could not check cancel callback - canceling immediately"
)
if isinstance(extra_update_fields, dict):
extra_update_fields["job_explanation"] = (
"System error during job execution, check system logs"
)
errored = True
else:
canceled = False
if (
not canceled
and job_timeout != 0
and (time.time() - job_start) > job_timeout
):
timed_out = True
if isinstance(extra_update_fields, dict):
extra_update_fields["job_explanation"] = "Job terminated due to timeout"
if canceled or timed_out or errored:
handle_termination(child.pid, child.args, proot_cmd, is_cancel=canceled)
if idle_timeout and (time.time() - last_stdout_update) > idle_timeout:
child.close(True)
canceled = True
if errored:
return "error", child.exitstatus
elif canceled:
return "canceled", child.exitstatus
elif child.exitstatus == 0 and not timed_out:
return "successful", child.exitstatus
else:
return "failed", child.exitstatus
|
def run_pexpect(
args,
cwd,
env,
logfile,
cancelled_callback=None,
expect_passwords={},
extra_update_fields=None,
idle_timeout=None,
job_timeout=0,
pexpect_timeout=5,
proot_cmd="bwrap",
):
"""
Run the given command using pexpect to capture output and provide
passwords when requested.
:param args: a list of `subprocess.call`-style arguments
representing a subprocess e.g., ['ls', '-la']
:param cwd: the directory in which the subprocess should
run
:param env: a dict containing environment variables for the
subprocess, ala `os.environ`
:param logfile: a file-like object for capturing stdout
:param cancelled_callback: a callable - which returns `True` or `False`
- signifying if the job has been prematurely
cancelled
:param expect_passwords: a dict of regular expression password prompts
to input values, i.e., {r'Password:\s*?$':
'some_password'}
:param extra_update_fields: a dict used to specify DB fields which should
be updated on the underlying model
object after execution completes
:param idle_timeout a timeout (in seconds); if new output is not
sent to stdout in this interval, the process
will be terminated
:param job_timeout a timeout (in seconds); if the total job runtime
exceeds this, the process will be killed
:param pexpect_timeout a timeout (in seconds) to wait on
`pexpect.spawn().expect()` calls
:param proot_cmd the command used to isolate processes, `bwrap`
Returns a tuple (status, return_code) i.e., `('successful', 0)`
"""
expect_passwords[pexpect.TIMEOUT] = None
expect_passwords[pexpect.EOF] = None
if not isinstance(expect_passwords, collections.OrderedDict):
# We iterate over `expect_passwords.keys()` and
# `expect_passwords.values()` separately to map matched inputs to
# patterns and choose the proper string to send to the subprocess;
# enforce usage of an OrderedDict so that the ordering of elements in
# `keys()` matches `values()`.
expect_passwords = collections.OrderedDict(expect_passwords)
password_patterns = expect_passwords.keys()
password_values = expect_passwords.values()
child = pexpect.spawn(
args[0],
args[1:],
cwd=cwd,
env=env,
ignore_sighup=True,
encoding="utf-8",
echo=False,
poll=True,
)
child.logfile_read = logfile
canceled = False
timed_out = False
errored = False
last_stdout_update = time.time()
job_start = time.time()
while child.isalive():
result_id = child.expect(
password_patterns, timeout=pexpect_timeout, searchwindowsize=100
)
password = password_values[result_id]
if password is not None:
child.sendline(password)
last_stdout_update = time.time()
if cancelled_callback:
try:
canceled = cancelled_callback()
except Exception:
logger.exception(
"Could not check cancel callback - canceling immediately"
)
if isinstance(extra_update_fields, dict):
extra_update_fields["job_explanation"] = (
"System error during job execution, check system logs"
)
errored = True
else:
canceled = False
if (
not canceled
and job_timeout != 0
and (time.time() - job_start) > job_timeout
):
timed_out = True
if isinstance(extra_update_fields, dict):
extra_update_fields["job_explanation"] = "Job terminated due to timeout"
if canceled or timed_out or errored:
handle_termination(child.pid, child.args, proot_cmd, is_cancel=canceled)
if idle_timeout and (time.time() - last_stdout_update) > idle_timeout:
child.close(True)
canceled = True
if errored:
return "error", child.exitstatus
elif canceled:
return "canceled", child.exitstatus
elif child.exitstatus == 0 and not timed_out:
return "successful", child.exitstatus
else:
return "failed", child.exitstatus
|
https://github.com/ansible/awx/issues/1972
|
2018-06-08T09:44:25.203865658Z Using /etc/ansible/ansible.cfg as config file
2018-06-08T09:44:25.924522979Z 127.0.0.1 | SUCCESS => {
2018-06-08T09:44:25.924550559Z "changed": false,
2018-06-08T09:44:25.924554135Z "elapsed": 0,
2018-06-08T09:44:25.924557026Z "path": null,
2018-06-08T09:44:25.924559897Z "port": 5432,
2018-06-08T09:44:25.924562551Z "search_regex": null,
2018-06-08T09:44:25.924565301Z "state": "started"
2018-06-08T09:44:25.924568201Z }
2018-06-08T09:44:26.233707860Z Using /etc/ansible/ansible.cfg as config file
2018-06-08T09:44:26.922357723Z 127.0.0.1 | SUCCESS => {
2018-06-08T09:44:26.922393193Z "changed": false,
2018-06-08T09:44:26.922397506Z "elapsed": 0,
2018-06-08T09:44:26.922400662Z "path": null,
2018-06-08T09:44:26.922403883Z "port": 11211,
2018-06-08T09:44:26.922406969Z "search_regex": null,
2018-06-08T09:44:26.922410214Z "state": "started"
2018-06-08T09:44:26.922413438Z }
2018-06-08T09:44:27.339105885Z Using /etc/ansible/ansible.cfg as config file
2018-06-08T09:44:28.131366751Z 127.0.0.1 | SUCCESS => {
2018-06-08T09:44:28.131407560Z "changed": false,
2018-06-08T09:44:28.131416179Z "elapsed": 0,
2018-06-08T09:44:28.131421344Z "path": null,
2018-06-08T09:44:28.131426416Z "port": 5672,
2018-06-08T09:44:28.131431649Z "search_regex": null,
2018-06-08T09:44:28.131436075Z "state": "started"
2018-06-08T09:44:28.131440581Z }
2018-06-08T09:44:28.516278745Z Using /etc/ansible/ansible.cfg as config file
2018-06-08T09:44:29.410034950Z 127.0.0.1 | SUCCESS => {
2018-06-08T09:44:29.410088710Z "changed": true,
2018-06-08T09:44:29.410094849Z "db": "awx"
2018-06-08T09:44:29.410098297Z }
2018-06-08T09:44:32.007342016Z Operations to perform:
2018-06-08T09:44:32.007383063Z Apply all migrations: auth, conf, contenttypes, djcelery, main, network_ui, oauth2_provider, sessions, sites, social_django, sso, taggit
2018-06-08T09:44:32.007390575Z Running migrations:
2018-06-08T09:44:32.024935105Z Applying contenttypes.0001_initial... OK
2018-06-08T09:44:32.042836650Z Applying contenttypes.0002_remove_content_type_name... OK
2018-06-08T09:44:32.127353303Z Applying auth.0001_initial... OK
2018-06-08T09:44:32.145135123Z Applying auth.0002_alter_permission_name_max_length... OK
2018-06-08T09:44:32.165300076Z Applying auth.0003_alter_user_email_max_length... OK
2018-06-08T09:44:32.178506403Z Applying auth.0004_alter_user_username_opts... OK
2018-06-08T09:44:32.198106968Z Applying auth.0005_alter_user_last_login_null... OK
2018-06-08T09:44:32.203816548Z Applying auth.0006_require_contenttypes_0002... OK
2018-06-08T09:44:32.218492670Z Applying auth.0007_alter_validators_add_error_messages... OK
2018-06-08T09:44:32.234294362Z Applying auth.0008_alter_user_username_max_length... OK
2018-06-08T09:44:32.285106264Z Applying taggit.0001_initial... OK
2018-06-08T09:44:32.302759193Z Applying taggit.0002_auto_20150616_2121... OK
2018-06-08T09:44:44.838536570Z Applying main.0001_initial... OK
2018-06-08T09:45:05.662400582Z Applying main.0002_squashed_v300_release... OK
2018-06-08T09:45:10.106556771Z Applying main.0003_squashed_v300_v303_updates... OK
2018-06-08T09:45:35.586672374Z Applying main.0004_squashed_v310_release... OK
2018-06-08T09:45:35.807040763Z Applying conf.0001_initial... OK
2018-06-08T09:45:36.001122421Z Applying conf.0002_v310_copy_tower_settings... OK
2018-06-08T09:45:36.137892279Z Applying conf.0003_v310_JSONField_changes... OK
2018-06-08T09:45:36.366005723Z Applying conf.0004_v320_reencrypt... OK
2018-06-08T09:45:36.595713184Z Applying djcelery.0001_initial... OK
2018-06-08T09:45:36.626829028Z Applying sessions.0001_initial... OK
2018-06-08T09:45:38.322220008Z Applying main.0005_squashed_v310_v313_updates... OK
2018-06-08T09:46:02.372829148Z Applying main.0006_v320_release... OK
2018-06-08T09:46:02.952111866Z 2018-06-08 09:46:02,951 DEBUG awx.main.migrations Removing all Rackspace InventorySource from database.
2018-06-08T09:46:03.436861105Z 2018-06-08 09:46:03,436 DEBUG awx.main.migrations Removing all Azure Credentials from database.
2018-06-08T09:46:03.693091536Z 2018-06-08 09:46:03,692 DEBUG awx.main.migrations Removing all Azure InventorySource from database.
2018-06-08T09:46:03.930165070Z 2018-06-08 09:46:03,929 DEBUG awx.main.migrations Removing all InventorySource that have no link to an Inventory from database.
2018-06-08T09:46:05.695018847Z 2018-06-08 09:46:05,694 DEBUG awx.main.models.credential adding Machine credential type
2018-06-08T09:46:05.698944841Z 2018-06-08 09:46:05,698 DEBUG awx.main.models.credential adding Source Control credential type
2018-06-08T09:46:05.703267352Z 2018-06-08 09:46:05,702 DEBUG awx.main.models.credential adding Vault credential type
2018-06-08T09:46:05.708868367Z 2018-06-08 09:46:05,708 DEBUG awx.main.models.credential adding Network credential type
2018-06-08T09:46:05.714614390Z 2018-06-08 09:46:05,714 DEBUG awx.main.models.credential adding Amazon Web Services credential type
2018-06-08T09:46:05.723113588Z 2018-06-08 09:46:05,722 DEBUG awx.main.models.credential adding OpenStack credential type
2018-06-08T09:46:05.729460063Z 2018-06-08 09:46:05,729 DEBUG awx.main.models.credential adding VMware vCenter credential type
2018-06-08T09:46:05.735112784Z 2018-06-08 09:46:05,734 DEBUG awx.main.models.credential adding Red Hat Satellite 6 credential type
2018-06-08T09:46:05.740621522Z 2018-06-08 09:46:05,740 DEBUG awx.main.models.credential adding Red Hat CloudForms credential type
2018-06-08T09:46:05.747086747Z 2018-06-08 09:46:05,746 DEBUG awx.main.models.credential adding Google Compute Engine credential type
2018-06-08T09:46:05.752800505Z 2018-06-08 09:46:05,752 DEBUG awx.main.models.credential adding Microsoft Azure Resource Manager credential type
2018-06-08T09:46:05.757823326Z 2018-06-08 09:46:05,757 DEBUG awx.main.models.credential adding Insights credential type
2018-06-08T09:46:05.763457411Z 2018-06-08 09:46:05,763 DEBUG awx.main.models.credential adding Red Hat Virtualization credential type
2018-06-08T09:46:05.768385153Z 2018-06-08 09:46:05,768 DEBUG awx.main.models.credential adding Ansible Tower credential type
2018-06-08T09:46:06.008435544Z Applying main.0007_v320_data_migrations... OK
2018-06-08T09:46:12.933848696Z Applying main.0008_v320_drop_v1_credential_fields... OK
2018-06-08T09:46:13.232944928Z Applying main.0009_v322_add_setting_field_for_activity_stream... OK
2018-06-08T09:46:14.896590550Z Applying main.0010_v322_add_ovirt4_tower_inventory... OK
2018-06-08T09:46:15.322624979Z Applying main.0011_v322_encrypt_survey_passwords... OK
2018-06-08T09:46:15.602059828Z Applying main.0012_v322_update_cred_types... OK
2018-06-08T09:46:18.891145025Z Applying main.0013_v330_multi_credential... OK
2018-06-08T09:46:25.282022384Z Applying main.0014_v330_saved_launchtime_configs... OK
2018-06-08T09:46:25.891994173Z Applying main.0015_v330_blank_start_args... OK
2018-06-08T09:46:27.024002808Z Applying main.0016_v330_non_blank_workflow... OK
2018-06-08T09:46:27.482027189Z Applying main.0017_v330_move_deprecated_stdout... OK
2018-06-08T09:46:28.489212584Z Applying main.0018_v330_add_additional_stdout_events... OK
2018-06-08T09:46:29.090407324Z Applying main.0019_v330_custom_virtualenv... OK
2018-06-08T09:46:29.623309143Z Applying main.0020_v330_instancegroup_policies... OK
2018-06-08T09:46:34.225422014Z Applying main.0021_v330_declare_new_rbac_roles... OK
2018-06-08T09:46:34.969383223Z Applying main.0022_v330_create_new_rbac_roles... OK
2018-06-08T09:46:37.280314382Z Applying main.0023_v330_inventory_multicred... OK
2018-06-08T09:46:37.496984889Z Applying main.0024_v330_create_user_session_membership... OK
2018-06-08T09:46:38.801332606Z Applying main.0025_v330_add_oauth_activity_stream_registrar... OK
2018-06-08T09:46:39.122608577Z Applying main.0026_v330_delete_authtoken... OK
2018-06-08T09:46:39.421801004Z Applying main.0027_v330_emitted_events... OK
2018-06-08T09:46:39.791118027Z Applying main.0028_v330_add_tower_verify... OK
2018-06-08T09:46:40.203292440Z Applying main.0030_v330_modify_application... OK
2018-06-08T09:46:40.576987689Z Applying main.0031_v330_encrypt_oauth2_secret... OK
2018-06-08T09:46:41.027046883Z Applying main.0032_v330_polymorphic_delete... OK
2018-06-08T09:46:43.555485198Z Applying main.0033_v330_oauth_help_text... OK
2018-06-08T09:46:44.503618481Z 2018-06-08 09:46:44,503 INFO rbac_migrations Computing role roots..
2018-06-08T09:46:44.504439240Z 2018-06-08 09:46:44,504 INFO rbac_migrations Found 0 roots in 0.000193 seconds, rebuilding ancestry map
2018-06-08T09:46:44.504553948Z 2018-06-08 09:46:44,504 INFO rbac_migrations Rebuild completed in 0.000007 seconds
2018-06-08T09:46:44.504567198Z 2018-06-08 09:46:44,504 INFO rbac_migrations Done.
2018-06-08T09:46:44.510720538Z Applying main.0034_v330_delete_user_role... OK
2018-06-08T09:46:44.726854041Z Applying main.0035_v330_more_oauth2_help_text... OK
2018-06-08T09:46:45.014445988Z Applying main.0036_v330_credtype_remove_become_methods... OK
2018-06-08T09:46:45.430984415Z Applying main.0037_v330_remove_legacy_fact_cleanup... OK
2018-06-08T09:46:48.512977506Z Applying network_ui.0001_initial... OK
2018-06-08T09:46:49.792390169Z Applying oauth2_provider.0001_initial... OK
2018-06-08T09:46:50.521797046Z Applying oauth2_provider.0002_08_updates... OK
2018-06-08T09:46:50.777808169Z Applying oauth2_provider.0003_auto_20160316_1503... OK
2018-06-08T09:46:51.364219303Z Applying oauth2_provider.0004_auto_20160525_1623... OK
2018-06-08T09:46:55.938329130Z Applying oauth2_provider.0005_auto_20170514_1141... OK
2018-06-08T09:46:55.956703684Z Applying sites.0001_initial... OK
2018-06-08T09:46:55.980176273Z Applying sites.0002_alter_domain_unique... OK
2018-06-08T09:46:56.550988518Z Applying social_django.0001_initial... OK
2018-06-08T09:46:56.897441029Z Applying social_django.0002_add_related_name... OK
2018-06-08T09:46:56.928890998Z Applying social_django.0003_alter_email_max_length... OK
2018-06-08T09:46:57.273418609Z Applying social_django.0004_auto_20160423_0400... OK
2018-06-08T09:46:57.325622661Z Applying social_django.0005_auto_20160727_2333... OK
2018-06-08T09:46:57.396781204Z Applying social_django.0006_partial... OK
2018-06-08T09:46:57.446589275Z Applying social_django.0007_code_timestamp... OK
2018-06-08T09:46:57.489062867Z Applying social_django.0008_partial_timestamp... OK
2018-06-08T09:46:58.327336660Z Applying sso.0001_initial... OK
2018-06-08T09:47:00.812236111Z Applying sso.0002_expand_provider_options... OK
2018-06-08T09:47:07.528082784Z Traceback (most recent call last):
2018-06-08T09:47:07.528235426Z File "/usr/bin/awx-manage", line 9, in <module>
2018-06-08T09:47:07.531646889Z load_entry_point('awx==1.0.6.15', 'console_scripts', 'awx-manage')()
2018-06-08T09:47:07.531681489Z File "/usr/lib/python2.7/site-packages/awx/__init__.py", line 109, in manage
2018-06-08T09:47:07.531820552Z execute_from_command_line(sys.argv)
2018-06-08T09:47:07.531849077Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
2018-06-08T09:47:07.532838889Z utility.execute()
2018-06-08T09:47:07.532860930Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
2018-06-08T09:47:07.532868631Z self.fetch_command(subcommand).run_from_argv(self.argv)
2018-06-08T09:47:07.532875206Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
2018-06-08T09:47:07.532881899Z self.execute(*args, **cmd_options)
2018-06-08T09:47:07.532888291Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
2018-06-08T09:47:07.532895159Z output = self.handle(*args, **options)
2018-06-08T09:47:07.532901461Z File "/usr/lib/python2.7/site-packages/awx/main/management/commands/create_preload_data.py", line 47, in handle
2018-06-08T09:47:07.532927140Z created_by=superuser)
2018-06-08T09:47:07.532933858Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/manager.py", line 85, in manager_method
2018-06-08T09:47:07.532940729Z return getattr(self.get_queryset(), name)(*args, **kwargs)
2018-06-08T09:47:07.532947074Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/query.py", line 394, in create
2018-06-08T09:47:07.532953727Z obj.save(force_insert=True, using=self.db)
2018-06-08T09:47:07.532960099Z File "/usr/lib/python2.7/site-packages/awx/main/models/inventory.py", line 700, in save
2018-06-08T09:47:07.532966787Z super(Host, self).save(*args, **kwargs)
2018-06-08T09:47:07.532973006Z File "/usr/lib/python2.7/site-packages/awx/main/models/base.py", line 275, in save
2018-06-08T09:47:07.532979506Z super(PrimordialModel, self).save(*args, **kwargs)
2018-06-08T09:47:07.532985715Z File "/usr/lib/python2.7/site-packages/awx/main/models/base.py", line 164, in save
2018-06-08T09:47:07.533016667Z super(CreatedModifiedModel, self).save(*args, **kwargs)
2018-06-08T09:47:07.533023094Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/base.py", line 808, in save
2018-06-08T09:47:07.533029752Z force_update=force_update, update_fields=update_fields)
2018-06-08T09:47:07.533036137Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/base.py", line 848, in save_base
2018-06-08T09:47:07.533042899Z update_fields=update_fields, raw=raw, using=using,
2018-06-08T09:47:07.533049199Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/dispatch/dispatcher.py", line 193, in send
2018-06-08T09:47:07.533055855Z for receiver in self._live_receivers(sender)
2018-06-08T09:47:07.533062097Z File "/usr/lib/python2.7/site-packages/awx/main/signals.py", line 146, in emit_update_inventory_on_created_or_deleted
2018-06-08T09:47:07.533101465Z update_inventory_computed_fields.delay(inventory.id, True)
2018-06-08T09:47:07.533109814Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/task.py", line 461, in delay
2018-06-08T09:47:07.533116632Z return self.apply_async(args, kwargs)
2018-06-08T09:47:07.533122797Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/task.py", line 573, in apply_async
2018-06-08T09:47:07.533129389Z **dict(self._get_exec_options(), **options)
2018-06-08T09:47:07.533135549Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/base.py", line 345, in send_task
2018-06-08T09:47:07.533142214Z options = router.route(options, name, args, kwargs)
2018-06-08T09:47:07.533165187Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/routes.py", line 50, in route
2018-06-08T09:47:07.533173296Z route = self.lookup_route(task, args, kwargs)
2018-06-08T09:47:07.533179610Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/routes.py", line 78, in lookup_route
2018-06-08T09:47:07.533186326Z return _first_route(self.routes, task, args, kwargs)
2018-06-08T09:47:07.533192567Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/utils/functional.py", line 233, in _matcher
2018-06-08T09:47:07.533199323Z answer = getattr(maybe_evaluate(obj), method)(*args, **kwargs)
2018-06-08T09:47:07.533205798Z File "/usr/lib/python2.7/site-packages/awx/main/utils/ha.py", line 58, in route_for_task
2018-06-08T09:47:07.533212571Z (changed, instance) = Instance.objects.get_or_register()
2018-06-08T09:47:07.533218890Z File "/usr/lib/python2.7/site-packages/awx/main/managers.py", line 106, in get_or_register
2018-06-08T09:47:07.533225500Z return (False, self.me())
2018-06-08T09:47:07.533246715Z File "/usr/lib/python2.7/site-packages/awx/main/managers.py", line 88, in me
2018-06-08T09:47:07.533254282Z raise RuntimeError("No instance found with the current cluster host id")
2018-06-08T09:47:07.533261053Z RuntimeError: No instance found with the current cluster host id
2018-06-08T09:55:56.011873319Z Traceback (most recent call last):
2018-06-08T09:55:56.011942768Z File "/usr/bin/awx-manage", line 9, in <module>
2018-06-08T09:55:56.012043641Z load_entry_point('awx==1.0.6.15', 'console_scripts', 'awx-manage')()
2018-06-08T09:55:56.012053201Z File "/usr/lib/python2.7/site-packages/awx/__init__.py", line 109, in manage
2018-06-08T09:55:56.012056860Z execute_from_command_line(sys.argv)
2018-06-08T09:55:56.012060093Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
2018-06-08T09:55:56.012064075Z utility.execute()
2018-06-08T09:55:56.012067205Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
2018-06-08T09:55:56.012070604Z self.fetch_command(subcommand).run_from_argv(self.argv)
2018-06-08T09:55:56.012094764Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
2018-06-08T09:55:56.012098928Z self.execute(*args, **cmd_options)
2018-06-08T09:55:56.012101661Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
2018-06-08T09:55:56.012104689Z output = self.handle(*args, **options)
2018-06-08T09:55:56.012107380Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
2018-06-08T09:55:56.012110360Z return func(*args, **kwargs)
2018-06-08T09:55:56.012113064Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/transaction.py", line 267, in __exit__
2018-06-08T09:55:56.012116028Z connection.set_autocommit(True)
2018-06-08T09:55:56.012118649Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/backends/base/base.py", line 416, in set_autocommit
2018-06-08T09:55:56.012121560Z self.run_and_clear_commit_hooks()
2018-06-08T09:55:56.012124162Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/backends/base/base.py", line 651, in run_and_clear_commit_hooks
2018-06-08T09:55:56.012127408Z func()
2018-06-08T09:55:56.012130072Z File "/usr/lib/python2.7/site-packages/awx/main/models/ha.py", line 203, in <lambda>
2018-06-08T09:55:56.012133118Z connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())
2018-06-08T09:55:56.012135972Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/task.py", line 573, in apply_async
2018-06-08T09:55:56.012138771Z **dict(self._get_exec_options(), **options)
2018-06-08T09:55:56.012141442Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/base.py", line 354, in send_task
2018-06-08T09:55:56.012148106Z reply_to=reply_to or self.oid, **options
2018-06-08T09:55:56.012180832Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/amqp.py", line 310, in publish_task
2018-06-08T09:55:56.012185276Z **kwargs
2018-06-08T09:55:56.012187984Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/messaging.py", line 172, in publish
2018-06-08T09:55:56.012190838Z routing_key, mandatory, immediate, exchange, declare)
2018-06-08T09:55:56.012193618Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 470, in _ensured
2018-06-08T09:55:56.012196500Z interval_max)
2018-06-08T09:55:56.012199141Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 382, in ensure_connection
2018-06-08T09:55:56.012201985Z interval_start, interval_step, interval_max, callback)
2018-06-08T09:55:56.012204674Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/utils/__init__.py", line 246, in retry_over_time
2018-06-08T09:55:56.012207561Z return fun(*args, **kwargs)
2018-06-08T09:55:56.012214728Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 250, in connect
2018-06-08T09:55:56.012217666Z return self.connection
2018-06-08T09:55:56.012220329Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 756, in connection
2018-06-08T09:55:56.012223211Z self._connection = self._establish_connection()
2018-06-08T09:55:56.012225834Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 711, in _establish_connection
2018-06-08T09:55:56.012242588Z conn = self.transport.establish_connection()
2018-06-08T09:55:56.012245343Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 116, in establish_connection
2018-06-08T09:55:56.012248270Z conn = self.Connection(**opts)
2018-06-08T09:55:56.012251177Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 171, in __init__
2018-06-08T09:55:56.012254081Z (10, 10), # start
2018-06-08T09:55:56.012256719Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/abstract_channel.py", line 67, in wait
2018-06-08T09:55:56.012259573Z self.channel_id, allowed_methods, timeout)
2018-06-08T09:55:56.012262271Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 241, in _wait_method
2018-06-08T09:55:56.012265144Z channel, method_sig, args, content = read_timeout(timeout)
2018-06-08T09:55:56.012267871Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 330, in read_timeout
2018-06-08T09:55:56.012270742Z return self.method_reader.read_method()
2018-06-08T09:55:56.012273435Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/method_framing.py", line 189, in read_method
2018-06-08T09:55:56.012276280Z raise m
2018-06-08T09:55:56.012278888Z socket.error: [Errno 104] Connection reset by peer
2018-06-08T09:55:56.011802960Z Successfully registered instance awx
2018-06-08T09:55:56.012305738Z (changed: True)
2018-06-08T10:04:42.857335051Z Traceback (most recent call last):
2018-06-08T10:04:42.857722102Z File "/usr/bin/awx-manage", line 9, in <module>
2018-06-08T10:04:42.857739150Z load_entry_point('awx==1.0.6.15', 'console_scripts', 'awx-manage')()
2018-06-08T10:04:42.857746411Z File "/usr/lib/python2.7/site-packages/awx/__init__.py", line 109, in manage
2018-06-08T10:04:42.857864449Z execute_from_command_line(sys.argv)
2018-06-08T10:04:42.857881740Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
2018-06-08T10:04:42.857913422Z utility.execute()
2018-06-08T10:04:42.857921399Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
2018-06-08T10:04:42.863630450Z self.fetch_command(subcommand).run_from_argv(self.argv)
2018-06-08T10:04:42.863685784Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
2018-06-08T10:04:42.863695522Z self.execute(*args, **cmd_options)
2018-06-08T10:04:42.863700252Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
2018-06-08T10:04:42.863838268Z output = self.handle(*args, **options)
2018-06-08T10:04:42.863851632Z File "/usr/lib/python2.7/site-packages/awx/main/management/commands/register_queue.py", line 100, in handle
2018-06-08T10:04:42.863857055Z (ig, created, changed) = self.get_create_update_instance_group(queuename, inst_per, inst_min)
2018-06-08T10:04:42.863879403Z File "/usr/lib/python2.7/site-packages/awx/main/management/commands/register_queue.py", line 39, in get_create_update_instance_group
2018-06-08T10:04:42.863884889Z (ig, created) = InstanceGroup.objects.get_or_create(name=queuename)
2018-06-08T10:04:42.863889619Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/manager.py", line 85, in manager_method
2018-06-08T10:04:42.863894821Z return getattr(self.get_queryset(), name)(*args, **kwargs)
2018-06-08T10:04:42.863899594Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/query.py", line 466, in get_or_create
2018-06-08T10:04:42.863904641Z return self._create_object_from_params(lookup, params)
2018-06-08T10:04:42.863909184Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/models/query.py", line 498, in _create_object_from_params
2018-06-08T10:04:42.863914194Z obj = self.create(**params)
2018-06-08T10:04:42.863918696Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/transaction.py", line 267, in __exit__
2018-06-08T10:04:42.863923604Z connection.set_autocommit(True)
2018-06-08T10:04:42.863928075Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/backends/base/base.py", line 416, in set_autocommit
2018-06-08T10:04:42.863933009Z self.run_and_clear_commit_hooks()
2018-06-08T10:04:42.863937458Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/db/backends/base/base.py", line 651, in run_and_clear_commit_hooks
2018-06-08T10:04:42.863953420Z func()
2018-06-08T10:04:42.863958258Z File "/usr/lib/python2.7/site-packages/awx/main/models/ha.py", line 196, in <lambda>
2018-06-08T10:04:42.863963817Z connection.on_commit(lambda: apply_cluster_membership_policies.apply_async())
2018-06-08T10:04:42.863968716Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/task.py", line 573, in apply_async
2018-06-08T10:04:42.863973759Z **dict(self._get_exec_options(), **options)
2018-06-08T10:04:42.863978575Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/base.py", line 354, in send_task
2018-06-08T10:04:42.863983333Z reply_to=reply_to or self.oid, **options
2018-06-08T10:04:42.864002311Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/amqp.py", line 310, in publish_task
2018-06-08T10:04:42.864007479Z **kwargs
2018-06-08T10:04:42.864012925Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/messaging.py", line 172, in publish
2018-06-08T10:04:42.864029213Z routing_key, mandatory, immediate, exchange, declare)
2018-06-08T10:04:42.864034611Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 470, in _ensured
2018-06-08T10:04:42.864039363Z interval_max)
2018-06-08T10:04:42.864043798Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 382, in ensure_connection
2018-06-08T10:04:42.864048578Z interval_start, interval_step, interval_max, callback)
2018-06-08T10:04:42.864052712Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/utils/__init__.py", line 246, in retry_over_time
2018-06-08T10:04:42.864057143Z return fun(*args, **kwargs)
2018-06-08T10:04:42.864061366Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 250, in connect
2018-06-08T10:04:42.864066013Z return self.connection
2018-06-08T10:04:42.864070517Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 756, in connection
2018-06-08T10:04:42.864074967Z self._connection = self._establish_connection()
2018-06-08T10:04:42.864079252Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 711, in _establish_connection
2018-06-08T10:04:42.864083777Z conn = self.transport.establish_connection()
2018-06-08T10:04:42.864088127Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 116, in establish_connection
2018-06-08T10:04:42.864092776Z conn = self.Connection(**opts)
2018-06-08T10:04:42.864115176Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 171, in __init__
2018-06-08T10:04:42.864120796Z (10, 10), # start
2018-06-08T10:04:42.864125154Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/abstract_channel.py", line 67, in wait
2018-06-08T10:04:42.864129491Z self.channel_id, allowed_methods, timeout)
2018-06-08T10:04:42.864133925Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 241, in _wait_method
2018-06-08T10:04:42.864138347Z channel, method_sig, args, content = read_timeout(timeout)
2018-06-08T10:04:42.864142641Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 330, in read_timeout
2018-06-08T10:04:42.864147139Z return self.method_reader.read_method()
2018-06-08T10:04:42.864151820Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/method_framing.py", line 189, in read_method
2018-06-08T10:04:42.864156287Z raise m
2018-06-08T10:04:42.864160393Z socket.error: [Errno 104] Connection reset by peer
2018-06-08T10:04:43.089572693Z 2018-06-08 10:04:43,089 CRIT Supervisor is running as root. Privileges were not dropped because no user is specified in the config file. If you intend to run as root, you can set user=root in the config file to avoid this message.
2018-06-08T10:04:43.103396894Z 2018-06-08 10:04:43,102 INFO RPC interface 'supervisor' initialized
2018-06-08T10:04:43.103508203Z 2018-06-08 10:04:43,102 CRIT Server 'unix_http_server' running without any HTTP authentication checking
2018-06-08T10:04:43.107666094Z 2018-06-08 10:04:43,103 INFO supervisord started with pid 186
2018-06-08T10:04:44.109027319Z 2018-06-08 10:04:44,106 INFO spawned: 'awx-config-watcher' with pid 189
2018-06-08T10:04:44.111463250Z 2018-06-08 10:04:44,108 INFO spawned: 'celery' with pid 190
2018-06-08T10:04:44.112582351Z 2018-06-08 10:04:44,111 INFO spawned: 'channels-worker' with pid 191
2018-06-08T10:04:44.113479225Z 2018-06-08 10:04:44,113 INFO spawned: 'callback-receiver' with pid 192
2018-06-08T10:04:44.190085858Z READY
2018-06-08T10:04:45.191520064Z 2018-06-08 10:04:45,191 INFO success: awx-config-watcher entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
2018-06-08T10:04:45.191590284Z 2018-06-08 10:04:45,191 INFO success: celery entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
2018-06-08T10:04:45.191732227Z 2018-06-08 10:04:45,191 INFO success: channels-worker entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
2018-06-08T10:04:45.191755811Z 2018-06-08 10:04:45,191 INFO success: callback-receiver entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
2018-06-08T10:04:46.694300426Z 2018-06-08 10:04:46,693 INFO awx.main.tasks Syncing Schedules
2018-06-08T10:04:46.724503713Z 2018-06-08 10:04:46,724 - INFO - runworker - Using single-threaded worker.
2018-06-08T10:04:46.726218420Z 2018-06-08 10:04:46,724 - INFO - runworker - Running worker against channel layer default (asgi_amqp.core.AMQPChannelLayer)
2018-06-08T10:04:46.726245432Z 2018-06-08 10:04:46,725 - INFO - worker - Listening on channels websocket.connect, websocket.disconnect, websocket.receive
2018-06-08T10:04:46.744816098Z /var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/platforms.py:812: RuntimeWarning: You are running the worker with superuser privileges, which is
2018-06-08T10:04:46.744883126Z absolutely not recommended!
2018-06-08T10:04:46.744891139Z
2018-06-08T10:04:46.744894409Z Please specify a different user using the -u option.
2018-06-08T10:04:46.744897721Z
2018-06-08T10:04:46.744901021Z User information: uid=0 euid=0 gid=0 egid=0
2018-06-08T10:04:46.744904159Z
2018-06-08T10:04:46.744907146Z uid=uid, euid=euid, gid=gid, egid=egid,
2018-06-08T10:04:46.752662535Z [2018-06-08 10:04:46,752: DEBUG/MainProcess] | Worker: Preparing bootsteps.
2018-06-08T10:04:46.756751333Z [2018-06-08 10:04:46,756: DEBUG/MainProcess] | Worker: Building graph...
2018-06-08T10:04:46.757722612Z [2018-06-08 10:04:46,757: DEBUG/MainProcess] | Worker: New boot order: {Timer, Hub, Queues (intra), Pool, Autoscaler, StateDB, Autoreloader, Beat, Consumer}
2018-06-08T10:04:46.763787806Z 2018-06-08 10:04:46,759 WARNING awx.main.tasks celery worker dynamic --autoscale=50,4
2018-06-08T10:04:46.772987173Z [2018-06-08 10:04:46,771: DEBUG/MainProcess] | Consumer: Preparing bootsteps.
2018-06-08T10:04:46.773019504Z [2018-06-08 10:04:46,772: DEBUG/MainProcess] | Consumer: Building graph...
2018-06-08T10:04:46.774322348Z [2018-06-08 10:04:46,773: DEBUG/MainProcess] | Consumer: New boot order: {Connection, Events, Mingle, Tasks, Control, Gossip, Agent, Heart, event loop}
2018-06-08T10:04:46.781905347Z 2018-06-08 10:04:46,781 WARNING awx.main.tasks Set hostname to celery@awx
2018-06-08T10:04:46.787553266Z [2018-06-08 10:04:46,786: DEBUG/MainProcess] | Worker: Starting Hub
2018-06-08T10:04:46.787596457Z [2018-06-08 10:04:46,786: DEBUG/MainProcess] ^-- substep ok
2018-06-08T10:04:46.787606749Z [2018-06-08 10:04:46,786: DEBUG/MainProcess] | Worker: Starting Pool
2018-06-08T10:04:47.298666597Z [2018-06-08 10:04:47,297: DEBUG/MainProcess] ^-- substep ok
2018-06-08T10:04:47.298755079Z [2018-06-08 10:04:47,297: DEBUG/MainProcess] | Worker: Starting Autoscaler
2018-06-08T10:04:47.299604429Z [2018-06-08 10:04:47,298: DEBUG/MainProcess] ^-- substep ok
2018-06-08T10:04:47.299622371Z [2018-06-08 10:04:47,298: DEBUG/MainProcess] | Worker: Starting Beat
2018-06-08T10:04:47.306812616Z [2018-06-08 10:04:47,301: DEBUG/MainProcess] ^-- substep ok
2018-06-08T10:04:47.306834572Z [2018-06-08 10:04:47,302: DEBUG/MainProcess] | Worker: Starting Consumer
2018-06-08T10:04:47.306838847Z [2018-06-08 10:04:47,302: DEBUG/MainProcess] | Consumer: Starting Connection
2018-06-08T10:04:47.422910478Z [2018-06-08 10:04:47,421: INFO/Beat] beat: Starting...
2018-06-08T10:04:47.449327737Z [2018-06-08 10:04:47,448: DEBUG/Beat] Current schedule:
2018-06-08T10:04:47.449361345Z <Entry: purge_stdout_files awx.main.tasks.purge_old_stdout_files() <freq: 7.00 days>
2018-06-08T10:04:47.449367198Z <Entry: cluster_heartbeat awx.main.tasks.cluster_node_heartbeat() <freq: 1.00 minute>
2018-06-08T10:04:47.449370440Z <Entry: isolated_heartbeat awx.main.tasks.awx_isolated_heartbeat() <freq: 10.00 minutes>
2018-06-08T10:04:47.449374486Z <Entry: task_manager awx.main.scheduler.tasks.run_task_manager() <freq: 20.00 seconds>
2018-06-08T10:04:47.449377859Z <Entry: tower_scheduler awx.main.tasks.awx_periodic_scheduler() <freq: 30.00 seconds>
2018-06-08T10:04:47.449381280Z <Entry: celery.backend_cleanup celery.backend_cleanup() <crontab: 0 4 * * * (m/h/d/dM/MY)>
2018-06-08T10:04:47.449384529Z <Entry: admin_checks awx.main.tasks.run_administrative_checks() <freq: 30.00 days>
2018-06-08T10:04:47.449387621Z [2018-06-08 10:04:47,448: DEBUG/Beat] beat: Ticking with max interval->1.00 minute
2018-06-08T10:05:01.470460249Z RESULT 2
2018-06-08T10:05:01.470534770Z OKREADY
2018-06-08T10:06:01.544773272Z No previous hash foundRESULT 2
2018-06-08T10:06:01.544830978Z OKREADY
2018-06-08T10:06:31.402713141Z Traceback (most recent call last):
2018-06-08T10:06:31.402843306Z File "/usr/bin/awx-manage", line 9, in <module>
2018-06-08T10:06:31.408065416Z load_entry_point('awx==1.0.6.15', 'console_scripts', 'awx-manage')()
2018-06-08T10:06:31.408110795Z File "/usr/lib/python2.7/site-packages/awx/__init__.py", line 109, in manage
2018-06-08T10:06:31.408159022Z execute_from_command_line(sys.argv)
2018-06-08T10:06:31.408168228Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
2018-06-08T10:06:31.408172845Z utility.execute()
2018-06-08T10:06:31.408176544Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
2018-06-08T10:06:31.408180465Z self.fetch_command(subcommand).run_from_argv(self.argv)
2018-06-08T10:06:31.408184055Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
2018-06-08T10:06:31.408187684Z self.execute(*args, **cmd_options)
2018-06-08T10:06:31.408191115Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
2018-06-08T10:06:31.408194452Z output = self.handle(*args, **options)
2018-06-08T10:06:31.408197882Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/channels/management/commands/runworker.py", line 83, in handle
2018-06-08T10:06:31.408213810Z worker.run()
2018-06-08T10:06:31.408217099Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/channels/worker.py", line 87, in run
2018-06-08T10:06:31.408220471Z channel, content = self.channel_layer.receive_many(channels, block=True)
2018-06-08T10:06:31.408224243Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/asgiref/base_layer.py", line 43, in receive_many
2018-06-08T10:06:31.408227817Z return self.receive(channels, block)
2018-06-08T10:06:31.408231170Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/asgi_amqp/core.py", line 102, in receive
2018-06-08T10:06:31.408234628Z self._init_thread()
2018-06-08T10:06:31.408237867Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/asgi_amqp/core.py", line 63, in _init_thread
2018-06-08T10:06:31.408241169Z self.tdata.connection.default_channel.basic_qos(0, 1, False)
2018-06-08T10:06:31.408244354Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 771, in default_channel
2018-06-08T10:06:31.408247691Z self.connection
2018-06-08T10:06:31.408250880Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 756, in connection
2018-06-08T10:06:31.408254177Z self._connection = self._establish_connection()
2018-06-08T10:06:31.408294371Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 711, in _establish_connection
2018-06-08T10:06:31.408298902Z conn = self.transport.establish_connection()
2018-06-08T10:06:31.408301941Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 116, in establish_connection
2018-06-08T10:06:31.408305014Z conn = self.Connection(**opts)
2018-06-08T10:06:31.408308611Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 171, in __init__
2018-06-08T10:06:31.408311634Z (10, 10), # start
2018-06-08T10:06:31.408314544Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/abstract_channel.py", line 67, in wait
2018-06-08T10:06:31.408317559Z self.channel_id, allowed_methods, timeout)
2018-06-08T10:06:31.408320385Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 241, in _wait_method
2018-06-08T10:06:31.408323427Z channel, method_sig, args, content = read_timeout(timeout)
2018-06-08T10:06:31.408326439Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 330, in read_timeout
2018-06-08T10:06:31.408329455Z return self.method_reader.read_method()
2018-06-08T10:06:31.408332286Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/method_framing.py", line 189, in read_method
2018-06-08T10:06:31.408335316Z raise m
2018-06-08T10:06:31.408338257Z socket.error: [Errno 104] Connection reset by peer
2018-06-08T10:06:31.609201999Z 2018-06-08 10:06:31,608 INFO exited: channels-worker (exit status 1; not expected)
2018-06-08T10:06:32.613337258Z 2018-06-08 10:06:32,612 INFO spawned: 'channels-worker' with pid 230
2018-06-08T10:06:33.065300661Z [2018-06-08 10:06:33,064: ERROR/MainProcess] consumer: Cannot connect to amqp://guest:**@rabbitmq:5672/awx: [Errno 104] Connection reset by peer.
2018-06-08T10:06:33.065359309Z Trying again in 2.00 seconds...
2018-06-08T10:06:33.065365639Z
2018-06-08T10:06:33.192730312Z [2018-06-08 10:06:33,192: ERROR/Beat] beat: Connection error: [Errno 104] Connection reset by peer. Trying again in 2.0 seconds...
2018-06-08T10:06:34.194237874Z 2018-06-08 10:06:34,193 INFO success: channels-worker entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
2018-06-08T10:06:34.978216775Z 2018-06-08 10:06:34,977 - INFO - runworker - Using single-threaded worker.
2018-06-08T10:06:34.982521834Z 2018-06-08 10:06:34,978 - INFO - runworker - Running worker against channel layer default (asgi_amqp.core.AMQPChannelLayer)
2018-06-08T10:06:34.982546477Z 2018-06-08 10:06:34,979 - INFO - worker - Listening on channels websocket.connect, websocket.disconnect, websocket.receive
2018-06-08T10:07:01.011688942Z RESULT 2
2018-06-08T10:07:01.011744823Z OKREADY
2018-06-08T10:08:01.089765113Z RESULT 2
2018-06-08T10:08:01.089842888Z OKREADY
2018-06-08T10:08:19.692101223Z Traceback (most recent call last):
2018-06-08T10:08:19.692210203Z File "/usr/bin/awx-manage", line 9, in <module>
2018-06-08T10:08:19.695679482Z load_entry_point('awx==1.0.6.15', 'console_scripts', 'awx-manage')()
2018-06-08T10:08:19.695700053Z File "/usr/lib/python2.7/site-packages/awx/__init__.py", line 109, in manage
2018-06-08T10:08:19.695705168Z execute_from_command_line(sys.argv)
2018-06-08T10:08:19.695708361Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
2018-06-08T10:08:19.695711830Z utility.execute()
2018-06-08T10:08:19.695714835Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
2018-06-08T10:08:19.695718395Z self.fetch_command(subcommand).run_from_argv(self.argv)
2018-06-08T10:08:19.695721336Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
2018-06-08T10:08:19.695736240Z self.execute(*args, **cmd_options)
2018-06-08T10:08:19.695739358Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
2018-06-08T10:08:19.695742688Z output = self.handle(*args, **options)
2018-06-08T10:08:19.695745744Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/channels/management/commands/runworker.py", line 83, in handle
2018-06-08T10:08:19.695748989Z worker.run()
2018-06-08T10:08:19.695751939Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/channels/worker.py", line 87, in run
2018-06-08T10:08:19.695755355Z channel, content = self.channel_layer.receive_many(channels, block=True)
2018-06-08T10:08:19.695758251Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/asgiref/base_layer.py", line 43, in receive_many
2018-06-08T10:08:19.695762484Z return self.receive(channels, block)
2018-06-08T10:08:19.695767315Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/asgi_amqp/core.py", line 102, in receive
2018-06-08T10:08:19.695772296Z self._init_thread()
2018-06-08T10:08:19.695778430Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/asgi_amqp/core.py", line 63, in _init_thread
2018-06-08T10:08:19.695784368Z self.tdata.connection.default_channel.basic_qos(0, 1, False)
2018-06-08T10:08:19.695789053Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 771, in default_channel
2018-06-08T10:08:19.695794259Z self.connection
2018-06-08T10:08:19.695798734Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 756, in connection
2018-06-08T10:08:19.695803632Z self._connection = self._establish_connection()
2018-06-08T10:08:19.695812408Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/connection.py", line 711, in _establish_connection
2018-06-08T10:08:19.695837956Z conn = self.transport.establish_connection()
2018-06-08T10:08:19.695841455Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/kombu/transport/pyamqp.py", line 116, in establish_connection
2018-06-08T10:08:19.695844560Z conn = self.Connection(**opts)
2018-06-08T10:08:19.695847306Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 171, in __init__
2018-06-08T10:08:19.695850286Z (10, 10), # start
2018-06-08T10:08:19.695852994Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/abstract_channel.py", line 67, in wait
2018-06-08T10:08:19.695855936Z self.channel_id, allowed_methods, timeout)
2018-06-08T10:08:19.695858698Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 241, in _wait_method
2018-06-08T10:08:19.695861740Z channel, method_sig, args, content = read_timeout(timeout)
2018-06-08T10:08:19.695864511Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/connection.py", line 330, in read_timeout
2018-06-08T10:08:19.695867444Z return self.method_reader.read_method()
2018-06-08T10:08:19.695870181Z File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/amqp/method_framing.py", line 189, in read_method
2018-06-08T10:08:19.695873222Z raise m
2018-06-08T10:08:19.695875945Z socket.error: [Errno 104] Connection reset by peer
2018-06-08T10:08:19.817022905Z [2018-06-08 10:08:19,816: ERROR/MainProcess] consumer: Cannot connect to amqp://guest:**@rabbitmq:5672/awx: [Errno 104] Connection reset by peer.
2018-06-08T10:08:19.817088636Z Trying again in 4.00 seconds...
2018-06-08T10:08:19.817094646Z
2018-06-08T10:08:19.876568040Z 2018-06-08 10:08:19,876 INFO exited: channels-worker (exit status 1; not expected)
2018-06-08T10:08:20.880076422Z 2018-06-08 10:08:20,879 INFO spawned: 'channels-worker' with pid 241
2018-06-08T10:08:21.882123169Z 2018-06-08 10:08:21,881 INFO success: channels-worker entered RUNNING state, process has stayed up for > than 1 seconds (startsecs)
2018-06-08T10:08:22.888318987Z [2018-06-08 10:08:22,887: ERROR/Beat] beat: Connection error: [Errno 104] Connection reset by peer. Trying again in 4.0 seconds...
2018-06-08T10:08:23.456245395Z 2018-06-08 10:08:23,455 - INFO - runworker - Using single-threaded worker.
2018-06-08T10:08:23.459792862Z 2018-06-08 10:08:23,456 - INFO - runworker - Running worker against channel layer default (asgi_amqp.core.AMQPChannelLayer)
2018-06-08T10:08:23.459847169Z 2018-06-08 10:08:23,456 - INFO - worker - Listening on channels websocket.connect, websocket.disconnect, websocket.receive
2018-06-08T10:09:01.506534481Z RESULT 2
2018-06-08T10:09:01.506587366Z OKREADY
|
RuntimeError
|
def _accept_or_ignore_job_kwargs(self, **kwargs):
exclude_errors = kwargs.pop("_exclude_errors", [])
prompted_data = {}
rejected_data = {}
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(
kwargs.get("extra_vars", {}),
_exclude_errors=exclude_errors,
extra_passwords=kwargs.get("survey_passwords", {}),
)
if accepted_vars:
prompted_data["extra_vars"] = accepted_vars
if rejected_vars:
rejected_data["extra_vars"] = rejected_vars
# Handle all the other fields that follow the simple prompting rule
for field_name, ask_field_name in self.get_ask_mapping().items():
if (
field_name not in kwargs
or field_name == "extra_vars"
or kwargs[field_name] is None
):
continue
new_value = kwargs[field_name]
old_value = getattr(self, field_name)
field = self._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField):
old_value = set(old_value.all())
if getattr(self, "_deprecated_credential_launch", False):
# TODO: remove this code branch when support for `extra_credentials` goes away
new_value = set(kwargs[field_name])
else:
new_value = set(kwargs[field_name]) - old_value
if not new_value:
continue
if new_value == old_value:
# no-op case: Fields the same as template's value
# counted as neither accepted or ignored
continue
elif getattr(self, ask_field_name):
# accepted prompt
prompted_data[field_name] = new_value
else:
# unprompted - template is not configured to accept field on launch
rejected_data[field_name] = new_value
# Not considered an error for manual launch, to support old
# behavior of putting them in ignored_fields and launching anyway
if "prompts" not in exclude_errors:
errors_dict[field_name] = _(
"Field is not configured to prompt on launch."
).format(field_name=field_name)
if (
"prompts" not in exclude_errors
and (not getattr(self, "ask_credential_on_launch", False))
and self.passwords_needed_to_start
):
errors_dict["passwords_needed_to_start"] = _(
"Saved launch configurations cannot provide passwords needed to start."
)
needed = self.resources_needed_to_start
if needed:
needed_errors = []
for resource in needed:
if resource in prompted_data:
continue
needed_errors.append(
_("Job Template {} is missing or undefined.").format(resource)
)
if needed_errors:
errors_dict["resources_needed_to_start"] = needed_errors
return prompted_data, rejected_data, errors_dict
|
def _accept_or_ignore_job_kwargs(self, **kwargs):
exclude_errors = kwargs.pop("_exclude_errors", [])
prompted_data = {}
rejected_data = {}
accepted_vars, rejected_vars, errors_dict = self.accept_or_ignore_variables(
kwargs.get("extra_vars", {}),
_exclude_errors=exclude_errors,
extra_passwords=kwargs.get("survey_passwords", {}),
)
if accepted_vars:
prompted_data["extra_vars"] = accepted_vars
if rejected_vars:
rejected_data["extra_vars"] = rejected_vars
# Handle all the other fields that follow the simple prompting rule
for field_name, ask_field_name in self.get_ask_mapping().items():
if (
field_name not in kwargs
or field_name == "extra_vars"
or kwargs[field_name] is None
):
continue
new_value = kwargs[field_name]
old_value = getattr(self, field_name)
field = self._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField):
old_value = set(old_value.all())
if getattr(self, "_deprecated_credential_launch", False):
# TODO: remove this code branch when support for `extra_credentials` goes away
new_value = set(kwargs[field_name])
else:
new_value = set(kwargs[field_name]) - old_value
if not new_value:
continue
if new_value == old_value:
# no-op case: Fields the same as template's value
# counted as neither accepted or ignored
continue
elif getattr(self, ask_field_name):
# accepted prompt
prompted_data[field_name] = new_value
else:
# unprompted - template is not configured to accept field on launch
rejected_data[field_name] = new_value
# Not considered an error for manual launch, to support old
# behavior of putting them in ignored_fields and launching anyway
if "prompts" not in exclude_errors:
errors_dict[field_name] = _(
"Field is not configured to prompt on launch."
).format(field_name=field_name)
if "prompts" not in exclude_errors and self.passwords_needed_to_start:
errors_dict["passwords_needed_to_start"] = _(
"Saved launch configurations cannot provide passwords needed to start."
)
needed = self.resources_needed_to_start
if needed:
needed_errors = []
for resource in needed:
if resource in prompted_data:
continue
needed_errors.append(
_("Job Template {} is missing or undefined.").format(resource)
)
if needed_errors:
errors_dict["resources_needed_to_start"] = needed_errors
return prompted_data, rejected_data, errors_dict
|
https://github.com/ansible/awx/issues/2041
|
./awx_install.sh
HEAD is now at 2e98029 Merge pull request #2009 from cdvv7788/Issue/1506
Already up-to-date.
PLAY [Build and deploy AWX] *********************************************************************************************************************************
TASK [check_vars : include_tasks] ***************************************************************************************************************************
skipping: [localhost]
TASK [check_vars : include_tasks] ***************************************************************************************************************************
included: /<install dir>/installer/roles/check_vars/tasks/check_docker.yml for localhost
TASK [check_vars : postgres_data_dir should be defined] *****************************************************************************************************
ok: [localhost] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [check_vars : host_port should be defined] *************************************************************************************************************
ok: [localhost] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [image_build : Get Version from checkout if not provided] **********************************************************************************************
skipping: [localhost]
TASK [image_build : Set global version if not provided] *****************************************************************************************************
skipping: [localhost]
TASK [image_build : Verify awx-logos directory exists for official install] *********************************************************************************
skipping: [localhost]
TASK [image_build : Copy logos for inclusion in sdist] ******************************************************************************************************
skipping: [localhost]
TASK [image_build : Set sdist file name] ********************************************************************************************************************
skipping: [localhost]
TASK [image_build : AWX Distribution] ***********************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stat distribution file] *****************************************************************************************************************
skipping: [localhost]
TASK [image_build : Clean distribution] *********************************************************************************************************************
skipping: [localhost]
TASK [image_build : Build sdist builder image] **************************************************************************************************************
skipping: [localhost]
TASK [image_build : Build AWX distribution using container] *************************************************************************************************
skipping: [localhost]
TASK [image_build : Build AWX distribution locally] *********************************************************************************************************
skipping: [localhost]
TASK [image_build : Set docker build base path] *************************************************************************************************************
skipping: [localhost]
TASK [image_build : Set awx_web image name] *****************************************************************************************************************
skipping: [localhost]
TASK [image_build : Set awx_task image name] ****************************************************************************************************************
skipping: [localhost]
TASK [image_build : Ensure directory exists] ****************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage sdist] ****************************************************************************************************************************
skipping: [localhost]
TASK [image_build : Template web Dockerfile] ****************************************************************************************************************
skipping: [localhost]
TASK [image_build : Template task Dockerfile] ***************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage launch_awx] ***********************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage launch_awx_task] ******************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage nginx.conf] ***********************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage supervisor.conf] ******************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage supervisor_task.conf] *************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage settings.py] **********************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage requirements] *********************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage config watcher] *******************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage Makefile] *************************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage ansible repo] *********************************************************************************************************************
skipping: [localhost]
TASK [image_build : Stage ansible repo key] *****************************************************************************************************************
skipping: [localhost]
TASK [image_build : Build base web image] *******************************************************************************************************************
skipping: [localhost]
TASK [image_build : Build base task image] ******************************************************************************************************************
skipping: [localhost]
TASK [image_build : Tag task and web images as latest] ******************************************************************************************************
skipping: [localhost]
TASK [image_build : Clean docker base directory] ************************************************************************************************************
skipping: [localhost]
TASK [image_push : Authenticate with Docker registry if registry password given] ****************************************************************************
skipping: [localhost]
TASK [image_push : Remove web image] ************************************************************************************************************************
skipping: [localhost]
TASK [image_push : Remove task image] ***********************************************************************************************************************
skipping: [localhost]
TASK [image_push : Tag and push web image to registry] ******************************************************************************************************
skipping: [localhost]
TASK [image_push : Tag and push task image to registry] *****************************************************************************************************
skipping: [localhost]
TASK [image_push : Set full image path for Registry] ********************************************************************************************************
skipping: [localhost]
TASK [kubernetes : fail] ************************************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Set kubernetes base path] ****************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : include_tasks] ***************************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : include_tasks] ***************************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Use kubectl or oc] ***********************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Get Postgres Service Detail] *************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Template PostgreSQL Deployment] **********************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Deploy and Activate Postgres] ************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Deploy and Activate Postgres (Kubernetes)] ***********************************************************************************************
skipping: [localhost]
TASK [kubernetes : Set postgresql hostname to helm package service] *****************************************************************************************
skipping: [localhost]
TASK [kubernetes : Wait for Postgres to activate] ***********************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Ensure directory exists] *****************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Template Kubernetes AWX Config] **********************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Set task image name] *********************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Set web image name] **********************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Template Kubernetes AWX Deployment] ******************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Apply Configmap] *************************************************************************************************************************
skipping: [localhost]
TASK [kubernetes : Apply Deployment] ************************************************************************************************************************
skipping: [localhost]
TASK [local_docker : Export Docker web image if it isnt local and there isnt a registry defined] ************************************************************
skipping: [localhost]
TASK [local_docker : Export Docker task image if it isnt local and there isnt a registry defined] ***********************************************************
skipping: [localhost]
TASK [local_docker : Set docker base path] ******************************************************************************************************************
skipping: [localhost]
TASK [local_docker : Ensure directory exists] ***************************************************************************************************************
skipping: [localhost]
TASK [local_docker : Copy web image to docker execution] ****************************************************************************************************
skipping: [localhost]
TASK [local_docker : Copy task image to docker execution] ***************************************************************************************************
skipping: [localhost]
TASK [local_docker : Load web image] ************************************************************************************************************************
skipping: [localhost]
TASK [local_docker : Load task image] ***********************************************************************************************************************
skipping: [localhost]
TASK [local_docker : Set full image path for local install] *************************************************************************************************
skipping: [localhost]
TASK [local_docker : Set DockerHub Image Paths] *************************************************************************************************************
ok: [localhost]
TASK [local_docker : Activate postgres container] ***********************************************************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: TypeError: create_host_config() got an unexpected keyword argument 'init'
fatal: [localhost]: FAILED! => {"changed": false, "module_stderr": "Traceback (most recent call last):\n File \"/tmp/ansible_f2TUMm/ansible_module_docker_container.py\", line 2081, in <module>\n main()\n File \"/tmp/ansible_f2TUMm/ansible_module_docker_container.py\", line 2076, in main\n cm = ContainerManager(client)\n File \"/tmp/ansible_f2TUMm/ansible_module_docker_container.py\", line 1703, in __init__\n self.present(state)\n File \"/tmp/ansible_f2TUMm/ansible_module_docker_container.py\", line 1723, in present\n new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)\n File \"/tmp/ansible_f2TUMm/ansible_module_docker_container.py\", line 825, in create_parameters\n host_config=self._host_config(),\n File \"/tmp/ansible_f2TUMm/ansible_module_docker_container.py\", line 931, in _host_config\n return self.client.create_host_config(**params)\n File \"/usr/local/lib/python2.7/dist-packages/docker/api/container.py\", line 157, in create_host_config\n return utils.create_host_config(*args, **kwargs)\nTypeError: create_host_config() got an unexpected keyword argument 'init'\n", "module_stdout": "", "msg": "MODULE FAILURE", "rc": 1}
to retry, use: --limit @/<install dir>/installer/install.retry
PLAY RECAP **************************************************************************************************************************************************
localhost : ok=4 changed=0 unreachable=0 failed=1
|
TypeError
|
def parse_inventory_id(data):
inventory_id = data.get("inventory_id", ["null"])
try:
inventory_id = int(inventory_id[0])
except ValueError:
inventory_id = None
except IndexError:
inventory_id = None
except TypeError:
inventory_id = None
if not inventory_id:
inventory_id = None
return inventory_id
|
def parse_inventory_id(data):
inventory_id = data.get("inventory_id", ["null"])
try:
inventory_id = int(inventory_id[0])
except ValueError:
inventory_id = None
if not inventory_id:
inventory_id = None
return inventory_id
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def parse_message_text(self, message_text, client_id):
"""
See the Messages of CONTRIBUTING.md for the message format.
"""
data = json.loads(message_text)
if len(data) == 2:
message_type = data.pop(0)
message_value = data.pop(0)
if isinstance(message_value, list):
logger.warning("Message has no sender")
return None, None
if isinstance(message_value, dict) and client_id != message_value.get("sender"):
logger.warning(
"client_id mismatch expected: %s actual %s",
client_id,
message_value.get("sender"),
)
return None, None
return message_type, message_value
else:
logger.error("Invalid message text")
return None, None
|
def parse_message_text(self, message_text, client_id):
"""
See the Messages of CONTRIBUTING.md for the message format.
"""
data = json.loads(message_text)
if len(data) == 2:
message_type = data.pop(0)
message_value = data.pop(0)
if isinstance(message_value, list):
logger.error("Message has no sender")
return None, None
if isinstance(message_value, dict) and client_id != message_value.get("sender"):
logger.error(
"client_id mismatch expected: %s actual %s",
client_id,
message_value.get("sender"),
)
return None, None
return message_type, message_value
else:
logger.error("Invalid message text")
return None, None
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def handle(self, message):
"""
Dispatches a message based on the message type to a handler function
of name onX where X is the message type.
"""
topology_id = message.get("topology")
if topology_id is None:
logger.warning("Unsupported message %s: no topology", message)
return
client_id = message.get("client")
if client_id is None:
logger.warning("Unsupported message %s: no client", message)
return
if "text" not in message:
logger.warning("Unsupported message %s: no data", message)
return
message_type, message_value = self.parse_message_text(message["text"], client_id)
if message_type is None:
logger.warning("Unsupported message %s: no message type", message)
return
handler = self.get_handler(message_type)
if handler is not None:
handler(message_value, topology_id, client_id)
else:
logger.warning("Unsupported message %s: no handler", message_type)
|
def handle(self, message):
"""
Dispatches a message based on the message type to a handler function
of name onX where X is the message type.
"""
topology_id = message.get("topology")
assert topology_id is not None, "No topology_id"
client_id = message.get("client")
assert client_id is not None, "No client_id"
message_type, message_value = self.parse_message_text(message["text"], client_id)
if message_type is None:
return
handler = self.get_handler(message_type)
if handler is not None:
handler(message_value, topology_id, client_id)
else:
logger.warning("Unsupported message %s: no handler", message_type)
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def onLinkCreate(self, link, topology_id, client_id):
logger.debug("Link created %s", link)
device_map = dict(
Device.objects.filter(
topology_id=topology_id,
cid__in=[link["from_device_id"], link["to_device_id"]],
).values_list("cid", "pk")
)
if link["from_device_id"] not in device_map:
logger.warning("Device not found")
return
if link["to_device_id"] not in device_map:
logger.warning("Device not found")
return
Link.objects.get_or_create(
cid=link["id"],
name=link["name"],
from_device_id=device_map[link["from_device_id"]],
to_device_id=device_map[link["to_device_id"]],
from_interface_id=Interface.objects.get(
device_id=device_map[link["from_device_id"]], cid=link["from_interface_id"]
).pk,
to_interface_id=Interface.objects.get(
device_id=device_map[link["to_device_id"]], cid=link["to_interface_id"]
).pk,
)
(
Topology.objects.filter(pk=topology_id, link_id_seq__lt=link["id"]).update(
link_id_seq=link["id"]
)
)
|
def onLinkCreate(self, link, topology_id, client_id):
logger.debug("Link created %s", link)
device_map = dict(
Device.objects.filter(
topology_id=topology_id,
cid__in=[link["from_device_id"], link["to_device_id"]],
).values_list("cid", "pk")
)
Link.objects.get_or_create(
cid=link["id"],
name=link["name"],
from_device_id=device_map[link["from_device_id"]],
to_device_id=device_map[link["to_device_id"]],
from_interface_id=Interface.objects.get(
device_id=device_map[link["from_device_id"]], cid=link["from_interface_id"]
).pk,
to_interface_id=Interface.objects.get(
device_id=device_map[link["to_device_id"]], cid=link["to_interface_id"]
).pk,
)
(
Topology.objects.filter(pk=topology_id, link_id_seq__lt=link["id"]).update(
link_id_seq=link["id"]
)
)
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def onLinkDestroy(self, link, topology_id, client_id):
logger.debug("Link deleted %s", link)
device_map = dict(
Device.objects.filter(
topology_id=topology_id,
cid__in=[link["from_device_id"], link["to_device_id"]],
).values_list("cid", "pk")
)
if link["from_device_id"] not in device_map:
logger.warning("Device not found")
return
if link["to_device_id"] not in device_map:
logger.warning("Device not found")
return
Link.objects.filter(
cid=link["id"],
from_device_id=device_map[link["from_device_id"]],
to_device_id=device_map[link["to_device_id"]],
from_interface_id=Interface.objects.get(
device_id=device_map[link["from_device_id"]], cid=link["from_interface_id"]
).pk,
to_interface_id=Interface.objects.get(
device_id=device_map[link["to_device_id"]], cid=link["to_interface_id"]
).pk,
).delete()
|
def onLinkDestroy(self, link, topology_id, client_id):
logger.debug("Link deleted %s", link)
device_map = dict(
Device.objects.filter(
topology_id=topology_id,
cid__in=[link["from_device_id"], link["to_device_id"]],
).values_list("cid", "pk")
)
if link["from_device_id"] not in device_map:
return
if link["to_device_id"] not in device_map:
return
Link.objects.filter(
cid=link["id"],
from_device_id=device_map[link["from_device_id"]],
to_device_id=device_map[link["to_device_id"]],
from_interface_id=Interface.objects.get(
device_id=device_map[link["from_device_id"]], cid=link["from_interface_id"]
).pk,
to_interface_id=Interface.objects.get(
device_id=device_map[link["to_device_id"]], cid=link["to_interface_id"]
).pk,
).delete()
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def ws_connect(message):
if not message.user.is_authenticated():
logger.error("Request user is not authenticated to use websocket.")
message.reply_channel.send({"close": True})
return
else:
message.reply_channel.send({"accept": True})
data = urlparse.parse_qs(message.content["query_string"])
inventory_id = parse_inventory_id(data)
topology_ids = list(
TopologyInventory.objects.filter(inventory_id=inventory_id).values_list(
"pk", flat=True
)
)
topology_id = None
if len(topology_ids) > 0:
topology_id = topology_ids[0]
if topology_id is not None:
topology = Topology.objects.get(pk=topology_id)
else:
topology = Topology(name="topology", scale=1.0, panX=0, panY=0)
topology.save()
TopologyInventory(inventory_id=inventory_id, topology_id=topology.pk).save()
topology_id = topology.pk
message.channel_session["topology_id"] = topology_id
channels.Group("topology-%s" % topology_id).add(message.reply_channel)
client = Client()
client.save()
message.channel_session["client_id"] = client.pk
channels.Group("client-%s" % client.pk).add(message.reply_channel)
message.reply_channel.send({"text": json.dumps(["id", client.pk])})
message.reply_channel.send({"text": json.dumps(["topology_id", topology_id])})
topology_data = transform_dict(
dict(
id="topology_id",
name="name",
panX="panX",
panY="panY",
scale="scale",
link_id_seq="link_id_seq",
device_id_seq="device_id_seq",
),
topology.__dict__,
)
message.reply_channel.send({"text": json.dumps(["Topology", topology_data])})
send_snapshot(message.reply_channel, topology_id)
|
def ws_connect(message):
if not message.user.is_authenticated():
logger.error("Request user is not authenticated to use websocket.")
message.reply_channel.send({"close": True})
return
else:
message.reply_channel.send({"accept": True})
data = urlparse.parse_qs(message.content["query_string"])
inventory_id = parse_inventory_id(data)
topology_ids = list(
TopologyInventory.objects.filter(inventory_id=inventory_id).values_list(
"pk", flat=True
)
)
topology_id = None
if len(topology_ids) > 0:
topology_id = topology_ids[0]
if topology_id is not None:
topology = Topology.objects.get(pk=topology_id)
else:
topology = Topology(name="topology", scale=1.0, panX=0, panY=0)
topology.save()
TopologyInventory(inventory_id=inventory_id, topology_id=topology.pk).save()
topology_id = topology.pk
message.channel_session["topology_id"] = topology_id
Group("topology-%s" % topology_id).add(message.reply_channel)
client = Client()
client.save()
message.channel_session["client_id"] = client.pk
Group("client-%s" % client.pk).add(message.reply_channel)
message.reply_channel.send({"text": json.dumps(["id", client.pk])})
message.reply_channel.send({"text": json.dumps(["topology_id", topology_id])})
topology_data = transform_dict(
dict(
id="topology_id",
name="name",
panX="panX",
panY="panY",
scale="scale",
link_id_seq="link_id_seq",
device_id_seq="device_id_seq",
),
topology.__dict__,
)
message.reply_channel.send({"text": json.dumps(["Topology", topology_data])})
send_snapshot(message.reply_channel, topology_id)
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def ws_message(message):
# Send to all clients editing the topology
channels.Group("topology-%s" % message.channel_session["topology_id"]).send(
{"text": message["text"]}
)
# Send to networking_events handler
networking_events_dispatcher.handle(
{
"text": message["text"],
"topology": message.channel_session["topology_id"],
"client": message.channel_session["client_id"],
}
)
|
def ws_message(message):
# Send to all clients editing the topology
Group("topology-%s" % message.channel_session["topology_id"]).send(
{"text": message["text"]}
)
# Send to networking_events handler
networking_events_dispatcher.handle(
{
"text": message["text"],
"topology": message.channel_session["topology_id"],
"client": message.channel_session["client_id"],
}
)
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def ws_disconnect(message):
if "topology_id" in message.channel_session:
channels.Group("topology-%s" % message.channel_session["topology_id"]).discard(
message.reply_channel
)
|
def ws_disconnect(message):
if "topology_id" in message.channel_session:
Group("topology-%s" % message.channel_session["topology_id"]).discard(
message.reply_channel
)
|
https://github.com/ansible/awx/issues/1257
|
[2018-02-15 10:31:51,202: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x1d71c80> (args:('awx.main.tasks.run_project_update', '69545664-1752-4c48-a46c-41b7976d5bd3', {'origin': 'gen280@awx', 'lang': 'py', 'task': 'awx.main.tasks.run_project_update', 'group': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, 'expires': None, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'retries': 0, 'timelimit': [None, None], 'argsrepr': '[18]', 'eta': None, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}'}, u'[[18], {}, {"chord": null, "callbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_success", "subtask_type": null, "kwargs": {"task_actual": {"type": "project_update", "id": 18}}, "args": [], "options": {}, "immutable": false}], "errbacks": [{"chord_size": null, "task": "awx.main.tasks.handle_work_error", "subtask_type": null, "kwargs":... kwargs:{})
[2018-02-15 10:31:51,205: DEBUG/MainProcess] Task accepted: awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] pid:279
[2018-02-15 10:31:51,221: INFO/ForkPoolWorker-50] Task awx.main.scheduler.tasks.run_job_launch[e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a] succeeded in 0.280421956s: None
2018-02-15 10:31:52,001 ERROR awx.main.tasks project_update 18 (running) Post run hook errored.
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 997, in run
self.post_run_hook(instance, status, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/tasks.py", line 1647, in post_run_hook
fd = open(self.revision_path, 'r')
AttributeError: 'RunProjectUpdate' object has no attribute 'revision_path'
2018-02-15 10:31:52,089 DEBUG awx.main.tasks Executing error task id <Context: {'origin': 'gen280@awx', u'args': [18], u'chain': None, 'root_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'expires': None, u'is_eager': False, u'correlation_id': '69545664-1752-4c48-a46c-41b7976d5bd3', u'chord': None, u'reply_to': 'aef45ebd-7309-34e3-8669-7f378d04708f', '_children': [], 'id': '69545664-1752-4c48-a46c-41b7976d5bd3', 'kwargsrepr': '{}', 'lang': 'py', 'retries': 0, 'task': 'awx.main.tasks.run_project_update', 'group': None, 'timelimit': [None, None], u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u''}, u'hostname': u'celery@awx', 'called_directly': False, 'parent_id': 'e22c4be1-8d1a-4193-a5e8-03b8c6b25f3a', 'argsrepr': '[18]', u'errbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_error', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'subtasks': [{u'type': u'project_update', u'id': 18}]}, u'options': {}}], u'callbacks': [{u'chord_size': None, u'task': u'awx.main.tasks.handle_work_success', u'args': [], u'immutable': False, u'subtask_type': None, u'kwargs': {u'task_actual': {u'type': u'project_update', u'id': 18}}, u'options': {}}], u'kwargs': {}, 'eta': None, '_protected': 1}>, subtasks: [{u'type': u'project_update', u'id': 18}]
2018-02-15 10:31:52,180 WARNING awx.main.tasks project_update 18 (error) encountered an error (rc=None), please see task stdout for details.
[2018-02-15 10:31:52,181: ERROR/ForkPoolWorker-49] Task awx.main.tasks.run_project_update[69545664-1752-4c48-a46c-41b7976d5bd3] raised unexpected: Exception('project_update 18 (error) encountered an error (rc=None), please see task stdout for details.',)
|
AttributeError
|
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ["status", "notifications_sent"]
try:
sent = notification.notification_template.send(
notification.subject, notification.body
)
notification.status = "successful"
notification.notifications_sent = sent
except Exception as e:
logger.error(six.text_type("Send Notification Failed {}").format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append("error")
finally:
try:
notification.save(update_fields=update_fields)
except Exception as e:
logger.exception(
six.text_type("Error saving notification {} result.").format(
notification.id
)
)
|
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ["status", "notifications_sent"]
try:
sent = notification.notification_template.send(
notification.subject, notification.body
)
notification.status = "successful"
notification.notifications_sent = sent
except Exception as e:
logger.error(six.text_type("Send Notification Failed {}").format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append("error")
finally:
notification.save(update_fields=update_fields)
|
https://github.com/ansible/awx/issues/1817
|
xxx@dddd-01:~$ docker logs awx_task_1
Using /etc/ansible/ansible.cfg as config file
...
Traceback (most recent call last):
File "/usr/bin/awx-manage", line 9, in <module>
load_entry_point('awx==1.0.6.0', 'console_scripts', 'awx-manage')()
File "/usr/lib/python2.7/site-packages/awx/__init__.py", line 109, in manage
execute_from_command_line(sys.argv)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
Operations to perform:
Apply all migrations: auth, conf, contenttypes, djcelery, main, network_ui, oauth2_provider, sessions, sites, social_django, sso, taggit
utility.execute()
...
ValueError: The field oauth2_provider.AccessToken.application was declared with a lazy reference to 'main.oauth2application', but app 'main' doesn't provide model 'oauth2application'.
The field oauth2_provider.Grant.application was declared with a lazy reference to 'main.oauth2application', but app 'main' doesn't provide model 'oauth2application'.
The field oauth2_provider.RefreshToken.access_token was declared with a lazy reference to 'main.oauth2accesstoken', but app 'main' doesn't provide model 'oauth2accesstoken'.
The field oauth2_provider.RefreshToken.application was declared with a lazy reference to 'main.oauth2application', but app 'main' doesn't provide model 'oauth2application'.
...
django.db.utils.IntegrityError: duplicate key value violates unique constraint "auth_user_username_key"
DETAIL: Key (username)=(admin) already exists.
Instance already registered awx
Instance Group already registered tower
...
2018-04-27 17:58:18,281 CRIT Supervisor is running as root. Privileges were not dropped because no user is specified in the config file. If you intend to run as root, you can set user=root in the config file to avoid this message.
...
|
ValueError
|
def dispatch(self, request, *args, **kwargs):
response = super(CompleteView, self).dispatch(request, *args, **kwargs)
if self.request.user and self.request.user.is_authenticated():
logger.info(smart_text("User {} logged in".format(self.request.user.username)))
response.set_cookie("userLoggedIn", "true")
current_user = UserSerializer(self.request.user)
current_user = JSONRenderer().render(current_user.data)
current_user = urllib.quote("%s" % current_user, "")
response.set_cookie("current_user", current_user)
return response
|
def dispatch(self, request, *args, **kwargs):
response = super(CompleteView, self).dispatch(request, *args, **kwargs)
if self.request.user and self.request.user.is_authenticated():
auth.login(self.request, self.request.user)
logger.info(smart_text("User {} logged in".format(self.request.user.username)))
return response
|
https://github.com/ansible/awx/issues/1418
|
2018/03/01 21:33:27 [warn] 31#0: *24 upstream server temporarily disabled while reading response header from upstream, client: 172.18.0.4, server: _, request: "GET /sso/complete/ HTTP/1.1", upstream: "uwsgi://127.0.0.1:8050", host: "awx.example.com", referrer: "https://logon.example.com/adfs/ls?SAMLRequest=<RESPONSE>&RelayState=IDP"
new_obj = func(obj, *arg_vals)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/templatetags/rest_framework.py", line 244, in items
return value.items()
AttributeError: 'NoneType' object has no attribute 'items'
[pid: 42|app: 0|req: 2/8] 172.18.0.4 () {56 vars in 1674 bytes} [Thu Mar 1 21:33:26 2018] GET /sso/complete/ => generated 0 bytes in 271 msecs (HTTP/1.1 500) 0 headers in 0 bytes (0 switches on core 0)
2018-03-01 21:33:27,330 ERROR django.request Internal Server Error: /sso/complete/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/sso/views.py", line 42, in dispatch
auth.login(self.request, self.request.user)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/contrib/auth/__init__.py", line 149, in login
'You have multiple authentication backends configured and '
ValueError: You have multiple authentication backends configured and therefore must provide the `backend` argument or set the `backend` attribute on the user.
2018-03-01 21:33:27,330 ERROR django.request Internal Server Error: /sso/complete/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/sso/views.py", line 42, in dispatch
auth.login(self.request, self.request.user)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/contrib/auth/__init__.py", line 149, in login
'You have multiple authentication backends configured and '
ValueError: You have multiple authentication backends configured and therefore must provide the `backend` argument or set the `backend` attribute on the user.
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/wsgi.py", line 157, in __call__
response = self.get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 124, in get_response
response = self._middleware_chain(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 43, in inner
response = response_for_exception(request, exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 93, in response_for_exception
response = handle_uncaught_exception(request, get_resolver(get_urlconf()), sys.exc_info())
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 143, in handle_uncaught_exception
return callback(request, **param_dict)
File "/usr/lib/python2.7/site-packages/awx/main/views.py", line 88, in handle_500
return handle_error(request, 500, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/views.py", line 56, in handle_error
return render(request, 'error.html', kwargs, status=status)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/shortcuts.py", line 30, in render
content = loader.render_to_string(template_name, context, request, using=using)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/loader.py", line 68, in render_to_string
return template.render(context, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/backends/django.py", line 66, in render
return self.template.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 207, in render
return self._render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 199, in _render
return self.nodelist.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 990, in render
bit = node.render_annotated(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 957, in render_annotated
return self.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/loader_tags.py", line 177, in render
return compiled_parent._render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 199, in _render
return self.nodelist.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 990, in render
bit = node.render_annotated(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 957, in render_annotated
return self.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/loader_tags.py", line 177, in render
return compiled_parent._render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 199, in _render
return self.nodelist.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 990, in render
bit = node.render_annotated(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 957, in render_annotated
return self.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/loader_tags.py", line 72, in render
result = block.nodelist.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 990, in render
bit = node.render_annotated(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 957, in render_annotated
return self.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/loader_tags.py", line 72, in render
result = block.nodelist.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 990, in render
bit = node.render_annotated(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 957, in render_annotated
return self.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/defaulttags.py", line 40, in render
output = self.nodelist.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 990, in render
bit = node.render_annotated(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 957, in render_annotated
return self.render(context)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/defaulttags.py", line 166, in render
values = self.sequence.resolve(context, True)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/template/base.py", line 736, in resolve
new_obj = func(obj, *arg_vals)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/templatetags/rest_framework.py", line 244, in items
return value.items()
AttributeError: 'NoneType' object has no attribute 'items'
|
AttributeError
|
def can_change(self, obj, data):
# Checks for admin change permission on inventory.
if obj and obj.inventory:
return self.user.can_access(
Inventory, "change", obj.inventory, None
) and self.check_related(
"source_project", Project, data, obj=obj, role_field="use_role"
)
# Can't change inventory sources attached to only the inventory, since
# these are created automatically from the management command.
else:
return False
|
def can_change(self, obj, data):
# Checks for admin change permission on inventory.
if obj and obj.inventory:
return (
self.user.can_access(Inventory, "change", obj.inventory, None)
and self.check_related(
"credential", Credential, data, obj=obj, role_field="use_role"
)
and self.check_related(
"source_project", Project, data, obj=obj, role_field="use_role"
)
)
# Can't change inventory sources attached to only the inventory, since
# these are created automatically from the management command.
else:
return False
|
https://github.com/ansible/awx/issues/1664
|
2018-03-23 15:38:36,900 ERROR django.request Internal Server Error: /api/v2/inventory_sources/25/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 257, in put
return self.update(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 741, in update
return super(RetrieveUpdateAPIView, self).update(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 67, in update
instance = self.get_object()
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 101, in get_object
self.check_object_permissions(self.request, obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 344, in check_object_permissions
if not permission.has_object_permission(request, self, obj):
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 138, in has_object_permission
return self.has_permission(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 129, in has_permission
response = self.check_permissions(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 118, in check_permissions
result = check_method and check_method(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 82, in check_put_permissions
request.data)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 117, in check_user_access
result = access_method(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 157, in wrapper
return func(self, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 931, in can_change
self.check_related('credential', Credential, data, obj=obj, role_field='use_role') and
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 290, in check_related
if current and (changed or mandatory) and (not user_has_resource_access(current)):
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 283, in user_has_resource_access
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
KeyError: 'use_role'
|
KeyError
|
def copy_model_obj(
old_parent, new_parent, model, obj, creater, copy_name="", create_kwargs=None
):
fields_to_preserve = set(getattr(model, "FIELDS_TO_PRESERVE_AT_COPY", []))
fields_to_discard = set(getattr(model, "FIELDS_TO_DISCARD_AT_COPY", []))
m2m_to_preserve = {}
o2m_to_preserve = {}
create_kwargs = create_kwargs or {}
for field_name in fields_to_discard:
create_kwargs.pop(field_name, None)
for field in model._meta.get_fields():
try:
field_val = getattr(obj, field.name)
except AttributeError:
continue
# Adjust copy blacklist fields here.
if (
field.name in fields_to_discard
or field.name
in [
"id",
"pk",
"polymorphic_ctype",
"unifiedjobtemplate_ptr",
"created_by",
"modified_by",
]
or field.name.endswith("_role")
):
create_kwargs.pop(field.name, None)
continue
if field.one_to_many:
if field.name in fields_to_preserve:
o2m_to_preserve[field.name] = field_val
elif field.many_to_many:
if field.name in fields_to_preserve and not old_parent:
m2m_to_preserve[field.name] = field_val
elif field.many_to_one and not field_val:
create_kwargs.pop(field.name, None)
elif field.many_to_one and field_val == old_parent:
create_kwargs[field.name] = new_parent
elif field.name == "name" and not old_parent:
create_kwargs[field.name] = copy_name or field_val + " copy"
elif field.name in fields_to_preserve:
create_kwargs[field.name] = CopyAPIView._decrypt_model_field_if_needed(
obj, field.name, field_val
)
new_obj = model.objects.create(**create_kwargs)
logger.debug(
six.text_type("Deep copy: Created new object {}({})").format(new_obj, model)
)
# Need to save separatedly because Djang-crum get_current_user would
# not work properly in non-request-response-cycle context.
new_obj.created_by = creater
new_obj.save()
for m2m in m2m_to_preserve:
for related_obj in m2m_to_preserve[m2m].all():
getattr(new_obj, m2m).add(related_obj)
if not old_parent:
sub_objects = []
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
sub_model = type(sub_obj)
sub_objects.append(
(sub_model.__module__, sub_model.__name__, sub_obj.pk)
)
return new_obj, sub_objects
ret = {obj: new_obj}
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
ret.update(
CopyAPIView.copy_model_obj(
obj, new_obj, type(sub_obj), sub_obj, creater
)
)
return ret
|
def copy_model_obj(
old_parent, new_parent, model, obj, creater, copy_name="", create_kwargs=None
):
fields_to_preserve = set(getattr(model, "FIELDS_TO_PRESERVE_AT_COPY", []))
fields_to_discard = set(getattr(model, "FIELDS_TO_DISCARD_AT_COPY", []))
m2m_to_preserve = {}
o2m_to_preserve = {}
create_kwargs = create_kwargs or {}
for field_name in fields_to_discard:
create_kwargs.pop(field_name, None)
for field in model._meta.get_fields():
try:
field_val = getattr(obj, field.name)
except AttributeError:
continue
# Adjust copy blacklist fields here.
if (
field.name in fields_to_discard
or field.name
in [
"id",
"pk",
"polymorphic_ctype",
"unifiedjobtemplate_ptr",
"created_by",
"modified_by",
]
or field.name.endswith("_role")
):
create_kwargs.pop(field.name, None)
continue
if field.one_to_many:
if field.name in fields_to_preserve:
o2m_to_preserve[field.name] = field_val
elif field.many_to_many:
if field.name in fields_to_preserve and not old_parent:
m2m_to_preserve[field.name] = field_val
elif field.many_to_one and not field_val:
create_kwargs.pop(field.name, None)
elif field.many_to_one and field_val == old_parent:
create_kwargs[field.name] = new_parent
elif field.name == "name" and not old_parent:
create_kwargs[field.name] = copy_name or field_val + " copy"
elif field.name in fields_to_preserve:
create_kwargs[field.name] = CopyAPIView._decrypt_model_field_if_needed(
obj, field.name, field_val
)
new_obj = model.objects.create(**create_kwargs)
# Need to save separatedly because Djang-crum get_current_user would
# not work properly in non-request-response-cycle context.
new_obj.created_by = creater
new_obj.save()
for m2m in m2m_to_preserve:
for related_obj in m2m_to_preserve[m2m].all():
getattr(new_obj, m2m).add(related_obj)
if not old_parent:
sub_objects = []
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
sub_model = type(sub_obj)
sub_objects.append(
(sub_model.__module__, sub_model.__name__, sub_obj.pk)
)
return new_obj, sub_objects
ret = {obj: new_obj}
for o2m in o2m_to_preserve:
for sub_obj in o2m_to_preserve[o2m].all():
ret.update(
CopyAPIView.copy_model_obj(
obj, new_obj, type(sub_obj), sub_obj, creater
)
)
return ret
|
https://github.com/ansible/awx/issues/1664
|
2018-03-23 15:38:36,900 ERROR django.request Internal Server Error: /api/v2/inventory_sources/25/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 257, in put
return self.update(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 741, in update
return super(RetrieveUpdateAPIView, self).update(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 67, in update
instance = self.get_object()
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 101, in get_object
self.check_object_permissions(self.request, obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 344, in check_object_permissions
if not permission.has_object_permission(request, self, obj):
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 138, in has_object_permission
return self.has_permission(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 129, in has_permission
response = self.check_permissions(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 118, in check_permissions
result = check_method and check_method(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 82, in check_put_permissions
request.data)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 117, in check_user_access
result = access_method(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 157, in wrapper
return func(self, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 931, in can_change
self.check_related('credential', Credential, data, obj=obj, role_field='use_role') and
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 290, in check_related
if current and (changed or mandatory) and (not user_has_resource_access(current)):
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 283, in user_has_resource_access
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
KeyError: 'use_role'
|
KeyError
|
def deep_copy_permission_check_func(user, new_objs):
for obj in new_objs:
for field_name in obj._get_workflow_job_field_names():
item = getattr(obj, field_name, None)
if item is None:
continue
elif field_name in ["inventory"]:
if not user.can_access(item.__class__, "use", item):
setattr(obj, field_name, None)
elif field_name in ["unified_job_template"]:
if not user.can_access(
item.__class__, "start", item, validate_license=False
):
setattr(obj, field_name, None)
elif field_name in ["credentials"]:
for cred in item.all():
if not user.can_access(cred.__class__, "use", cred):
logger.debug(
six.text_type(
"Deep copy: removing {} from relationship due to permissions"
).format(cred)
)
item.remove(cred.pk)
obj.save()
|
def deep_copy_permission_check_func(user, new_objs):
for obj in new_objs:
for field_name in obj._get_workflow_job_field_names():
item = getattr(obj, field_name, None)
if item is None:
continue
if field_name in ["inventory"]:
if not user.can_access(item.__class__, "use", item):
setattr(obj, field_name, None)
if field_name in ["unified_job_template"]:
if not user.can_access(
item.__class__, "start", item, validate_license=False
):
setattr(obj, field_name, None)
obj.save()
|
https://github.com/ansible/awx/issues/1664
|
2018-03-23 15:38:36,900 ERROR django.request Internal Server Error: /api/v2/inventory_sources/25/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 257, in put
return self.update(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 741, in update
return super(RetrieveUpdateAPIView, self).update(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 67, in update
instance = self.get_object()
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 101, in get_object
self.check_object_permissions(self.request, obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 344, in check_object_permissions
if not permission.has_object_permission(request, self, obj):
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 138, in has_object_permission
return self.has_permission(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 129, in has_permission
response = self.check_permissions(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 118, in check_permissions
result = check_method and check_method(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 82, in check_put_permissions
request.data)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 117, in check_user_access
result = access_method(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 157, in wrapper
return func(self, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 931, in can_change
self.check_related('credential', Credential, data, obj=obj, role_field='use_role') and
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 290, in check_related
if current and (changed or mandatory) and (not user_has_resource_access(current)):
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 283, in user_has_resource_access
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
KeyError: 'use_role'
|
KeyError
|
def can_copy(self, obj):
if self.save_messages:
missing_ujt = []
missing_credentials = []
missing_inventories = []
qs = obj.workflow_job_template_nodes
qs = qs.prefetch_related(
"unified_job_template", "inventory__use_role", "credentials__use_role"
)
for node in qs.all():
node_errors = {}
if node.inventory and self.user not in node.inventory.use_role:
missing_inventories.append(node.inventory.name)
for cred in node.credentials.all():
if self.user not in cred.use_role:
missing_credentials.append(cred.name)
ujt = node.unified_job_template
if ujt and not self.user.can_access(
UnifiedJobTemplate, "start", ujt, validate_license=False
):
missing_ujt.append(ujt.name)
if node_errors:
wfjt_errors[node.id] = node_errors
if missing_ujt:
self.messages["templates_unable_to_copy"] = missing_ujt
if missing_credentials:
self.messages["credentials_unable_to_copy"] = missing_credentials
if missing_inventories:
self.messages["inventories_unable_to_copy"] = missing_inventories
return self.check_related(
"organization",
Organization,
{"reference_obj": obj},
role_field="workflow_admin_role",
mandatory=True,
)
|
def can_copy(self, obj):
if self.save_messages:
missing_ujt = []
missing_credentials = []
missing_inventories = []
qs = obj.workflow_job_template_nodes
qs = qs.prefetch_related(
"unified_job_template", "inventory__use_role", "credentials__use_role"
)
for node in qs.all():
node_errors = {}
if node.inventory and self.user not in node.inventory.use_role:
missing_inventories.append(node.inventory.name)
for cred in node.credentials.all():
if self.user not in cred.use_role:
missing_credentials.append(node.credential.name)
ujt = node.unified_job_template
if ujt and not self.user.can_access(
UnifiedJobTemplate, "start", ujt, validate_license=False
):
missing_ujt.append(ujt.name)
if node_errors:
wfjt_errors[node.id] = node_errors
if missing_ujt:
self.messages["templates_unable_to_copy"] = missing_ujt
if missing_credentials:
self.messages["credentials_unable_to_copy"] = missing_credentials
if missing_inventories:
self.messages["inventories_unable_to_copy"] = missing_inventories
return self.check_related(
"organization",
Organization,
{"reference_obj": obj},
role_field="workflow_admin_role",
mandatory=True,
)
|
https://github.com/ansible/awx/issues/1664
|
2018-03-23 15:38:36,900 ERROR django.request Internal Server Error: /api/v2/inventory_sources/25/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 257, in put
return self.update(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 741, in update
return super(RetrieveUpdateAPIView, self).update(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 67, in update
instance = self.get_object()
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 101, in get_object
self.check_object_permissions(self.request, obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 344, in check_object_permissions
if not permission.has_object_permission(request, self, obj):
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 138, in has_object_permission
return self.has_permission(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 129, in has_permission
response = self.check_permissions(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 118, in check_permissions
result = check_method and check_method(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 82, in check_put_permissions
request.data)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 117, in check_user_access
result = access_method(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 157, in wrapper
return func(self, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 931, in can_change
self.check_related('credential', Credential, data, obj=obj, role_field='use_role') and
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 290, in check_related
if current and (changed or mandatory) and (not user_has_resource_access(current)):
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 283, in user_has_resource_access
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
KeyError: 'use_role'
|
KeyError
|
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, "FIELDS_TO_PRESERVE_AT_COPY", []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug(
six.text_type(
"Deep copy: Adding {} to {}({}).{} relationship"
).format(related_obj, new_obj, model, field_name)
)
getattr(new_obj, field_name).add(
copy_mapping.get(related_obj, related_obj)
)
new_obj.save()
|
def _reconstruct_relationships(copy_mapping):
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, "FIELDS_TO_PRESERVE_AT_COPY", []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
getattr(new_obj, field_name).add(
copy_mapping.get(related_obj, related_obj)
)
new_obj.save()
|
https://github.com/ansible/awx/issues/1664
|
2018-03-23 15:38:36,900 ERROR django.request Internal Server Error: /api/v2/inventory_sources/25/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 257, in put
return self.update(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 741, in update
return super(RetrieveUpdateAPIView, self).update(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 67, in update
instance = self.get_object()
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 101, in get_object
self.check_object_permissions(self.request, obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 344, in check_object_permissions
if not permission.has_object_permission(request, self, obj):
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 138, in has_object_permission
return self.has_permission(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 129, in has_permission
response = self.check_permissions(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 118, in check_permissions
result = check_method and check_method(request, view, obj)
File "/usr/lib/python2.7/site-packages/awx/api/permissions.py", line 82, in check_put_permissions
request.data)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 117, in check_user_access
result = access_method(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 157, in wrapper
return func(self, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 931, in can_change
self.check_related('credential', Credential, data, obj=obj, role_field='use_role') and
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 290, in check_related
if current and (changed or mandatory) and (not user_has_resource_access(current)):
File "/usr/lib/python2.7/site-packages/awx/main/access.py", line 283, in user_has_resource_access
access_method_type = {'admin_role': 'change', 'execute_role': 'start'}[role_field]
KeyError: 'use_role'
|
KeyError
|
def create_config_from_prompts(self, kwargs):
"""
Create a launch configuration entry for this job, given prompts
returns None if it can not be created
"""
if self.unified_job_template is None:
return None
JobLaunchConfig = self._meta.get_field("launch_config").related_model
config = JobLaunchConfig(job=self)
valid_fields = self.unified_job_template.get_ask_mapping().keys()
# Special cases allowed for workflows
if hasattr(self, "extra_vars"):
valid_fields.extend(["survey_passwords", "extra_vars"])
else:
kwargs.pop("survey_passwords", None)
for field_name, value in kwargs.items():
if field_name not in valid_fields:
raise Exception("Unrecognized launch config field {}.".format(field_name))
if field_name == "credentials":
continue
key = field_name
if key == "extra_vars":
key = "extra_data"
setattr(config, key, value)
config.save()
job_creds = set(kwargs.get("credentials", [])) - set(
self.unified_job_template.credentials.all()
)
if job_creds:
config.credentials.add(*job_creds)
return config
|
def create_config_from_prompts(self, kwargs):
"""
Create a launch configuration entry for this job, given prompts
returns None if it can not be created
"""
if self.unified_job_template is None:
return None
JobLaunchConfig = self._meta.get_field("launch_config").related_model
config = JobLaunchConfig(job=self)
valid_fields = self.unified_job_template.get_ask_mapping().keys()
if hasattr(self, "extra_vars"):
valid_fields.extend(["survey_passwords", "extra_vars"])
for field_name, value in kwargs.items():
if field_name not in valid_fields:
raise Exception("Unrecognized launch config field {}.".format(field_name))
if field_name == "credentials":
continue
key = field_name
if key == "extra_vars":
key = "extra_data"
setattr(config, key, value)
config.save()
job_creds = set(kwargs.get("credentials", [])) - set(
self.unified_job_template.credentials.all()
)
if job_creds:
config.credentials.add(*job_creds)
return config
|
https://github.com/ansible/awx/issues/1658
|
2018-03-23 00:17:12,246 ERROR awx.main.scheduler Task awx.main.scheduler.tasks.run_task_manager encountered exception.
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 438, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/tasks.py", line 37, in run_task_manager
TaskManager().schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 629, in schedule
finished_wfjs = self._schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 614, in _schedule
self.spawn_workflow_graph_jobs(running_workflow_tasks)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 196, in spawn_workflow_graph_jobs
job = spawn_node.unified_job_template.create_unified_job(**kv)
File "/usr/lib/python2.7/site-packages/awx/main/models/inventory.py", line 1500, in create_unified_job
return super(InventorySource, self).create_unified_job(**kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 393, in create_unified_job
unified_job.create_config_from_prompts(kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 880, in create_config_from_prompts
raise Exception('Unrecognized launch config field {}.'.format(field_name))
Exception: Unrecognized launch config field survey_passwords.
[2018-03-23 00:17:12,249: ERROR/MainProcess] Task awx.main.scheduler.tasks.run_task_manager[c29823d8-2fc7-40a4-ba2d-c79253cb4596] raised unexpected: Exception('Unrecognized launch config field survey_passwords.',)
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 438, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/tasks.py", line 37, in run_task_manager
TaskManager().schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 629, in schedule
finished_wfjs = self._schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 614, in _schedule
self.spawn_workflow_graph_jobs(running_workflow_tasks)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 196, in spawn_workflow_graph_jobs
job = spawn_node.unified_job_template.create_unified_job(**kv)
File "/usr/lib/python2.7/site-packages/awx/main/models/inventory.py", line 1500, in create_unified_job
return super(InventorySource, self).create_unified_job(**kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 393, in create_unified_job
unified_job.create_config_from_prompts(kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 880, in create_config_from_prompts
raise Exception('Unrecognized launch config field {}.'.format(field_name))
Exception: Unrecognized launch config field survey_passwords.
[2018-03-23 00:17:26,777: INFO/MainProcess] Scaling down -2 processes.
[2018-03-23 00:17:26,778: DEBUG/MainProcess] basic.qos: prefetch_count->5432
[2018-03-23 00:17:31,896: DEBUG/Beat] beat: Synchronizing schedule...
[2018-03-23 00:17:31,916: INFO/Beat] Scheduler: Sending due task tower_scheduler (awx.main.tasks.awx_periodic_scheduler)
[2018-03-23 00:17:31,918: DEBUG/Beat] awx.main.tasks.awx_periodic_scheduler sent. id->c0466b22-2a05-4ac1-868c-0c28b34d0980
[2018-03-23 00:17:31,919: DEBUG/Beat] beat: Waking up now.
[2018-03-23 00:17:31,919: INFO/MainProcess] Received task: awx.main.tasks.awx_periodic_scheduler[c0466b22-2a05-4ac1-868c-0c28b34d0980] expires:[2018-03-23 00:17:51.917332+00:00]
[2018-03-23 00:17:31,919: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x30f8aa0> (args:(u'awx.main.tasks.awx_periodic_scheduler', u'c0466b22-2a05-4ac1-868c-0c28b34d0980', [], {}, {u'utc': True, u'is_eager': False, u'chord': None, u'group': None, u'args': [], u'retries': 0, u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u'tower'}, u'expires': u'2018-03-23T00:17:51.917332+00:00', u'hostname': 'celery@awx', u'task': u'awx.main.tasks.awx_periodic_scheduler', u'callbacks': None, u'correlation_id': u'c0466b22-2a05-4ac1-868c-0c28b34d0980', u'errbacks': None, u'timelimit': [None, None], u'taskset': None, u'kwargs': {}, u'eta': None, u'reply_to': u'ebf3eb54-7a03-3352-a5e6-df53ebd2ceb6', u'id': u'c0466b22-2a05-4ac1-868c-0c28b34d0980', u'headers': {}}) kwargs:{})
[2018-03-23 00:17:31,921: DEBUG/MainProcess] Task accepted: awx.main.tasks.awx_periodic_scheduler[c0466b22-2a05-4ac1-868c-0c28b34d0980] pid:20372
2018-03-23 00:17:31,935 DEBUG awx.main.tasks Last scheduler run was: 2018-03-23 00:17:01.909449+00:00
[2018-03-23 00:17:31,960: INFO/MainProcess] Task awx.main.tasks.awx_periodic_scheduler[c0466b22-2a05-4ac1-868c-0c28b34d0980] succeeded in 0.0399558200006s: None
[2018-03-23 00:17:31,973: INFO/Beat] Scheduler: Sending due task task_manager (awx.main.scheduler.tasks.run_task_manager)
[2018-03-23 00:17:31,974: DEBUG/Beat] awx.main.scheduler.tasks.run_task_manager sent. id->0b292032-d685-4beb-a953-9b5fe6e6c812
[2018-03-23 00:17:31,975: DEBUG/Beat] beat: Waking up in 18.41 seconds.
[2018-03-23 00:17:31,975: INFO/MainProcess] Received task: awx.main.scheduler.tasks.run_task_manager[0b292032-d685-4beb-a953-9b5fe6e6c812] expires:[2018-03-23 00:17:51.974214+00:00]
[2018-03-23 00:17:31,976: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x30f8aa0> (args:(u'awx.main.scheduler.tasks.run_task_manager', u'0b292032-d685-4beb-a953-9b5fe6e6c812', [], {}, {u'utc': True, u'is_eager': False, u'chord': None, u'group': None, u'args': [], u'retries': 0, u'delivery_info': {u'priority': 0, u'redelivered': False, u'routing_key': u'tower', u'exchange': u'tower'}, u'expires': u'2018-03-23T00:17:51.974214+00:00', u'hostname': 'celery@awx', u'task': u'awx.main.scheduler.tasks.run_task_manager', u'callbacks': None, u'correlation_id': u'0b292032-d685-4beb-a953-9b5fe6e6c812', u'errbacks': None, u'timelimit': [None, None], u'taskset': None, u'kwargs': {}, u'eta': None, u'reply_to': u'ebf3eb54-7a03-3352-a5e6-df53ebd2ceb6', u'id': u'0b292032-d685-4beb-a953-9b5fe6e6c812', u'headers': {}}) kwargs:{})
[2018-03-23 00:17:31,977: DEBUG/MainProcess] Task accepted: awx.main.scheduler.tasks.run_task_manager[0b292032-d685-4beb-a953-9b5fe6e6c812] pid:20372
2018-03-23 00:17:31,989 DEBUG awx.main.scheduler Running Tower task manager.
2018-03-23 00:17:31,997 DEBUG awx.main.scheduler Starting Scheduler
2018-03-23 00:17:32,187 WARNING awx.main.models.unified_jobs Fields set(['extra_vars']) are not allowed as overrides.
[2018-03-23 00:17:32,187: WARNING/Worker-445] Fields set(['extra_vars']) are not allowed as overrides.
2018-03-23 00:17:32,219 ERROR awx.main.scheduler Task awx.main.scheduler.tasks.run_task_manager encountered exception.
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 438, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/tasks.py", line 37, in run_task_manager
TaskManager().schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 629, in schedule
finished_wfjs = self._schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 614, in _schedule
self.spawn_workflow_graph_jobs(running_workflow_tasks)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 196, in spawn_workflow_graph_jobs
job = spawn_node.unified_job_template.create_unified_job(**kv)
File "/usr/lib/python2.7/site-packages/awx/main/models/inventory.py", line 1500, in create_unified_job
return super(InventorySource, self).create_unified_job(**kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 393, in create_unified_job
unified_job.create_config_from_prompts(kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 880, in create_config_from_prompts
raise Exception('Unrecognized launch config field {}.'.format(field_name))
Exception: Unrecognized launch config field survey_passwords.
|
Exception
|
def can_copy(self, obj):
if self.save_messages:
missing_ujt = []
missing_credentials = []
missing_inventories = []
qs = obj.workflow_job_template_nodes
qs = qs.prefetch_related(
"unified_job_template", "inventory__use_role", "credentials__use_role"
)
for node in qs.all():
node_errors = {}
if node.inventory and self.user not in node.inventory.use_role:
missing_inventories.append(node.inventory.name)
for cred in node.credentials.all():
if self.user not in cred.use_role:
missing_credentials.append(node.credential.name)
ujt = node.unified_job_template
if ujt and not self.user.can_access(
UnifiedJobTemplate, "start", ujt, validate_license=False
):
missing_ujt.append(ujt.name)
if node_errors:
wfjt_errors[node.id] = node_errors
if missing_ujt:
self.messages["templates_unable_to_copy"] = missing_ujt
if missing_credentials:
self.messages["credentials_unable_to_copy"] = missing_credentials
if missing_inventories:
self.messages["inventories_unable_to_copy"] = missing_inventories
return self.check_related(
"organization",
Organization,
{"reference_obj": obj},
role_field="workflow_admin_role",
mandatory=True,
)
|
def can_copy(self, obj):
if self.save_messages:
missing_ujt = []
missing_credentials = []
missing_inventories = []
qs = obj.workflow_job_template_nodes
qs = qs.prefetch_related(
"unified_job_template", "inventory__use_role", "credential__use_role"
)
for node in qs.all():
node_errors = {}
if node.inventory and self.user not in node.inventory.use_role:
missing_inventories.append(node.inventory.name)
if node.credential and self.user not in node.credential.use_role:
missing_credentials.append(node.credential.name)
ujt = node.unified_job_template
if ujt and not self.user.can_access(
UnifiedJobTemplate, "start", ujt, validate_license=False
):
missing_ujt.append(ujt.name)
if node_errors:
wfjt_errors[node.id] = node_errors
if missing_ujt:
self.messages["templates_unable_to_copy"] = missing_ujt
if missing_credentials:
self.messages["credentials_unable_to_copy"] = missing_credentials
if missing_inventories:
self.messages["inventories_unable_to_copy"] = missing_inventories
return self.check_related(
"organization",
Organization,
{"reference_obj": obj},
role_field="workflow_admin_role",
mandatory=True,
)
|
https://github.com/ansible/awx/issues/1658
|
2018-03-23 00:17:12,246 ERROR awx.main.scheduler Task awx.main.scheduler.tasks.run_task_manager encountered exception.
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 438, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/tasks.py", line 37, in run_task_manager
TaskManager().schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 629, in schedule
finished_wfjs = self._schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 614, in _schedule
self.spawn_workflow_graph_jobs(running_workflow_tasks)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 196, in spawn_workflow_graph_jobs
job = spawn_node.unified_job_template.create_unified_job(**kv)
File "/usr/lib/python2.7/site-packages/awx/main/models/inventory.py", line 1500, in create_unified_job
return super(InventorySource, self).create_unified_job(**kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 393, in create_unified_job
unified_job.create_config_from_prompts(kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 880, in create_config_from_prompts
raise Exception('Unrecognized launch config field {}.'.format(field_name))
Exception: Unrecognized launch config field survey_passwords.
[2018-03-23 00:17:12,249: ERROR/MainProcess] Task awx.main.scheduler.tasks.run_task_manager[c29823d8-2fc7-40a4-ba2d-c79253cb4596] raised unexpected: Exception('Unrecognized launch config field survey_passwords.',)
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 438, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/tasks.py", line 37, in run_task_manager
TaskManager().schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 629, in schedule
finished_wfjs = self._schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 614, in _schedule
self.spawn_workflow_graph_jobs(running_workflow_tasks)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 196, in spawn_workflow_graph_jobs
job = spawn_node.unified_job_template.create_unified_job(**kv)
File "/usr/lib/python2.7/site-packages/awx/main/models/inventory.py", line 1500, in create_unified_job
return super(InventorySource, self).create_unified_job(**kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 393, in create_unified_job
unified_job.create_config_from_prompts(kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 880, in create_config_from_prompts
raise Exception('Unrecognized launch config field {}.'.format(field_name))
Exception: Unrecognized launch config field survey_passwords.
[2018-03-23 00:17:26,777: INFO/MainProcess] Scaling down -2 processes.
[2018-03-23 00:17:26,778: DEBUG/MainProcess] basic.qos: prefetch_count->5432
[2018-03-23 00:17:31,896: DEBUG/Beat] beat: Synchronizing schedule...
[2018-03-23 00:17:31,916: INFO/Beat] Scheduler: Sending due task tower_scheduler (awx.main.tasks.awx_periodic_scheduler)
[2018-03-23 00:17:31,918: DEBUG/Beat] awx.main.tasks.awx_periodic_scheduler sent. id->c0466b22-2a05-4ac1-868c-0c28b34d0980
[2018-03-23 00:17:31,919: DEBUG/Beat] beat: Waking up now.
[2018-03-23 00:17:31,919: INFO/MainProcess] Received task: awx.main.tasks.awx_periodic_scheduler[c0466b22-2a05-4ac1-868c-0c28b34d0980] expires:[2018-03-23 00:17:51.917332+00:00]
[2018-03-23 00:17:31,919: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x30f8aa0> (args:(u'awx.main.tasks.awx_periodic_scheduler', u'c0466b22-2a05-4ac1-868c-0c28b34d0980', [], {}, {u'utc': True, u'is_eager': False, u'chord': None, u'group': None, u'args': [], u'retries': 0, u'delivery_info': {u'priority': None, u'redelivered': False, u'routing_key': u'tower', u'exchange': u'tower'}, u'expires': u'2018-03-23T00:17:51.917332+00:00', u'hostname': 'celery@awx', u'task': u'awx.main.tasks.awx_periodic_scheduler', u'callbacks': None, u'correlation_id': u'c0466b22-2a05-4ac1-868c-0c28b34d0980', u'errbacks': None, u'timelimit': [None, None], u'taskset': None, u'kwargs': {}, u'eta': None, u'reply_to': u'ebf3eb54-7a03-3352-a5e6-df53ebd2ceb6', u'id': u'c0466b22-2a05-4ac1-868c-0c28b34d0980', u'headers': {}}) kwargs:{})
[2018-03-23 00:17:31,921: DEBUG/MainProcess] Task accepted: awx.main.tasks.awx_periodic_scheduler[c0466b22-2a05-4ac1-868c-0c28b34d0980] pid:20372
2018-03-23 00:17:31,935 DEBUG awx.main.tasks Last scheduler run was: 2018-03-23 00:17:01.909449+00:00
[2018-03-23 00:17:31,960: INFO/MainProcess] Task awx.main.tasks.awx_periodic_scheduler[c0466b22-2a05-4ac1-868c-0c28b34d0980] succeeded in 0.0399558200006s: None
[2018-03-23 00:17:31,973: INFO/Beat] Scheduler: Sending due task task_manager (awx.main.scheduler.tasks.run_task_manager)
[2018-03-23 00:17:31,974: DEBUG/Beat] awx.main.scheduler.tasks.run_task_manager sent. id->0b292032-d685-4beb-a953-9b5fe6e6c812
[2018-03-23 00:17:31,975: DEBUG/Beat] beat: Waking up in 18.41 seconds.
[2018-03-23 00:17:31,975: INFO/MainProcess] Received task: awx.main.scheduler.tasks.run_task_manager[0b292032-d685-4beb-a953-9b5fe6e6c812] expires:[2018-03-23 00:17:51.974214+00:00]
[2018-03-23 00:17:31,976: DEBUG/MainProcess] TaskPool: Apply <function _fast_trace_task at 0x30f8aa0> (args:(u'awx.main.scheduler.tasks.run_task_manager', u'0b292032-d685-4beb-a953-9b5fe6e6c812', [], {}, {u'utc': True, u'is_eager': False, u'chord': None, u'group': None, u'args': [], u'retries': 0, u'delivery_info': {u'priority': 0, u'redelivered': False, u'routing_key': u'tower', u'exchange': u'tower'}, u'expires': u'2018-03-23T00:17:51.974214+00:00', u'hostname': 'celery@awx', u'task': u'awx.main.scheduler.tasks.run_task_manager', u'callbacks': None, u'correlation_id': u'0b292032-d685-4beb-a953-9b5fe6e6c812', u'errbacks': None, u'timelimit': [None, None], u'taskset': None, u'kwargs': {}, u'eta': None, u'reply_to': u'ebf3eb54-7a03-3352-a5e6-df53ebd2ceb6', u'id': u'0b292032-d685-4beb-a953-9b5fe6e6c812', u'headers': {}}) kwargs:{})
[2018-03-23 00:17:31,977: DEBUG/MainProcess] Task accepted: awx.main.scheduler.tasks.run_task_manager[0b292032-d685-4beb-a953-9b5fe6e6c812] pid:20372
2018-03-23 00:17:31,989 DEBUG awx.main.scheduler Running Tower task manager.
2018-03-23 00:17:31,997 DEBUG awx.main.scheduler Starting Scheduler
2018-03-23 00:17:32,187 WARNING awx.main.models.unified_jobs Fields set(['extra_vars']) are not allowed as overrides.
[2018-03-23 00:17:32,187: WARNING/Worker-445] Fields set(['extra_vars']) are not allowed as overrides.
2018-03-23 00:17:32,219 ERROR awx.main.scheduler Task awx.main.scheduler.tasks.run_task_manager encountered exception.
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/celery/app/trace.py", line 438, in __protected_call__
return self.run(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/tasks.py", line 37, in run_task_manager
TaskManager().schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 629, in schedule
finished_wfjs = self._schedule()
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 614, in _schedule
self.spawn_workflow_graph_jobs(running_workflow_tasks)
File "/usr/lib/python2.7/site-packages/awx/main/scheduler/task_manager.py", line 196, in spawn_workflow_graph_jobs
job = spawn_node.unified_job_template.create_unified_job(**kv)
File "/usr/lib/python2.7/site-packages/awx/main/models/inventory.py", line 1500, in create_unified_job
return super(InventorySource, self).create_unified_job(**kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 393, in create_unified_job
unified_job.create_config_from_prompts(kwargs)
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 880, in create_config_from_prompts
raise Exception('Unrecognized launch config field {}.'.format(field_name))
Exception: Unrecognized launch config field survey_passwords.
|
Exception
|
def _obj_capability_dict(self, obj):
"""
Returns the user_capabilities dictionary for a single item
If inside of a list view, it runs the prefetching algorithm for
the entire current page, saves it into context
"""
view = self.context.get("view", None)
parent_obj = None
if view and hasattr(view, "parent_model") and hasattr(view, "get_parent_object"):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
capabilities_cache = {}
# if serializer has parent, it is ListView, apply page capabilities prefetch
if (
self.parent
and hasattr(self, "capabilities_prefetch")
and self.capabilities_prefetch
):
qs = self.parent.instance
if "capability_map" not in self.context:
if hasattr(self, "polymorphic_base"):
model = self.polymorphic_base.Meta.model
prefetch_list = self.polymorphic_base._capabilities_prefetch
else:
model = self.Meta.model
prefetch_list = self.capabilities_prefetch
self.context["capability_map"] = prefetch_page_capabilities(
model, qs, prefetch_list, view.request.user
)
if obj.id in self.context["capability_map"]:
capabilities_cache = self.context["capability_map"][obj.id]
return get_user_capabilities(
view.request.user,
obj,
method_list=self.show_capabilities,
parent_obj=parent_obj,
capabilities_cache=capabilities_cache,
)
else:
# Contextual information to produce user_capabilities doesn't exist
return {}
|
def _obj_capability_dict(self, obj):
"""
Returns the user_capabilities dictionary for a single item
If inside of a list view, it runs the prefetching algorithm for
the entire current page, saves it into context
"""
view = self.context.get("view", None)
parent_obj = None
if view and hasattr(view, "parent_model") and hasattr(view, "get_parent_object"):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
capabilities_cache = {}
# if serializer has parent, it is ListView, apply page capabilities prefetch
if (
self.parent
and hasattr(self, "capabilities_prefetch")
and self.capabilities_prefetch
):
qs = self.parent.instance
if "capability_map" not in self.context:
if hasattr(self, "polymorphic_base"):
model = self.polymorphic_base.Meta.model
prefetch_list = self.polymorphic_base.capabilities_prefetch
else:
model = self.Meta.model
prefetch_list = self.capabilities_prefetch
self.context["capability_map"] = prefetch_page_capabilities(
model, qs, prefetch_list, view.request.user
)
if obj.id in self.context["capability_map"]:
capabilities_cache = self.context["capability_map"][obj.id]
return get_user_capabilities(
view.request.user,
obj,
method_list=self.show_capabilities,
parent_obj=parent_obj,
capabilities_cache=capabilities_cache,
)
else:
# Contextual information to produce user_capabilities doesn't exist
return {}
|
https://github.com/ansible/awx/issues/1546
|
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
2018-03-13 19:49:42,026 ERROR django.request Internal Server Error: /api/v2/inventory_sources/
Traceback (most recent call last):
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "/usr/lib/python2.7/site-packages/awx/wsgi.py", line 65, in _legacy_get_response
return super(AWXWSGIHandler, self)._legacy_get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 249, in _legacy_get_response
response = self._get_response(request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 187, in _get_response
response = self.process_exception_by_middleware(e, request)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/utils/decorators.py", line 185, in inner
return func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/django/views/generic/base.py", line 68, in view
return self.dispatch(request, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/awx/api/generics.py", line 284, in dispatch
return super(APIView, self).dispatch(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 494, in dispatch
response = self.handle_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 454, in handle_exception
self.raise_uncaught_exception(exc)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/views.py", line 491, in dispatch
response = handler(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/mixins.py", line 45, in list
return self.get_paginated_response(serializer.data)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 742, in data
ret = super(ListSerializer, self).data
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 262, in data
self._data = self.to_representation(self.instance)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 660, in to_representation
self.child.to_representation(item) for item in iterable
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2070, in to_representation
ret = super(InventorySourceSerializer, self).to_representation(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 683, in to_representation
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/serializers.py", line 504, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/var/lib/awx/venv/awx/lib/python2.7/site-packages/rest_framework/fields.py", line 1816, in to_representation
return method(value)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 346, in _get_summary_fields
return {} if obj is None else self.get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 2038, in get_summary_fields
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 1973, in get_summary_fields
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 407, in get_summary_fields
user_capabilities = self._obj_capability_dict(obj)
File "/usr/lib/python2.7/site-packages/awx/api/serializers.py", line 436, in _obj_capability_dict
model, qs, prefetch_list, view.request.user
File "/usr/lib/python2.7/site-packages/awx/main/utils/common.py", line 571, in prefetch_page_capabilities
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
File "/usr/lib/python2.7/site-packages/awx/main/models/unified_jobs.py", line 199, in accessible_pk_qs
return super(UnifiedJobTemplate, cls).accessible_pk_qs(accessor, role_field)
AttributeError: 'super' object has no attribute 'accessible_pk_qs'
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.